v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * raid10.c : Multiple Devices driver for Linux
   4 *
   5 * Copyright (C) 2000-2004 Neil Brown
   6 *
   7 * RAID-10 support for md.
   8 *
   9 * Based on code in raid1.c.  See raid1.c for further copyright information.
  10 */
  11
  12#include <linux/slab.h>
  13#include <linux/delay.h>
  14#include <linux/blkdev.h>
  15#include <linux/module.h>
  16#include <linux/seq_file.h>
  17#include <linux/ratelimit.h>
  18#include <linux/kthread.h>
  19#include <linux/raid/md_p.h>
  20#include <trace/events/block.h>
  21#include "md.h"
  22#include "raid10.h"
  23#include "raid0.h"
  24#include "md-bitmap.h"
  25
  26/*
  27 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  28 * The layout of data is defined by
  29 *    chunk_size
  30 *    raid_disks
  31 *    near_copies (stored in low byte of layout)
  32 *    far_copies (stored in second byte of layout)
  33 *    far_offset (stored in bit 16 of layout )
  34 *    use_far_sets (stored in bit 17 of layout )
  35 *    use_far_sets_bugfixed (stored in bit 18 of layout )
  36 *
  37 * The data to be stored is divided into chunks using chunksize.  Each device
  38 * is divided into far_copies sections.   In each section, chunks are laid out
  39 * in a style similar to raid0, but near_copies copies of each chunk are stored
  40 * (each on a different drive).  The starting device for each section is offset
  41 * near_copies from the starting device of the previous section.  Thus there
  42 * are (near_copies * far_copies) of each chunk, and each is on a different
  43 * drive.  near_copies and far_copies must be at least one, and their product
  44 * is at most raid_disks.
  45 *
  46 * If far_offset is true, then the far_copies are handled a bit differently.
  47 * The copies are still in different stripes, but instead of being very far
  48 * apart on disk, they are in adjacent stripes.
  49 *
  50 * The far and offset algorithms are handled slightly differently if
  51 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
  52 * sets that are (near_copies * far_copies) in size.  The far copied stripes
  53 * are still shifted by 'near_copies' devices, but this shifting stays confined
  54 * to the set rather than the entire array.  This is done to improve the number
  55 * of device combinations that can fail without causing the array to fail.
  56 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
  57 * on a device):
  58 *    A B C D    A B C D E
  59 *      ...         ...
  60 *    D A B C    E A B C D
  61 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
  62 *    [A B] [C D]    [A B] [C D E]
  63 *    |...| |...|    |...| | ... |
  64 *    [B A] [D C]    [B A] [E C D]
  65 */
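/*
 * Editor's sketch (not part of the driver): how the fields listed above map
 * onto a raw 'layout' word.  The variable names are illustrative only; the
 * driver does its own decoding later in this file (setup_geo(), not shown
 * in this excerpt).
 *
 *	int  near_copies  =  layout       & 0xff;	// low byte
 *	int  far_copies   = (layout >> 8) & 0xff;	// second byte
 *	bool far_offset   =  layout & (1 << 16);
 *	bool use_far_sets =  layout & (1 << 17);	// bit 18: bugfixed variant
 *
 * with the constraint that near_copies * far_copies <= raid_disks.
 */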
  66
  67static void allow_barrier(struct r10conf *conf);
  68static void lower_barrier(struct r10conf *conf);
  69static int _enough(struct r10conf *conf, int previous, int ignore);
  70static int enough(struct r10conf *conf, int ignore);
  71static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
  72				int *skipped);
  73static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
  74static void end_reshape_write(struct bio *bio);
  75static void end_reshape(struct r10conf *conf);
  76
  77#define raid10_log(md, fmt, args...)				\
  78	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
  79
  80#include "raid1-10.c"
  81
  82#define NULL_CMD
  83#define cmd_before(conf, cmd) \
  84	do { \
  85		write_sequnlock_irq(&(conf)->resync_lock); \
  86		cmd; \
  87	} while (0)
  88#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
  89
  90#define wait_event_barrier_cmd(conf, cond, cmd) \
  91	wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
  92		       cmd_after(conf))
  93
  94#define wait_event_barrier(conf, cond) \
  95	wait_event_barrier_cmd(conf, cond, NULL_CMD)
  96
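/*
 * Editor's note (illustrative, not in the original source): a call such as
 * wait_event_barrier_cmd(conf, cond, flush_pending_writes(conf)) - the form
 * used by freeze_array() below - effectively behaves like:
 *
 *	while (!cond) {
 *		write_sequnlock_irq(&conf->resync_lock);	// cmd_before
 *		flush_pending_writes(conf);			// cmd
 *		...sleep on conf->wait_barrier...
 *		write_seqlock_irq(&conf->resync_lock);		// cmd_after
 *	}
 *
 * i.e. the resync seqlock is dropped around both the command and the sleep.
 */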
  97/*
  98 * for resync bio, r10bio pointer can be retrieved from the per-bio
  99 * 'struct resync_pages'.
 100 */
 101static inline struct r10bio *get_resync_r10bio(struct bio *bio)
 102{
 103	return get_resync_pages(bio)->raid_bio;
 104}
 105
 106static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 107{
 108	struct r10conf *conf = data;
 109	int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
 110
 111	/* allocate a r10bio with room for raid_disks entries in the
 112	 * bios array */
 113	return kzalloc(size, gfp_flags);
 114}
 115
 116#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 117/* amount of memory to reserve for resync requests */
 118#define RESYNC_WINDOW (1024*1024)
 119/* maximum number of concurrent requests, memory permitting */
 120#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
 121#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
 122#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 123
 124/*
 125 * When performing a resync, we need to read and compare, so
 126 * we need as many pages as there are copies.
 127 * When performing a recovery, we need 2 bios, one for read,
 128 * one for write (we recover only one drive per r10buf)
 129 *
 130 */
 131static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 132{
 133	struct r10conf *conf = data;
 134	struct r10bio *r10_bio;
 135	struct bio *bio;
 136	int j;
 137	int nalloc, nalloc_rp;
 138	struct resync_pages *rps;
 139
 140	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 141	if (!r10_bio)
 142		return NULL;
 143
 144	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
 145	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
 146		nalloc = conf->copies; /* resync */
 147	else
 148		nalloc = 2; /* recovery */
 149
 150	/* allocate once for all bios */
 151	if (!conf->have_replacement)
 152		nalloc_rp = nalloc;
 153	else
 154		nalloc_rp = nalloc * 2;
 155	rps = kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags);
 156	if (!rps)
 157		goto out_free_r10bio;
 158
 159	/*
 160	 * Allocate bios.
 161	 */
 162	for (j = nalloc ; j-- ; ) {
 163		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 164		if (!bio)
 165			goto out_free_bio;
 166		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 167		r10_bio->devs[j].bio = bio;
 168		if (!conf->have_replacement)
 169			continue;
 170		bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
 171		if (!bio)
 172			goto out_free_bio;
 173		bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
 174		r10_bio->devs[j].repl_bio = bio;
 175	}
 176	/*
 177	 * Allocate RESYNC_PAGES data pages and attach them
 178	 * where needed.
 179	 */
 180	for (j = 0; j < nalloc; j++) {
 181		struct bio *rbio = r10_bio->devs[j].repl_bio;
 182		struct resync_pages *rp, *rp_repl;
 183
 184		rp = &rps[j];
 185		if (rbio)
 186			rp_repl = &rps[nalloc + j];
 187
 188		bio = r10_bio->devs[j].bio;
 189
 190		if (!j || test_bit(MD_RECOVERY_SYNC,
 191				   &conf->mddev->recovery)) {
 192			if (resync_alloc_pages(rp, gfp_flags))
 193				goto out_free_pages;
 194		} else {
 195			memcpy(rp, &rps[0], sizeof(*rp));
 196			resync_get_all_pages(rp);
 197		}
 198
 199		rp->raid_bio = r10_bio;
 200		bio->bi_private = rp;
 201		if (rbio) {
 202			memcpy(rp_repl, rp, sizeof(*rp));
 203			rbio->bi_private = rp_repl;
 204		}
 205	}
 206
 207	return r10_bio;
 208
 209out_free_pages:
 210	while (--j >= 0)
 211		resync_free_pages(&rps[j]);
 212
 213	j = 0;
 214out_free_bio:
 215	for ( ; j < nalloc; j++) {
 216		if (r10_bio->devs[j].bio)
 217			bio_uninit(r10_bio->devs[j].bio);
 218		kfree(r10_bio->devs[j].bio);
 219		if (r10_bio->devs[j].repl_bio)
 220			bio_uninit(r10_bio->devs[j].repl_bio);
 221		kfree(r10_bio->devs[j].repl_bio);
 222	}
 223	kfree(rps);
 224out_free_r10bio:
 225	rbio_pool_free(r10_bio, conf);
 226	return NULL;
 227}
 228
 229static void r10buf_pool_free(void *__r10_bio, void *data)
 230{
 231	struct r10conf *conf = data;
 232	struct r10bio *r10bio = __r10_bio;
 233	int j;
 234	struct resync_pages *rp = NULL;
 235
 236	for (j = conf->copies; j--; ) {
 237		struct bio *bio = r10bio->devs[j].bio;
 238
 239		if (bio) {
 240			rp = get_resync_pages(bio);
 241			resync_free_pages(rp);
 242			bio_uninit(bio);
 243			kfree(bio);
 244		}
 245
 246		bio = r10bio->devs[j].repl_bio;
 247		if (bio) {
 248			bio_uninit(bio);
 249			kfree(bio);
 250		}
 251	}
 252
 253	/* resync pages array stored in the 1st bio's .bi_private */
 254	kfree(rp);
 255
 256	rbio_pool_free(r10bio, conf);
 257}
 258
 259static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 260{
 261	int i;
 262
 263	for (i = 0; i < conf->geo.raid_disks; i++) {
 264		struct bio **bio = & r10_bio->devs[i].bio;
 265		if (!BIO_SPECIAL(*bio))
 266			bio_put(*bio);
 267		*bio = NULL;
 268		bio = &r10_bio->devs[i].repl_bio;
 269		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 270			bio_put(*bio);
 271		*bio = NULL;
 272	}
 273}
 274
 275static void free_r10bio(struct r10bio *r10_bio)
 276{
 277	struct r10conf *conf = r10_bio->mddev->private;
 278
 279	put_all_bios(conf, r10_bio);
 280	mempool_free(r10_bio, &conf->r10bio_pool);
 281}
 282
 283static void put_buf(struct r10bio *r10_bio)
 284{
 285	struct r10conf *conf = r10_bio->mddev->private;
 286
 287	mempool_free(r10_bio, &conf->r10buf_pool);
 288
 289	lower_barrier(conf);
 290}
 291
 292static void wake_up_barrier(struct r10conf *conf)
 293{
 294	if (wq_has_sleeper(&conf->wait_barrier))
 295		wake_up(&conf->wait_barrier);
 296}
 297
 298static void reschedule_retry(struct r10bio *r10_bio)
 299{
 300	unsigned long flags;
 301	struct mddev *mddev = r10_bio->mddev;
 302	struct r10conf *conf = mddev->private;
 303
 304	spin_lock_irqsave(&conf->device_lock, flags);
 305	list_add(&r10_bio->retry_list, &conf->retry_list);
 306	conf->nr_queued ++;
 307	spin_unlock_irqrestore(&conf->device_lock, flags);
 308
 309	/* wake up frozen array... */
 310	wake_up(&conf->wait_barrier);
 311
 312	md_wakeup_thread(mddev->thread);
 313}
 314
 315/*
 316 * raid_end_bio_io() is called when we have finished servicing a mirrored
 317 * operation and are ready to return a success/failure code to the buffer
 318 * cache layer.
 319 */
 320static void raid_end_bio_io(struct r10bio *r10_bio)
 321{
 322	struct bio *bio = r10_bio->master_bio;
 323	struct r10conf *conf = r10_bio->mddev->private;
 324
 325	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 326		bio->bi_status = BLK_STS_IOERR;
 327
 328	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
 329		bio_end_io_acct(bio, r10_bio->start_time);
 330	bio_endio(bio);
 331	/*
 332	 * Wake up any possible resync thread that waits for the device
 333	 * to go idle.
 334	 */
 335	allow_barrier(conf);
 336
 337	free_r10bio(r10_bio);
 338}
 339
 340/*
 341 * Update disk head position estimator based on IRQ completion info.
 342 */
 343static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 344{
 345	struct r10conf *conf = r10_bio->mddev->private;
 346
 347	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 348		r10_bio->devs[slot].addr + (r10_bio->sectors);
 349}
 350
 351/*
 352 * Find the disk number which triggered given bio
 353 */
 354static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 355			 struct bio *bio, int *slotp, int *replp)
 356{
 357	int slot;
 358	int repl = 0;
 359
 360	for (slot = 0; slot < conf->geo.raid_disks; slot++) {
 361		if (r10_bio->devs[slot].bio == bio)
 362			break;
 363		if (r10_bio->devs[slot].repl_bio == bio) {
 364			repl = 1;
 365			break;
 366		}
 367	}
 368
 369	update_head_pos(slot, r10_bio);
 370
 371	if (slotp)
 372		*slotp = slot;
 373	if (replp)
 374		*replp = repl;
 375	return r10_bio->devs[slot].devnum;
 376}
 377
 378static void raid10_end_read_request(struct bio *bio)
 379{
 380	int uptodate = !bio->bi_status;
 381	struct r10bio *r10_bio = bio->bi_private;
 382	int slot;
 383	struct md_rdev *rdev;
 384	struct r10conf *conf = r10_bio->mddev->private;
 385
 386	slot = r10_bio->read_slot;
 387	rdev = r10_bio->devs[slot].rdev;
 388	/*
 389	 * this branch is our 'one mirror IO has finished' event handler:
 390	 */
 391	update_head_pos(slot, r10_bio);
 392
 393	if (uptodate) {
 394		/*
 395		 * Set R10BIO_Uptodate in our master bio, so that
 396		 * we will return a good error code to the higher
 397		 * levels even if IO on some other mirrored buffer fails.
 398		 *
 399		 * The 'master' represents the composite IO operation to
 400		 * user-side. So if something waits for IO, then it will
 401		 * wait for the 'master' bio.
 402		 */
 403		set_bit(R10BIO_Uptodate, &r10_bio->state);
 404	} else {
 405		/* If all other devices that store this block have
 406		 * failed, we want to return the error upwards rather
 407		 * than fail the last device.  Here we redefine
 408		 * "uptodate" to mean "Don't want to retry"
 409		 */
 410		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
 411			     rdev->raid_disk))
 412			uptodate = 1;
 413	}
 414	if (uptodate) {
 415		raid_end_bio_io(r10_bio);
 416		rdev_dec_pending(rdev, conf->mddev);
 417	} else {
 418		/*
 419		 * oops, read error - keep the refcount on the rdev
 420		 */
 421		pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
 422				   mdname(conf->mddev),
 423				   rdev->bdev,
 424				   (unsigned long long)r10_bio->sector);
 425		set_bit(R10BIO_ReadError, &r10_bio->state);
 426		reschedule_retry(r10_bio);
 427	}
 428}
 429
 430static void close_write(struct r10bio *r10_bio)
 431{
 432	/* clear the bitmap if all writes complete successfully */
 433	md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 434			   r10_bio->sectors,
 435			   !test_bit(R10BIO_Degraded, &r10_bio->state),
 436			   0);
 437	md_write_end(r10_bio->mddev);
 438}
 439
 440static void one_write_done(struct r10bio *r10_bio)
 441{
 442	if (atomic_dec_and_test(&r10_bio->remaining)) {
 443		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 444			reschedule_retry(r10_bio);
 445		else {
 446			close_write(r10_bio);
 447			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 448				reschedule_retry(r10_bio);
 449			else
 450				raid_end_bio_io(r10_bio);
 451		}
 452	}
 453}
 454
 455static void raid10_end_write_request(struct bio *bio)
 456{
 457	struct r10bio *r10_bio = bio->bi_private;
 458	int dev;
 459	int dec_rdev = 1;
 460	struct r10conf *conf = r10_bio->mddev->private;
 461	int slot, repl;
 462	struct md_rdev *rdev = NULL;
 463	struct bio *to_put = NULL;
 464	bool discard_error;
 465
 466	discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 467
 468	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 469
 470	if (repl)
 471		rdev = conf->mirrors[dev].replacement;
 472	if (!rdev) {
 473		smp_rmb();
 474		repl = 0;
 475		rdev = conf->mirrors[dev].rdev;
 476	}
 477	/*
 478	 * this branch is our 'one mirror IO has finished' event handler:
 479	 */
 480	if (bio->bi_status && !discard_error) {
 481		if (repl)
 482			/* Never record new bad blocks to replacement,
 483			 * just fail it.
 484			 */
 485			md_error(rdev->mddev, rdev);
 486		else {
 487			set_bit(WriteErrorSeen,	&rdev->flags);
 488			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 489				set_bit(MD_RECOVERY_NEEDED,
 490					&rdev->mddev->recovery);
 491
 492			dec_rdev = 0;
 493			if (test_bit(FailFast, &rdev->flags) &&
 494			    (bio->bi_opf & MD_FAILFAST)) {
 495				md_error(rdev->mddev, rdev);
 496			}
 497
 498			/*
 499			 * When the device is faulty, it is not necessary to
 500			 * handle write error.
 501			 */
 502			if (!test_bit(Faulty, &rdev->flags))
 503				set_bit(R10BIO_WriteError, &r10_bio->state);
 504			else {
 505				/* Fail the request */
 506				set_bit(R10BIO_Degraded, &r10_bio->state);
 507				r10_bio->devs[slot].bio = NULL;
 508				to_put = bio;
 509				dec_rdev = 1;
 510			}
 511		}
 512	} else {
 513		/*
 514		 * Set R10BIO_Uptodate in our master bio, so that
 515		 * we will return a good error code to the higher
 516		 * levels even if IO on some other mirrored buffer fails.
 517		 *
 518		 * The 'master' represents the composite IO operation to
 519		 * user-side. So if something waits for IO, then it will
 520		 * wait for the 'master' bio.
 521		 */
 522		sector_t first_bad;
 523		int bad_sectors;
 524
 525		/*
 526		 * Do not set R10BIO_Uptodate if the current device is
 527		 * rebuilding or Faulty. This is because we cannot use
 528		 * such a device for properly reading the data back (we could
 529		 * potentially use it if the current write would have landed
 530		 * before rdev->recovery_offset, but for simplicity we don't
 531		 * check this here).
 532		 */
 533		if (test_bit(In_sync, &rdev->flags) &&
 534		    !test_bit(Faulty, &rdev->flags))
 535			set_bit(R10BIO_Uptodate, &r10_bio->state);
 536
 537		/* Maybe we can clear some bad blocks. */
 538		if (is_badblock(rdev,
 539				r10_bio->devs[slot].addr,
 540				r10_bio->sectors,
 541				&first_bad, &bad_sectors) && !discard_error) {
 542			bio_put(bio);
 543			if (repl)
 544				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 545			else
 546				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 547			dec_rdev = 0;
 548			set_bit(R10BIO_MadeGood, &r10_bio->state);
 549		}
 550	}
 551
 552	/*
 553	 *
 554	 * Let's see if all mirrored write operations have finished
 555	 * already.
 556	 */
 557	one_write_done(r10_bio);
 558	if (dec_rdev)
 559		rdev_dec_pending(rdev, conf->mddev);
 560	if (to_put)
 561		bio_put(to_put);
 562}
 563
 564/*
 565 * RAID10 layout manager
 566 * As well as the chunksize and raid_disks count, there are two
 567 * parameters: near_copies and far_copies.
 568 * near_copies * far_copies must be <= raid_disks.
 569 * Normally one of these will be 1.
 570 * If both are 1, we get raid0.
 571 * If near_copies == raid_disks, we get raid1.
 572 *
 573 * Chunks are laid out in raid0 style with near_copies copies of the
 574 * first chunk, followed by near_copies copies of the next chunk and
 575 * so on.
 576 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 577 * as described above, we start again with a device offset of near_copies.
 578 * So we effectively have another copy of the whole array further down all
 579 * the drives, but with blocks on different drives.
 580 * With this layout, a block is never stored twice on the same device.
 581 *
 582 * raid10_find_phys finds the sector offset of a given virtual sector
 583 * on each device that it is on.
 584 *
 585 * raid10_find_virt does the reverse mapping, from a device and a
 586 * sector offset to a virtual address
 587 */
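/*
 * Editor's worked example (hypothetical geometry, not in the original
 * source): raid_disks=4, near_copies=2, far_copies=1, 64KiB chunks
 * (chunk_shift=7, chunk_mask=127).  For virtual sector 1000,
 * __raid10_find_phys() below computes:
 *
 *	chunk  = 1000 >> 7       =   7
 *	sector = 1000 & 127      = 104
 *	chunk *= near_copies     =  14
 *	dev    = 14 % raid_disks =   2,  stripe = 14 / 4 = 3
 *	sector += stripe << 7    = 488
 *
 * giving the two copies {devnum=2, addr=488} and {devnum=3, addr=488};
 * raid10_find_virt(conf, 488, 2) maps back to virtual sector 1000.
 */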
 588
 589static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
 590{
 591	int n,f;
 592	sector_t sector;
 593	sector_t chunk;
 594	sector_t stripe;
 595	int dev;
 596	int slot = 0;
 597	int last_far_set_start, last_far_set_size;
 598
 599	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 600	last_far_set_start *= geo->far_set_size;
 601
 602	last_far_set_size = geo->far_set_size;
 603	last_far_set_size += (geo->raid_disks % geo->far_set_size);
 604
 605	/* now calculate first sector/dev */
 606	chunk = r10bio->sector >> geo->chunk_shift;
 607	sector = r10bio->sector & geo->chunk_mask;
 608
 609	chunk *= geo->near_copies;
 610	stripe = chunk;
 611	dev = sector_div(stripe, geo->raid_disks);
 612	if (geo->far_offset)
 613		stripe *= geo->far_copies;
 614
 615	sector += stripe << geo->chunk_shift;
 616
 617	/* and calculate all the others */
 618	for (n = 0; n < geo->near_copies; n++) {
 619		int d = dev;
 620		int set;
 621		sector_t s = sector;
 622		r10bio->devs[slot].devnum = d;
 623		r10bio->devs[slot].addr = s;
 624		slot++;
 625
 626		for (f = 1; f < geo->far_copies; f++) {
 627			set = d / geo->far_set_size;
 628			d += geo->near_copies;
 629
 630			if ((geo->raid_disks % geo->far_set_size) &&
 631			    (d > last_far_set_start)) {
 632				d -= last_far_set_start;
 633				d %= last_far_set_size;
 634				d += last_far_set_start;
 635			} else {
 636				d %= geo->far_set_size;
 637				d += geo->far_set_size * set;
 638			}
 639			s += geo->stride;
 640			r10bio->devs[slot].devnum = d;
 641			r10bio->devs[slot].addr = s;
 642			slot++;
 643		}
 644		dev++;
 645		if (dev >= geo->raid_disks) {
 646			dev = 0;
 647			sector += (geo->chunk_mask + 1);
 648		}
 649	}
 650}
 651
 652static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 653{
 654	struct geom *geo = &conf->geo;
 655
 656	if (conf->reshape_progress != MaxSector &&
 657	    ((r10bio->sector >= conf->reshape_progress) !=
 658	     conf->mddev->reshape_backwards)) {
 659		set_bit(R10BIO_Previous, &r10bio->state);
 660		geo = &conf->prev;
 661	} else
 662		clear_bit(R10BIO_Previous, &r10bio->state);
 663
 664	__raid10_find_phys(geo, r10bio);
 665}
 666
 667static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 668{
 669	sector_t offset, chunk, vchunk;
 670	/* Never use conf->prev as this is only called during resync
 671	 * or recovery, so reshape isn't happening
 672	 */
 673	struct geom *geo = &conf->geo;
 674	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
 675	int far_set_size = geo->far_set_size;
 676	int last_far_set_start;
 677
 678	if (geo->raid_disks % geo->far_set_size) {
 679		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 680		last_far_set_start *= geo->far_set_size;
 681
 682		if (dev >= last_far_set_start) {
 683			far_set_size = geo->far_set_size;
 684			far_set_size += (geo->raid_disks % geo->far_set_size);
 685			far_set_start = last_far_set_start;
 686		}
 687	}
 688
 689	offset = sector & geo->chunk_mask;
 690	if (geo->far_offset) {
 691		int fc;
 692		chunk = sector >> geo->chunk_shift;
 693		fc = sector_div(chunk, geo->far_copies);
 694		dev -= fc * geo->near_copies;
 695		if (dev < far_set_start)
 696			dev += far_set_size;
 697	} else {
 698		while (sector >= geo->stride) {
 699			sector -= geo->stride;
 700			if (dev < (geo->near_copies + far_set_start))
 701				dev += far_set_size - geo->near_copies;
 702			else
 703				dev -= geo->near_copies;
 704		}
 705		chunk = sector >> geo->chunk_shift;
 706	}
 707	vchunk = chunk * geo->raid_disks + dev;
 708	sector_div(vchunk, geo->near_copies);
 709	return (vchunk << geo->chunk_shift) + offset;
 710}
 711
 712/*
 713 * This routine returns the disk from which the requested read should
 714 * be done. There is a per-array 'next expected sequential IO' sector
 715 * number - if this matches on the next IO then we use the last disk.
 716 * There is also a per-disk 'last known head position' sector that is
 717 * maintained from IRQ contexts; both the normal and the resync IO
 718 * completion handlers update this position correctly. If there is no
 719 * perfect sequential match then we pick the disk whose head is closest.
 720 *
 721 * If there are 2 mirrors in the same 2 devices, performance degrades
 722 * because the head position is tracked per mirror, not per device.
 723 *
 724 * The rdev for the device selected will have nr_pending incremented.
 725 */
 726
 727/*
 728 * FIXME: possibly should rethink readbalancing and do it differently
 729 * depending on near_copies / far_copies geometry.
 730 */
 731static struct md_rdev *read_balance(struct r10conf *conf,
 732				    struct r10bio *r10_bio,
 733				    int *max_sectors)
 734{
 735	const sector_t this_sector = r10_bio->sector;
 736	int disk, slot;
 737	int sectors = r10_bio->sectors;
 738	int best_good_sectors;
 739	sector_t new_distance, best_dist;
 740	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
 741	int do_balance;
 742	int best_dist_slot, best_pending_slot;
 743	bool has_nonrot_disk = false;
 744	unsigned int min_pending;
 745	struct geom *geo = &conf->geo;
 746
 747	raid10_find_phys(conf, r10_bio);
 748	rcu_read_lock();
 749	best_dist_slot = -1;
 750	min_pending = UINT_MAX;
 751	best_dist_rdev = NULL;
 752	best_pending_rdev = NULL;
 753	best_dist = MaxSector;
 754	best_good_sectors = 0;
 755	do_balance = 1;
 756	clear_bit(R10BIO_FailFast, &r10_bio->state);
 757	/*
 758	 * Check if we can balance. We can balance on the whole
 759	 * device if no resync is going on (recovery is ok), or below
 760	 * the resync window. We take the first readable disk when
 761	 * above the resync window.
 762	 */
 763	if ((conf->mddev->recovery_cp < MaxSector
 764	     && (this_sector + sectors >= conf->next_resync)) ||
 765	    (mddev_is_clustered(conf->mddev) &&
 766	     md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
 767					    this_sector + sectors)))
 768		do_balance = 0;
 769
 770	for (slot = 0; slot < conf->copies ; slot++) {
 771		sector_t first_bad;
 772		int bad_sectors;
 773		sector_t dev_sector;
 774		unsigned int pending;
 775		bool nonrot;
 776
 777		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 778			continue;
 779		disk = r10_bio->devs[slot].devnum;
 780		rdev = rcu_dereference(conf->mirrors[disk].replacement);
 781		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 782		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 783			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 784		if (rdev == NULL ||
 785		    test_bit(Faulty, &rdev->flags))
 786			continue;
 787		if (!test_bit(In_sync, &rdev->flags) &&
 788		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 789			continue;
 790
 791		dev_sector = r10_bio->devs[slot].addr;
 792		if (is_badblock(rdev, dev_sector, sectors,
 793				&first_bad, &bad_sectors)) {
 794			if (best_dist < MaxSector)
 795				/* Already have a better slot */
 796				continue;
 797			if (first_bad <= dev_sector) {
 798				/* Cannot read here.  If this is the
 799				 * 'primary' device, then we must not read
 800				 * beyond 'bad_sectors' from another device.
 801				 */
 802				bad_sectors -= (dev_sector - first_bad);
 803				if (!do_balance && sectors > bad_sectors)
 804					sectors = bad_sectors;
 805				if (best_good_sectors > sectors)
 806					best_good_sectors = sectors;
 807			} else {
 808				sector_t good_sectors =
 809					first_bad - dev_sector;
 810				if (good_sectors > best_good_sectors) {
 811					best_good_sectors = good_sectors;
 812					best_dist_slot = slot;
 813					best_dist_rdev = rdev;
 814				}
 815				if (!do_balance)
 816					/* Must read from here */
 817					break;
 818			}
 819			continue;
 820		} else
 821			best_good_sectors = sectors;
 822
 823		if (!do_balance)
 824			break;
 825
 826		nonrot = bdev_nonrot(rdev->bdev);
 827		has_nonrot_disk |= nonrot;
 828		pending = atomic_read(&rdev->nr_pending);
 829		if (min_pending > pending && nonrot) {
 830			min_pending = pending;
 831			best_pending_slot = slot;
 832			best_pending_rdev = rdev;
 833		}
 834
 835		if (best_dist_slot >= 0)
 836			/* At least 2 disks to choose from so failfast is OK */
 837			set_bit(R10BIO_FailFast, &r10_bio->state);
 838		/* This optimisation is debatable, and completely destroys
 839		 * sequential read speed for 'far copies' arrays.  So only
 840		 * keep it for 'near' arrays, and review those later.
 841		 */
 842		if (geo->near_copies > 1 && !pending)
 843			new_distance = 0;
 844
 845		/* for far > 1 always use the lowest address */
 846		else if (geo->far_copies > 1)
 847			new_distance = r10_bio->devs[slot].addr;
 848		else
 849			new_distance = abs(r10_bio->devs[slot].addr -
 850					   conf->mirrors[disk].head_position);
 851
 852		if (new_distance < best_dist) {
 853			best_dist = new_distance;
 854			best_dist_slot = slot;
 855			best_dist_rdev = rdev;
 856		}
 857	}
 858	if (slot >= conf->copies) {
 859		if (has_nonrot_disk) {
 860			slot = best_pending_slot;
 861			rdev = best_pending_rdev;
 862		} else {
 863			slot = best_dist_slot;
 864			rdev = best_dist_rdev;
 865		}
 866	}
 867
 868	if (slot >= 0) {
 869		atomic_inc(&rdev->nr_pending);
 870		r10_bio->read_slot = slot;
 871	} else
 872		rdev = NULL;
 873	rcu_read_unlock();
 874	*max_sectors = best_good_sectors;
 875
 876	return rdev;
 877}
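/*
 * Editor's sketch of the caller contract (hypothetical snippet; the real
 * use is in raid10_read_request() further down): the rdev returned by
 * read_balance() has nr_pending raised, and the read must not exceed
 * *max_sectors.
 *
 *	rdev = read_balance(conf, r10_bio, &max_sectors);
 *	if (!rdev)
 *		return;		// no readable copy left
 *	...clone the bio, trim it to max_sectors, submit it to rdev...
 *	// later, in the read completion path:
 *	rdev_dec_pending(rdev, conf->mddev);
 */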
 878
 879static void flush_pending_writes(struct r10conf *conf)
 880{
 881	/* Any writes that have been queued but are awaiting
 882	 * bitmap updates get flushed here.
 883	 */
 884	spin_lock_irq(&conf->device_lock);
 885
 886	if (conf->pending_bio_list.head) {
 887		struct blk_plug plug;
 888		struct bio *bio;
 889
 890		bio = bio_list_get(&conf->pending_bio_list);
 891		spin_unlock_irq(&conf->device_lock);
 892
 893		/*
 894		 * As this is called in a wait_event() loop (see freeze_array),
 895		 * current->state might be TASK_UNINTERRUPTIBLE which will
 896		 * cause a warning when we prepare to wait again.  As it is
 897		 * rare that this path is taken, it is perfectly safe to force
 898		 * us to go around the wait_event() loop again, so the warning
 899		 * is a false-positive. Silence the warning by resetting
 900		 * thread state
 901		 */
 902		__set_current_state(TASK_RUNNING);
 903
 904		blk_start_plug(&plug);
 905		/* flush any pending bitmap writes to disk
 906		 * before proceeding w/ I/O */
 907		md_bitmap_unplug(conf->mddev->bitmap);
 908		wake_up(&conf->wait_barrier);
 909
 910		while (bio) { /* submit pending writes */
 911			struct bio *next = bio->bi_next;
 912			struct md_rdev *rdev = (void*)bio->bi_bdev;
 913			bio->bi_next = NULL;
 914			bio_set_dev(bio, rdev->bdev);
 915			if (test_bit(Faulty, &rdev->flags)) {
 916				bio_io_error(bio);
 917			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
 918					    !bdev_max_discard_sectors(bio->bi_bdev)))
 919				/* Just ignore it */
 920				bio_endio(bio);
 921			else
 922				submit_bio_noacct(bio);
 923			bio = next;
 924		}
 925		blk_finish_plug(&plug);
 926	} else
 927		spin_unlock_irq(&conf->device_lock);
 928}
 929
 930/* Barriers....
 931 * Sometimes we need to suspend IO while we do something else,
 932 * either some resync/recovery, or reconfigure the array.
 933 * To do this we raise a 'barrier'.
 934 * The 'barrier' is a counter that can be raised multiple times
 935 * to count how many activities are happening which preclude
 936 * normal IO.
 937 * We can only raise the barrier if there is no pending IO.
 938 * i.e. if nr_pending == 0.
 939 * We choose only to raise the barrier if no-one is waiting for the
 940 * barrier to go down.  This means that as soon as an IO request
 941 * is ready, no other operations which require a barrier will start
 942 * until the IO request has had a chance.
 943 *
 944 * So: regular IO calls 'wait_barrier'.  When that returns there
 945 *    is no background IO happening.  It must arrange to call
 946 *    allow_barrier when it has finished its IO.
 947 * Background IO calls must call raise_barrier.  Once that returns
 948 *    there is no normal IO happening.  It must arrange to call
 949 *    lower_barrier when the particular background IO completes.
 950 */
 951
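/*
 * Editor's sketch of the pairing described above (hypothetical callers,
 * not part of the driver):
 *
 *	// background resync/recovery:
 *	raise_barrier(conf, 0);		// waits for in-flight regular IO
 *	...issue the background IO...
 *	lower_barrier(conf);
 *
 *	// regular IO:
 *	wait_barrier(conf, false);	// blocks while a barrier is raised
 *	...submit the normal IO...
 *	allow_barrier(conf);		// lets a waiting barrier proceed
 */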
 952static void raise_barrier(struct r10conf *conf, int force)
 953{
 954	write_seqlock_irq(&conf->resync_lock);
 955	BUG_ON(force && !conf->barrier);
 956
 957	/* Wait until no block IO is waiting (unless 'force') */
 958	wait_event_barrier(conf, force || !conf->nr_waiting);
 959
 960	/* block any new IO from starting */
 961	WRITE_ONCE(conf->barrier, conf->barrier + 1);
 962
 963	/* Now wait for all pending IO to complete */
 964	wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
 965				 conf->barrier < RESYNC_DEPTH);
 966
 967	write_sequnlock_irq(&conf->resync_lock);
 968}
 969
 970static void lower_barrier(struct r10conf *conf)
 971{
 972	unsigned long flags;
 973
 974	write_seqlock_irqsave(&conf->resync_lock, flags);
 975	WRITE_ONCE(conf->barrier, conf->barrier - 1);
 976	write_sequnlock_irqrestore(&conf->resync_lock, flags);
 977	wake_up(&conf->wait_barrier);
 978}
 979
 980static bool stop_waiting_barrier(struct r10conf *conf)
 981{
 982	struct bio_list *bio_list = current->bio_list;
 983
 984	/* barrier is dropped */
 985	if (!conf->barrier)
 986		return true;
 987
 988	/*
 989	 * If there are already pending requests (preventing the barrier from
 990	 * rising completely), and the pre-process bio queue isn't empty, then
 991	 * don't wait, as we need to empty that queue to get the nr_pending
 992	 * count down.
 993	 */
 994	if (atomic_read(&conf->nr_pending) && bio_list &&
 995	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
 996		return true;
 997
 998	/* move on if recovery thread is blocked by us */
 999	if (conf->mddev->thread->tsk == current &&
1000	    test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
1001	    conf->nr_queued > 0)
1002		return true;
1003
1004	return false;
1005}
1006
1007static bool wait_barrier_nolock(struct r10conf *conf)
1008{
1009	unsigned int seq = read_seqbegin(&conf->resync_lock);
1010
1011	if (READ_ONCE(conf->barrier))
1012		return false;
1013
1014	atomic_inc(&conf->nr_pending);
1015	if (!read_seqretry(&conf->resync_lock, seq))
1016		return true;
1017
1018	if (atomic_dec_and_test(&conf->nr_pending))
1019		wake_up_barrier(conf);
1020
1021	return false;
1022}
1023
1024static bool wait_barrier(struct r10conf *conf, bool nowait)
1025{
1026	bool ret = true;
1027
1028	if (wait_barrier_nolock(conf))
1029		return true;
1030
1031	write_seqlock_irq(&conf->resync_lock);
1032	if (conf->barrier) {
1033		/* Return false when nowait flag is set */
1034		if (nowait) {
1035			ret = false;
1036		} else {
1037			conf->nr_waiting++;
1038			raid10_log(conf->mddev, "wait barrier");
1039			wait_event_barrier(conf, stop_waiting_barrier(conf));
1040			conf->nr_waiting--;
1041		}
1042		if (!conf->nr_waiting)
1043			wake_up(&conf->wait_barrier);
1044	}
1045	/* Only increment nr_pending when we wait */
1046	if (ret)
1047		atomic_inc(&conf->nr_pending);
1048	write_sequnlock_irq(&conf->resync_lock);
1049	return ret;
1050}
1051
1052static void allow_barrier(struct r10conf *conf)
1053{
1054	if ((atomic_dec_and_test(&conf->nr_pending)) ||
1055			(conf->array_freeze_pending))
1056		wake_up_barrier(conf);
1057}
1058
1059static void freeze_array(struct r10conf *conf, int extra)
1060{
1061	/* stop syncio and normal IO and wait for everything to
1062	 * go quiet.
1063	 * We increment barrier and nr_waiting, and then
1064	 * wait until nr_pending matches nr_queued+extra
1065	 * This is called in the context of one normal IO request
1066	 * that has failed. Thus any sync request that might be pending
1067	 * will be blocked by nr_pending, and we need to wait for
1068	 * pending IO requests to complete or be queued for re-try.
1069	 * Thus the number queued (nr_queued) plus this request (extra)
1070	 * must match the number of pending IOs (nr_pending) before
1071	 * we continue.
1072	 */
1073	write_seqlock_irq(&conf->resync_lock);
1074	conf->array_freeze_pending++;
1075	WRITE_ONCE(conf->barrier, conf->barrier + 1);
1076	conf->nr_waiting++;
1077	wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
1078			conf->nr_queued + extra, flush_pending_writes(conf));
1079	conf->array_freeze_pending--;
1080	write_sequnlock_irq(&conf->resync_lock);
1081}
1082
1083static void unfreeze_array(struct r10conf *conf)
1084{
1085	/* reverse the effect of the freeze */
1086	write_seqlock_irq(&conf->resync_lock);
1087	WRITE_ONCE(conf->barrier, conf->barrier - 1);
1088	conf->nr_waiting--;
1089	wake_up(&conf->wait_barrier);
1090	write_sequnlock_irq(&conf->resync_lock);
1091}
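/*
 * Editor's sketch (hypothetical caller): per the comment in freeze_array(),
 * the error-handling paths bracket repair work with this pair, passing
 * extra=1 because the failed request they hold is itself still counted in
 * nr_pending:
 *
 *	freeze_array(conf, 1);		// quiesce all other IO
 *	...fix up the failed request / reconfigure...
 *	unfreeze_array(conf);
 */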
1092
1093static sector_t choose_data_offset(struct r10bio *r10_bio,
1094				   struct md_rdev *rdev)
1095{
1096	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1097	    test_bit(R10BIO_Previous, &r10_bio->state))
1098		return rdev->data_offset;
1099	else
1100		return rdev->new_data_offset;
1101}
1102
1103static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1104{
1105	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
1106	struct mddev *mddev = plug->cb.data;
1107	struct r10conf *conf = mddev->private;
1108	struct bio *bio;
1109
1110	if (from_schedule || current->bio_list) {
1111		spin_lock_irq(&conf->device_lock);
1112		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1113		spin_unlock_irq(&conf->device_lock);
1114		wake_up(&conf->wait_barrier);
1115		md_wakeup_thread(mddev->thread);
1116		kfree(plug);
1117		return;
1118	}
1119
1120	/* we aren't scheduling, so we can do the write-out directly. */
1121	bio = bio_list_get(&plug->pending);
1122	md_bitmap_unplug(mddev->bitmap);
1123	wake_up(&conf->wait_barrier);
1124
1125	while (bio) { /* submit pending writes */
1126		struct bio *next = bio->bi_next;
1127		struct md_rdev *rdev = (void*)bio->bi_bdev;
1128		bio->bi_next = NULL;
1129		bio_set_dev(bio, rdev->bdev);
1130		if (test_bit(Faulty, &rdev->flags)) {
1131			bio_io_error(bio);
1132		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
1133				    !bdev_max_discard_sectors(bio->bi_bdev)))
1134			/* Just ignore it */
1135			bio_endio(bio);
1136		else
1137			submit_bio_noacct(bio);
1138		bio = next;
1139	}
1140	kfree(plug);
1141}
1142
1143/*
1144 * 1. Register the new request and wait if the reconstruction thread has put
1145 * up a barrier for new requests. Continue immediately if no resync is active
1146 * currently.
1147 * 2. If the IO spans the reshape position, wait for the reshape to pass.
1148 */
1149static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1150				 struct bio *bio, sector_t sectors)
1151{
1152	/* Bail out if REQ_NOWAIT is set for the bio */
1153	if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1154		bio_wouldblock_error(bio);
1155		return false;
1156	}
1157	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1158	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1159	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1160		allow_barrier(conf);
1161		if (bio->bi_opf & REQ_NOWAIT) {
1162			bio_wouldblock_error(bio);
1163			return false;
1164		}
1165		raid10_log(conf->mddev, "wait reshape");
1166		wait_event(conf->wait_barrier,
1167			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1168			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1169			   sectors);
1170		wait_barrier(conf, false);
1171	}
1172	return true;
1173}
1174
1175static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1176				struct r10bio *r10_bio)
1177{
1178	struct r10conf *conf = mddev->private;
1179	struct bio *read_bio;
1180	const enum req_op op = bio_op(bio);
1181	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1182	int max_sectors;
1183	struct md_rdev *rdev;
1184	char b[BDEVNAME_SIZE];
1185	int slot = r10_bio->read_slot;
1186	struct md_rdev *err_rdev = NULL;
1187	gfp_t gfp = GFP_NOIO;
1188
1189	if (slot >= 0 && r10_bio->devs[slot].rdev) {
1190		/*
1191		 * This is an error retry, but we cannot
1192		 * safely dereference the rdev in the r10_bio;
1193		 * we must use the one in conf.
1194		 * If it has already been disconnected (unlikely)
1195		 * we lose the device name in error messages.
1196		 */
1197		int disk;
1198		/*
1199		 * As we are blocking raid10, it is a little safer to
1200		 * use __GFP_HIGH.
1201		 */
1202		gfp = GFP_NOIO | __GFP_HIGH;
1203
1204		rcu_read_lock();
1205		disk = r10_bio->devs[slot].devnum;
1206		err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1207		if (err_rdev)
1208			snprintf(b, sizeof(b), "%pg", err_rdev->bdev);
1209		else {
1210			strcpy(b, "???");
1211			/* This never gets dereferenced */
1212			err_rdev = r10_bio->devs[slot].rdev;
1213		}
1214		rcu_read_unlock();
1215	}
1216
1217	if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1218		return;
1219	rdev = read_balance(conf, r10_bio, &max_sectors);
1220	if (!rdev) {
1221		if (err_rdev) {
1222			pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
1223					    mdname(mddev), b,
1224					    (unsigned long long)r10_bio->sector);
1225		}
1226		raid_end_bio_io(r10_bio);
1227		return;
1228	}
1229	if (err_rdev)
1230		pr_err_ratelimited("md/raid10:%s: %pg: redirecting sector %llu to another mirror\n",
1231				   mdname(mddev),
1232				   rdev->bdev,
1233				   (unsigned long long)r10_bio->sector);
1234	if (max_sectors < bio_sectors(bio)) {
1235		struct bio *split = bio_split(bio, max_sectors,
1236					      gfp, &conf->bio_split);
1237		bio_chain(split, bio);
1238		allow_barrier(conf);
1239		submit_bio_noacct(bio);
1240		wait_barrier(conf, false);
1241		bio = split;
1242		r10_bio->master_bio = bio;
1243		r10_bio->sectors = max_sectors;
1244	}
1245	slot = r10_bio->read_slot;
1246
1247	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1248		r10_bio->start_time = bio_start_io_acct(bio);
1249	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1250
1251	r10_bio->devs[slot].bio = read_bio;
1252	r10_bio->devs[slot].rdev = rdev;
1253
1254	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1255		choose_data_offset(r10_bio, rdev);
1256	read_bio->bi_end_io = raid10_end_read_request;
1257	read_bio->bi_opf = op | do_sync;
1258	if (test_bit(FailFast, &rdev->flags) &&
1259	    test_bit(R10BIO_FailFast, &r10_bio->state))
1260	        read_bio->bi_opf |= MD_FAILFAST;
1261	read_bio->bi_private = r10_bio;
1262
1263	if (mddev->gendisk)
1264	        trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1265	                              r10_bio->sector);
1266	submit_bio_noacct(read_bio);
1267	return;
1268}
1269
1270static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1271				  struct bio *bio, bool replacement,
1272				  int n_copy)
1273{
1274	const enum req_op op = bio_op(bio);
1275	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1276	const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
1277	unsigned long flags;
1278	struct blk_plug_cb *cb;
1279	struct raid1_plug_cb *plug = NULL;
1280	struct r10conf *conf = mddev->private;
1281	struct md_rdev *rdev;
1282	int devnum = r10_bio->devs[n_copy].devnum;
1283	struct bio *mbio;
1284
1285	if (replacement) {
1286		rdev = conf->mirrors[devnum].replacement;
1287		if (rdev == NULL) {
1288			/* Replacement just got moved to main 'rdev' */
1289			smp_mb();
1290			rdev = conf->mirrors[devnum].rdev;
1291		}
1292	} else
1293		rdev = conf->mirrors[devnum].rdev;
1294
1295	mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1296	if (replacement)
1297		r10_bio->devs[n_copy].repl_bio = mbio;
1298	else
1299		r10_bio->devs[n_copy].bio = mbio;
1300
1301	mbio->bi_iter.bi_sector	= (r10_bio->devs[n_copy].addr +
1302				   choose_data_offset(r10_bio, rdev));
1303	mbio->bi_end_io	= raid10_end_write_request;
1304	mbio->bi_opf = op | do_sync | do_fua;
1305	if (!replacement && test_bit(FailFast,
1306				     &conf->mirrors[devnum].rdev->flags)
1307			 && enough(conf, devnum))
1308		mbio->bi_opf |= MD_FAILFAST;
1309	mbio->bi_private = r10_bio;
1310
1311	if (conf->mddev->gendisk)
1312		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
1313				      r10_bio->sector);
1314	/* flush_pending_writes() needs access to the rdev so...*/
1315	mbio->bi_bdev = (void *)rdev;
1316
1317	atomic_inc(&r10_bio->remaining);
1318
1319	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1320	if (cb)
1321		plug = container_of(cb, struct raid1_plug_cb, cb);
1322	else
1323		plug = NULL;
1324	if (plug) {
1325		bio_list_add(&plug->pending, mbio);
1326	} else {
1327		spin_lock_irqsave(&conf->device_lock, flags);
1328		bio_list_add(&conf->pending_bio_list, mbio);
1329		spin_unlock_irqrestore(&conf->device_lock, flags);
1330		md_wakeup_thread(mddev->thread);
1331	}
1332}
1333
1334static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1335{
1336	int i;
1337	struct r10conf *conf = mddev->private;
1338	struct md_rdev *blocked_rdev;
1339
1340retry_wait:
1341	blocked_rdev = NULL;
1342	rcu_read_lock();
1343	for (i = 0; i < conf->copies; i++) {
1344		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1345		struct md_rdev *rrdev = rcu_dereference(
1346			conf->mirrors[i].replacement);
1347		if (rdev == rrdev)
1348			rrdev = NULL;
1349		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1350			atomic_inc(&rdev->nr_pending);
1351			blocked_rdev = rdev;
1352			break;
1353		}
1354		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1355			atomic_inc(&rrdev->nr_pending);
1356			blocked_rdev = rrdev;
1357			break;
1358		}
1359
1360		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1361			sector_t first_bad;
1362			sector_t dev_sector = r10_bio->devs[i].addr;
1363			int bad_sectors;
1364			int is_bad;
1365
1366			/*
1367			 * A discard request doesn't care about the write result,
1368			 * so it doesn't need to wait for a blocked disk here.
1369			 */
1370			if (!r10_bio->sectors)
1371				continue;
1372
1373			is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
1374					     &first_bad, &bad_sectors);
1375			if (is_bad < 0) {
1376				/*
1377				 * Mustn't write here until the bad block
1378				 * is acknowledged
1379				 */
1380				atomic_inc(&rdev->nr_pending);
1381				set_bit(BlockedBadBlocks, &rdev->flags);
1382				blocked_rdev = rdev;
1383				break;
1384			}
1385		}
1386	}
1387	rcu_read_unlock();
1388
1389	if (unlikely(blocked_rdev)) {
1390		/* Have to wait for this device to get unblocked, then retry */
1391		allow_barrier(conf);
1392		raid10_log(conf->mddev, "%s wait rdev %d blocked",
1393				__func__, blocked_rdev->raid_disk);
1394		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1395		wait_barrier(conf, false);
1396		goto retry_wait;
1397	}
1398}
1399
1400static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1401				 struct r10bio *r10_bio)
1402{
1403	struct r10conf *conf = mddev->private;
1404	int i;
1405	sector_t sectors;
1406	int max_sectors;
1407
1408	if ((mddev_is_clustered(mddev) &&
1409	     md_cluster_ops->area_resyncing(mddev, WRITE,
1410					    bio->bi_iter.bi_sector,
1411					    bio_end_sector(bio)))) {
1412		DEFINE_WAIT(w);
1413		/* Bail out if REQ_NOWAIT is set for the bio */
1414		if (bio->bi_opf & REQ_NOWAIT) {
1415			bio_wouldblock_error(bio);
1416			return;
1417		}
1418		for (;;) {
1419			prepare_to_wait(&conf->wait_barrier,
1420					&w, TASK_IDLE);
1421			if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1422				 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1423				break;
1424			schedule();
1425		}
1426		finish_wait(&conf->wait_barrier, &w);
1427	}
1428
1429	sectors = r10_bio->sectors;
1430	if (!regular_request_wait(mddev, conf, bio, sectors))
1431		return;
1432	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1433	    (mddev->reshape_backwards
1434	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1435		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1436	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1437		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1438		/* Need to update reshape_position in metadata */
1439		mddev->reshape_position = conf->reshape_progress;
1440		set_mask_bits(&mddev->sb_flags, 0,
1441			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1442		md_wakeup_thread(mddev->thread);
1443		if (bio->bi_opf & REQ_NOWAIT) {
1444			allow_barrier(conf);
1445			bio_wouldblock_error(bio);
1446			return;
1447		}
1448		raid10_log(conf->mddev, "wait reshape metadata");
1449		wait_event(mddev->sb_wait,
1450			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1451
1452		conf->reshape_safe = mddev->reshape_position;
1453	}
1454
1455	/* first select target devices under rcu_lock and
1456	 * inc refcount on their rdev.  Record them by setting
1457	 * bios[x] to bio
1458	 * If there are known/acknowledged bad blocks on any device
1459	 * on which we have seen a write error, we want to avoid
1460	 * writing to those blocks.  This potentially requires several
1461	 * writes to write around the bad blocks.  Each set of writes
1462	 * gets its own r10_bio with a set of bios attached.
1463	 */
1464
1465	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1466	raid10_find_phys(conf, r10_bio);
1467
1468	wait_blocked_dev(mddev, r10_bio);
1469
1470	rcu_read_lock();
1471	max_sectors = r10_bio->sectors;
1472
1473	for (i = 0;  i < conf->copies; i++) {
1474		int d = r10_bio->devs[i].devnum;
1475		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1476		struct md_rdev *rrdev = rcu_dereference(
1477			conf->mirrors[d].replacement);
1478		if (rdev == rrdev)
1479			rrdev = NULL;
1480		if (rdev && (test_bit(Faulty, &rdev->flags)))
1481			rdev = NULL;
1482		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1483			rrdev = NULL;
1484
1485		r10_bio->devs[i].bio = NULL;
1486		r10_bio->devs[i].repl_bio = NULL;
1487
1488		if (!rdev && !rrdev) {
1489			set_bit(R10BIO_Degraded, &r10_bio->state);
1490			continue;
1491		}
1492		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1493			sector_t first_bad;
1494			sector_t dev_sector = r10_bio->devs[i].addr;
1495			int bad_sectors;
1496			int is_bad;
1497
1498			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1499					     &first_bad, &bad_sectors);
1500			if (is_bad && first_bad <= dev_sector) {
1501				/* Cannot write here at all */
1502				bad_sectors -= (dev_sector - first_bad);
1503				if (bad_sectors < max_sectors)
1504					/* Mustn't write more than bad_sectors
1505					 * to other devices yet
1506					 */
1507					max_sectors = bad_sectors;
1508				/* We don't set R10BIO_Degraded as that
1509				 * only applies if the disk is missing,
1510				 * so it might be re-added, and we want to
1511				 * know to recover this chunk.
1512				 * In this case the device is here, and the
1513				 * fact that this chunk is not in-sync is
1514				 * recorded in the bad block log.
1515				 */
1516				continue;
1517			}
1518			if (is_bad) {
1519				int good_sectors = first_bad - dev_sector;
1520				if (good_sectors < max_sectors)
1521					max_sectors = good_sectors;
1522			}
1523		}
1524		if (rdev) {
1525			r10_bio->devs[i].bio = bio;
1526			atomic_inc(&rdev->nr_pending);
1527		}
1528		if (rrdev) {
1529			r10_bio->devs[i].repl_bio = bio;
1530			atomic_inc(&rrdev->nr_pending);
1531		}
1532	}
1533	rcu_read_unlock();
1534
1535	if (max_sectors < r10_bio->sectors)
1536		r10_bio->sectors = max_sectors;
1537
1538	if (r10_bio->sectors < bio_sectors(bio)) {
1539		struct bio *split = bio_split(bio, r10_bio->sectors,
1540					      GFP_NOIO, &conf->bio_split);
1541		bio_chain(split, bio);
1542		allow_barrier(conf);
1543		submit_bio_noacct(bio);
1544		wait_barrier(conf, false);
1545		bio = split;
1546		r10_bio->master_bio = bio;
1547	}
1548
1549	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
1550		r10_bio->start_time = bio_start_io_acct(bio);
1551	atomic_set(&r10_bio->remaining, 1);
1552	md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1553
1554	for (i = 0; i < conf->copies; i++) {
1555		if (r10_bio->devs[i].bio)
1556			raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1557		if (r10_bio->devs[i].repl_bio)
1558			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1559	}
1560	one_write_done(r10_bio);
1561}
1562
1563static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1564{
1565	struct r10conf *conf = mddev->private;
1566	struct r10bio *r10_bio;
1567
1568	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1569
1570	r10_bio->master_bio = bio;
1571	r10_bio->sectors = sectors;
1572
1573	r10_bio->mddev = mddev;
1574	r10_bio->sector = bio->bi_iter.bi_sector;
1575	r10_bio->state = 0;
1576	r10_bio->read_slot = -1;
1577	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1578			conf->geo.raid_disks);
1579
1580	if (bio_data_dir(bio) == READ)
1581		raid10_read_request(mddev, bio, r10_bio);
1582	else
1583		raid10_write_request(mddev, bio, r10_bio);
1584}
1585
1586static void raid_end_discard_bio(struct r10bio *r10bio)
1587{
1588	struct r10conf *conf = r10bio->mddev->private;
1589	struct r10bio *first_r10bio;
1590
1591	while (atomic_dec_and_test(&r10bio->remaining)) {
1592
1593		allow_barrier(conf);
1594
1595		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
1596			first_r10bio = (struct r10bio *)r10bio->master_bio;
1597			free_r10bio(r10bio);
1598			r10bio = first_r10bio;
1599		} else {
1600			md_write_end(r10bio->mddev);
1601			bio_endio(r10bio->master_bio);
1602			free_r10bio(r10bio);
1603			break;
1604		}
1605	}
1606}
1607
1608static void raid10_end_discard_request(struct bio *bio)
1609{
1610	struct r10bio *r10_bio = bio->bi_private;
1611	struct r10conf *conf = r10_bio->mddev->private;
1612	struct md_rdev *rdev = NULL;
1613	int dev;
1614	int slot, repl;
1615
1616	/*
1617	 * We don't care about the return value of the discard bio
1618	 */
1619	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1620		set_bit(R10BIO_Uptodate, &r10_bio->state);
1621
1622	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1623	if (repl)
1624		rdev = conf->mirrors[dev].replacement;
1625	if (!rdev) {
1626		/*
1627		 * raid10_remove_disk uses smp_mb to make sure rdev is set to
1628		 * replacement before setting replacement to NULL. So rdev can be
1629		 * read here without barrier protection even if replacement is NULL.
1630		 */
1631		smp_rmb();
1632		rdev = conf->mirrors[dev].rdev;
1633	}
1634
1635	raid_end_discard_bio(r10_bio);
1636	rdev_dec_pending(rdev, conf->mddev);
1637}
1638
1639/*
1640 * There are some limitations to handling discard bios:
1641 * 1st, the discard size must be bigger than stripe_size*2.
1642 * 2nd, if the discard bio spans the reshape progress, we use the old way to
1643 * handle the discard bio.
1644 */
1645static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1646{
1647	struct r10conf *conf = mddev->private;
1648	struct geom *geo = &conf->geo;
1649	int far_copies = geo->far_copies;
1650	bool first_copy = true;
1651	struct r10bio *r10_bio, *first_r10bio;
1652	struct bio *split;
1653	int disk;
1654	sector_t chunk;
1655	unsigned int stripe_size;
1656	unsigned int stripe_data_disks;
1657	sector_t split_size;
1658	sector_t bio_start, bio_end;
1659	sector_t first_stripe_index, last_stripe_index;
1660	sector_t start_disk_offset;
1661	unsigned int start_disk_index;
1662	sector_t end_disk_offset;
1663	unsigned int end_disk_index;
1664	unsigned int remainder;
1665
1666	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1667		return -EAGAIN;
1668
1669	if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1670		bio_wouldblock_error(bio);
1671		return 0;
1672	}
1673	wait_barrier(conf, false);
1674
1675	/*
1676	 * Check reshape again to avoid reshape happens after checking
1677	 * MD_RECOVERY_RESHAPE and before wait_barrier
1678	 */
1679	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1680		goto out;
1681
1682	if (geo->near_copies)
1683		stripe_data_disks = geo->raid_disks / geo->near_copies +
1684					geo->raid_disks % geo->near_copies;
1685	else
1686		stripe_data_disks = geo->raid_disks;
1687
1688	stripe_size = stripe_data_disks << geo->chunk_shift;
1689
1690	bio_start = bio->bi_iter.bi_sector;
1691	bio_end = bio_end_sector(bio);
1692
1693	/*
1694	 * A discard bio may be smaller than the stripe size, or may cross a
1695	 * stripe boundary while its region is larger than one stripe.  For the
1696	 * far offset layout, if the discard region is not aligned with the
1697	 * stripe size, a hole is left when we submit the discard bio to a
1698	 * member disk.  For simplicity, we only handle discard bios whose
1699	 * region is bigger than stripe_size * 2.
1700	 */
1701	if (bio_sectors(bio) < stripe_size*2)
1702		goto out;
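	/*
	 * Editor's worked example (hypothetical geometry): with raid_disks=4,
	 * near_copies=2 and 512KiB chunks (chunk_shift=10),
	 * stripe_data_disks = 4/2 + 4%2 = 2 and stripe_size = 2 << 10 = 2048
	 * sectors (1MiB), so any discard shorter than 4096 sectors (2MiB)
	 * takes the 'goto out' path above.
	 */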
1703
1704	/*
1705	 * Keep the bio aligned with the stripe size.
1706	 */
1707	div_u64_rem(bio_start, stripe_size, &remainder);
1708	if (remainder) {
1709		split_size = stripe_size - remainder;
1710		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1711		bio_chain(split, bio);
1712		allow_barrier(conf);
1713		/* Resend the first split part */
1714		submit_bio_noacct(split);
1715		wait_barrier(conf, false);
1716	}
1717	div_u64_rem(bio_end, stripe_size, &remainder);
1718	if (remainder) {
1719		split_size = bio_sectors(bio) - remainder;
1720		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1721		bio_chain(split, bio);
1722		allow_barrier(conf);
1723		/* Resend the second split part */
1724		submit_bio_noacct(bio);
1725		bio = split;
1726		wait_barrier(conf, false);
1727	}
1728
1729	bio_start = bio->bi_iter.bi_sector;
1730	bio_end = bio_end_sector(bio);
1731
1732	/*
1733	 * Raid10 uses the chunk as the unit to store data, similar to raid0.
1734	 * One stripe contains the chunks from all member disks (one chunk from
1735	 * each disk at the same HBA address). For layout details, see 'man md 4'.
1736	 */
1737	chunk = bio_start >> geo->chunk_shift;
1738	chunk *= geo->near_copies;
1739	first_stripe_index = chunk;
1740	start_disk_index = sector_div(first_stripe_index, geo->raid_disks);
1741	if (geo->far_offset)
1742		first_stripe_index *= geo->far_copies;
1743	start_disk_offset = (bio_start & geo->chunk_mask) +
1744				(first_stripe_index << geo->chunk_shift);
1745
1746	chunk = bio_end >> geo->chunk_shift;
1747	chunk *= geo->near_copies;
1748	last_stripe_index = chunk;
1749	end_disk_index = sector_div(last_stripe_index, geo->raid_disks);
1750	if (geo->far_offset)
1751		last_stripe_index *= geo->far_copies;
1752	end_disk_offset = (bio_end & geo->chunk_mask) +
1753				(last_stripe_index << geo->chunk_shift);
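	/*
	 * Worked example (assumed geometry): near_copies = 2, raid_disks = 4,
	 * 1024-sector chunks, far_offset = 0.  For bio_start = 5120 the chunk
	 * number is 5, scaled by near_copies to 10, so start_disk_index =
	 * 10 % 4 = 2 and first_stripe_index = 10 / 4 = 2; the discard then
	 * starts on disk 2 at device offset 2 << 10 = 2048 sectors.
	 */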
1754
1755retry_discard:
1756	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1757	r10_bio->mddev = mddev;
1758	r10_bio->state = 0;
1759	r10_bio->sectors = 0;
1760	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1761	wait_blocked_dev(mddev, r10_bio);
1762
1763	/*
1764	 * For the far layout, more than one r10bio is needed to cover all regions.
1765	 * Inspired by raid10_sync_request, we use the first r10bio->master_bio to
1766	 * record the discard bio, while the other r10bios' master_bio record the
1767	 * first r10bio. The first r10bio is only released after all other r10bios
1768	 * finish, and the discard bio completes only after the first r10bio finishes.
1769	 */
1770	if (first_copy) {
1771		r10_bio->master_bio = bio;
1772		set_bit(R10BIO_Discard, &r10_bio->state);
1773		first_copy = false;
1774		first_r10bio = r10_bio;
1775	} else
1776		r10_bio->master_bio = (struct bio *)first_r10bio;
1777
1778	/*
1779	 * first select target devices under rcu_lock and
1780	 * inc refcount on their rdev.  Record them by setting
1781	 * bios[x] to bio
1782	 */
1783	rcu_read_lock();
1784	for (disk = 0; disk < geo->raid_disks; disk++) {
1785		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
1786		struct md_rdev *rrdev = rcu_dereference(
1787			conf->mirrors[disk].replacement);
1788
1789		r10_bio->devs[disk].bio = NULL;
1790		r10_bio->devs[disk].repl_bio = NULL;
1791
1792		if (rdev && (test_bit(Faulty, &rdev->flags)))
1793			rdev = NULL;
1794		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1795			rrdev = NULL;
1796		if (!rdev && !rrdev)
1797			continue;
1798
1799		if (rdev) {
1800			r10_bio->devs[disk].bio = bio;
1801			atomic_inc(&rdev->nr_pending);
1802		}
1803		if (rrdev) {
1804			r10_bio->devs[disk].repl_bio = bio;
1805			atomic_inc(&rrdev->nr_pending);
1806		}
1807	}
1808	rcu_read_unlock();
1809
1810	atomic_set(&r10_bio->remaining, 1);
1811	for (disk = 0; disk < geo->raid_disks; disk++) {
1812		sector_t dev_start, dev_end;
1813		struct bio *mbio, *rbio = NULL;
1814
1815		/*
1816		 * Now start to calculate the start and end address for each disk.
1817		 * The space between dev_start and dev_end is the discard region.
1818		 *
1819		 * For dev_start, three conditions need to be considered:
1820		 * 1st, the disk is before start_disk_index; think of the disk as
1821		 * belonging to the next stripe, so dev_start is the start address
1822		 * of the next stripe.
1823		 * 2nd, the disk is after start_disk_index, which means the disk is
1824		 * in the same stripe as the first disk.
1825		 * 3rd, the first disk itself; we can use start_disk_offset directly.
1826		 */
1827		if (disk < start_disk_index)
1828			dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1829		else if (disk > start_disk_index)
1830			dev_start = first_stripe_index * mddev->chunk_sectors;
1831		else
1832			dev_start = start_disk_offset;
1833
1834		if (disk < end_disk_index)
1835			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1836		else if (disk > end_disk_index)
1837			dev_end = last_stripe_index * mddev->chunk_sectors;
1838		else
1839			dev_end = end_disk_offset;
1840
1841		/*
1842		 * We only handle discard bios whose size is >= the stripe size, so
1843		 * dev_end > dev_start always holds.
1844		 * There is no need to take the RCU lock to get rdev here; we already
1845		 * incremented rdev->nr_pending in the first loop.
1846		 */
1847		if (r10_bio->devs[disk].bio) {
1848			struct md_rdev *rdev = conf->mirrors[disk].rdev;
1849			mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1850					       &mddev->bio_set);
1851			mbio->bi_end_io = raid10_end_discard_request;
1852			mbio->bi_private = r10_bio;
1853			r10_bio->devs[disk].bio = mbio;
1854			r10_bio->devs[disk].devnum = disk;
1855			atomic_inc(&r10_bio->remaining);
1856			md_submit_discard_bio(mddev, rdev, mbio,
1857					dev_start + choose_data_offset(r10_bio, rdev),
1858					dev_end - dev_start);
1859			bio_endio(mbio);
1860		}
1861		if (r10_bio->devs[disk].repl_bio) {
1862			struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1863			rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1864					       &mddev->bio_set);
1865			rbio->bi_end_io = raid10_end_discard_request;
1866			rbio->bi_private = r10_bio;
1867			r10_bio->devs[disk].repl_bio = rbio;
1868			r10_bio->devs[disk].devnum = disk;
1869			atomic_inc(&r10_bio->remaining);
1870			md_submit_discard_bio(mddev, rrdev, rbio,
1871					dev_start + choose_data_offset(r10_bio, rrdev),
1872					dev_end - dev_start);
1873			bio_endio(rbio);
1874		}
1875	}
1876
1877	if (!geo->far_offset && --far_copies) {
1878		first_stripe_index += geo->stride >> geo->chunk_shift;
1879		start_disk_offset += geo->stride;
1880		last_stripe_index += geo->stride >> geo->chunk_shift;
1881		end_disk_offset += geo->stride;
1882		atomic_inc(&first_r10bio->remaining);
1883		raid_end_discard_bio(r10_bio);
1884		wait_barrier(conf, false);
1885		goto retry_discard;
1886	}
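	/*
	 * Note: for a plain 'far' layout (far_offset == 0) with far_copies = 2,
	 * the block above triggers a second pass; that pass advances every
	 * per-device offset by geo->stride sectors so the far copy of the
	 * region is discarded as well.
	 */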
1887
1888	raid_end_discard_bio(r10_bio);
1889
1890	return 0;
1891out:
1892	allow_barrier(conf);
1893	return -EAGAIN;
1894}
1895
1896static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1897{
1898	struct r10conf *conf = mddev->private;
1899	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1900	int chunk_sects = chunk_mask + 1;
1901	int sectors = bio_sectors(bio);
1902
1903	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1904	    && md_flush_request(mddev, bio))
1905		return true;
1906
1907	if (!md_write_start(mddev, bio))
1908		return false;
1909
1910	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1911		if (!raid10_handle_discard(mddev, bio))
1912			return true;
1913
1914	/*
1915	 * If this request crosses a chunk boundary, we need to split
1916	 * it.
1917	 */
1918	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1919		     sectors > chunk_sects
1920		     && (conf->geo.near_copies < conf->geo.raid_disks
1921			 || conf->prev.near_copies <
1922			 conf->prev.raid_disks)))
1923		sectors = chunk_sects -
1924			(bio->bi_iter.bi_sector &
1925			 (chunk_sects - 1));
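	/*
	 * For example (illustrative numbers): with chunk_sects = 1024 and
	 * bi_sector = 2000, the low bits are 2000 & 1023 = 976, so only
	 * 1024 - 976 = 48 sectors are passed down here; the rest of the bio is
	 * split off and resubmitted by the lower-level request handling.
	 */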
1926	__make_request(mddev, bio, sectors);
1927
1928	/* In case raid10d snuck in to freeze_array */
1929	wake_up_barrier(conf);
1930	return true;
1931}
1932
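/*
 * Emits the raid10-specific part of the /proc/mdstat status line.  For
 * example (illustrative), a 4-device near-2 array with 512K chunks and one
 * failed member might show: " 512K chunks 2 near-copies [4/3] [UU_U]".
 */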
1933static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1934{
1935	struct r10conf *conf = mddev->private;
1936	int i;
1937
1938	if (conf->geo.near_copies < conf->geo.raid_disks)
1939		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1940	if (conf->geo.near_copies > 1)
1941		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1942	if (conf->geo.far_copies > 1) {
1943		if (conf->geo.far_offset)
1944			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1945		else
1946			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1947		if (conf->geo.far_set_size != conf->geo.raid_disks)
1948			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1949	}
1950	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1951					conf->geo.raid_disks - mddev->degraded);
1952	rcu_read_lock();
1953	for (i = 0; i < conf->geo.raid_disks; i++) {
1954		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1955		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1956	}
1957	rcu_read_unlock();
1958	seq_printf(seq, "]");
1959}
1960
1961/* check if there are enough drives for
1962 * every block to appear on at least one.
1963 * Don't consider the device numbered 'ignore'
1964 * as we might be about to remove it.
1965 */
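/*
 * For example (illustrative): with raid_disks = 4, near_copies = 2 and
 * far_copies = 1, the loop below inspects the groups {0,1} and {2,3}; the
 * array has "enough" drives as long as each group still contains at least
 * one In_sync device (other than 'ignore').
 */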
1966static int _enough(struct r10conf *conf, int previous, int ignore)
1967{
1968	int first = 0;
1969	int has_enough = 0;
1970	int disks, ncopies;
1971	if (previous) {
1972		disks = conf->prev.raid_disks;
1973		ncopies = conf->prev.near_copies;
1974	} else {
1975		disks = conf->geo.raid_disks;
1976		ncopies = conf->geo.near_copies;
1977	}
1978
1979	rcu_read_lock();
1980	do {
1981		int n = conf->copies;
1982		int cnt = 0;
1983		int this = first;
1984		while (n--) {
1985			struct md_rdev *rdev;
1986			if (this != ignore &&
1987			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1988			    test_bit(In_sync, &rdev->flags))
1989				cnt++;
1990			this = (this+1) % disks;
1991		}
1992		if (cnt == 0)
1993			goto out;
1994		first = (first + ncopies) % disks;
1995	} while (first != 0);
1996	has_enough = 1;
1997out:
1998	rcu_read_unlock();
1999	return has_enough;
2000}
2001
2002static int enough(struct r10conf *conf, int ignore)
2003{
2004	/* when calling 'enough', both 'prev' and 'geo' must
2005	 * be stable.
2006	 * This is ensured if ->reconfig_mutex or ->device_lock
2007	 * is held.
2008	 */
2009	return _enough(conf, 0, ignore) &&
2010		_enough(conf, 1, ignore);
2011}
2012
2013/**
2014 * raid10_error() - RAID10 error handler.
2015 * @mddev: affected md device.
2016 * @rdev: member device to fail.
2017 *
2018 * The routine acknowledges &rdev failure and determines new @mddev state.
2019 * If the array failed, then:
2020 *	- &MD_BROKEN flag is set in &mddev->flags.
2021 * Otherwise, it must be degraded:
2022 *	- recovery is interrupted.
2023 *	- &mddev->degraded is bumped.
2024 *
2025 * @rdev is marked as &Faulty, excluding the case when the array is failed
2026 * and &mddev->fail_last_dev is off.
2027 */
2028static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
2029{
 
2030	struct r10conf *conf = mddev->private;
2031	unsigned long flags;
2032
2033	spin_lock_irqsave(&conf->device_lock, flags);
2034
2035	if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2036		set_bit(MD_BROKEN, &mddev->flags);
2037
2038		if (!mddev->fail_last_dev) {
2039			spin_unlock_irqrestore(&conf->device_lock, flags);
2040			return;
2041		}
2042	}
2043	if (test_and_clear_bit(In_sync, &rdev->flags))
2044		mddev->degraded++;
2045
2046	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2047	set_bit(Blocked, &rdev->flags);
2048	set_bit(Faulty, &rdev->flags);
2049	set_mask_bits(&mddev->sb_flags, 0,
2050		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2051	spin_unlock_irqrestore(&conf->device_lock, flags);
2052	pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
2053		"md/raid10:%s: Operation continuing on %d devices.\n",
2054		mdname(mddev), rdev->bdev,
2055		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 
2056}
2057
2058static void print_conf(struct r10conf *conf)
2059{
2060	int i;
2061	struct md_rdev *rdev;
2062
2063	pr_debug("RAID10 conf printout:\n");
2064	if (!conf) {
2065		pr_debug("(!conf)\n");
2066		return;
2067	}
2068	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2069		 conf->geo.raid_disks);
2070
2071	/* This is only called with ->reconfig_mutex held, so
2072	 * RCU protection of rdev is not needed */
2073	for (i = 0; i < conf->geo.raid_disks; i++) {
2074		rdev = conf->mirrors[i].rdev;
2075		if (rdev)
2076			pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
2077				 i, !test_bit(In_sync, &rdev->flags),
2078				 !test_bit(Faulty, &rdev->flags),
2079				 rdev->bdev);
 
2080	}
2081}
2082
2083static void close_sync(struct r10conf *conf)
2084{
2085	wait_barrier(conf, false);
2086	allow_barrier(conf);
2087
2088	mempool_exit(&conf->r10buf_pool);
 
2089}
2090
2091static int raid10_spare_active(struct mddev *mddev)
2092{
2093	int i;
2094	struct r10conf *conf = mddev->private;
2095	struct raid10_info *tmp;
2096	int count = 0;
2097	unsigned long flags;
2098
2099	/*
2100	 * Find all non-in_sync disks within the RAID10 configuration
2101	 * and mark them in_sync
2102	 */
2103	for (i = 0; i < conf->geo.raid_disks; i++) {
2104		tmp = conf->mirrors + i;
2105		if (tmp->replacement
2106		    && tmp->replacement->recovery_offset == MaxSector
2107		    && !test_bit(Faulty, &tmp->replacement->flags)
2108		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
2109			/* Replacement has just become active */
2110			if (!tmp->rdev
2111			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2112				count++;
2113			if (tmp->rdev) {
2114				/* Replaced device not technically faulty,
2115				 * but we need to be sure it gets removed
2116				 * and never re-added.
2117				 */
2118				set_bit(Faulty, &tmp->rdev->flags);
2119				sysfs_notify_dirent_safe(
2120					tmp->rdev->sysfs_state);
2121			}
2122			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
2123		} else if (tmp->rdev
2124			   && tmp->rdev->recovery_offset == MaxSector
2125			   && !test_bit(Faulty, &tmp->rdev->flags)
2126			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2127			count++;
2128			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2129		}
2130	}
2131	spin_lock_irqsave(&conf->device_lock, flags);
2132	mddev->degraded -= count;
2133	spin_unlock_irqrestore(&conf->device_lock, flags);
2134
2135	print_conf(conf);
2136	return count;
2137}
2138
2139static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2140{
2141	struct r10conf *conf = mddev->private;
2142	int err = -EEXIST;
2143	int mirror;
2144	int first = 0;
2145	int last = conf->geo.raid_disks - 1;
2146
2147	if (mddev->recovery_cp < MaxSector)
2148		/* only hot-add to in-sync arrays, as recovery is
2149		 * very different from resync
2150		 */
2151		return -EBUSY;
2152	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2153		return -EINVAL;
2154
2155	if (md_integrity_add_rdev(rdev, mddev))
2156		return -ENXIO;
2157
2158	if (rdev->raid_disk >= 0)
2159		first = last = rdev->raid_disk;
2160
2161	if (rdev->saved_raid_disk >= first &&
2162	    rdev->saved_raid_disk < conf->geo.raid_disks &&
2163	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2164		mirror = rdev->saved_raid_disk;
2165	else
2166		mirror = first;
2167	for ( ; mirror <= last ; mirror++) {
2168		struct raid10_info *p = &conf->mirrors[mirror];
2169		if (p->recovery_disabled == mddev->recovery_disabled)
2170			continue;
2171		if (p->rdev) {
2172			if (!test_bit(WantReplacement, &p->rdev->flags) ||
2173			    p->replacement != NULL)
2174				continue;
2175			clear_bit(In_sync, &rdev->flags);
2176			set_bit(Replacement, &rdev->flags);
2177			rdev->raid_disk = mirror;
2178			err = 0;
2179			if (mddev->gendisk)
2180				disk_stack_limits(mddev->gendisk, rdev->bdev,
2181						  rdev->data_offset << 9);
2182			conf->fullsync = 1;
2183			rcu_assign_pointer(p->replacement, rdev);
2184			break;
2185		}
2186
2187		if (mddev->gendisk)
2188			disk_stack_limits(mddev->gendisk, rdev->bdev,
2189					  rdev->data_offset << 9);
2190
2191		p->head_position = 0;
2192		p->recovery_disabled = mddev->recovery_disabled - 1;
2193		rdev->raid_disk = mirror;
2194		err = 0;
2195		if (rdev->saved_raid_disk != mirror)
2196			conf->fullsync = 1;
2197		rcu_assign_pointer(p->rdev, rdev);
2198		break;
2199	}
2200
2201	print_conf(conf);
2202	return err;
2203}
2204
2205static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2206{
2207	struct r10conf *conf = mddev->private;
2208	int err = 0;
2209	int number = rdev->raid_disk;
2210	struct md_rdev **rdevp;
2211	struct raid10_info *p;
2212
2213	print_conf(conf);
2214	if (unlikely(number >= mddev->raid_disks))
2215		return 0;
2216	p = conf->mirrors + number;
2217	if (rdev == p->rdev)
2218		rdevp = &p->rdev;
2219	else if (rdev == p->replacement)
2220		rdevp = &p->replacement;
2221	else
2222		return 0;
2223
2224	if (test_bit(In_sync, &rdev->flags) ||
2225	    atomic_read(&rdev->nr_pending)) {
2226		err = -EBUSY;
2227		goto abort;
2228	}
2229	/* Only remove non-faulty devices if recovery
2230	 * is not possible.
2231	 */
2232	if (!test_bit(Faulty, &rdev->flags) &&
2233	    mddev->recovery_disabled != p->recovery_disabled &&
2234	    (!p->replacement || p->replacement == rdev) &&
2235	    number < conf->geo.raid_disks &&
2236	    enough(conf, -1)) {
2237		err = -EBUSY;
2238		goto abort;
2239	}
2240	*rdevp = NULL;
2241	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
2242		synchronize_rcu();
2243		if (atomic_read(&rdev->nr_pending)) {
2244			/* lost the race, try later */
2245			err = -EBUSY;
2246			*rdevp = rdev;
2247			goto abort;
2248		}
2249	}
2250	if (p->replacement) {
2251		/* We must have just cleared 'rdev' */
2252		p->rdev = p->replacement;
2253		clear_bit(Replacement, &p->replacement->flags);
2254		smp_mb(); /* Make sure other CPUs may see both as identical
2255			   * but will never see neither -- if they are careful.
2256			   */
2257		p->replacement = NULL;
2258	}
2259
2260	clear_bit(WantReplacement, &rdev->flags);
2261	err = md_integrity_register(mddev);
2262
2263abort:
2264
2265	print_conf(conf);
2266	return err;
2267}
2268
2269static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2270{
 
2271	struct r10conf *conf = r10_bio->mddev->private;
 
2272
2273	if (!bio->bi_status)
2274		set_bit(R10BIO_Uptodate, &r10_bio->state);
2275	else
2276		/* The write handler will notice the lack of
2277		 * R10BIO_Uptodate and record any errors etc
2278		 */
2279		atomic_add(r10_bio->sectors,
2280			   &conf->mirrors[d].rdev->corrected_errors);
2281
2282	/* for reconstruct, we always reschedule after a read.
2283	 * for resync, only after all reads
2284	 */
2285	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2286	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2287	    atomic_dec_and_test(&r10_bio->remaining)) {
2288		/* we have read all the blocks,
2289		 * do the comparison in process context in raid10d
2290		 */
2291		reschedule_retry(r10_bio);
2292	}
2293}
2294
2295static void end_sync_read(struct bio *bio)
2296{
2297	struct r10bio *r10_bio = get_resync_r10bio(bio);
2298	struct r10conf *conf = r10_bio->mddev->private;
2299	int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2300
2301	__end_sync_read(r10_bio, bio, d);
2302}
2303
2304static void end_reshape_read(struct bio *bio)
2305{
2306	/* reshape read bio isn't allocated from r10buf_pool */
2307	struct r10bio *r10_bio = bio->bi_private;
2308
2309	__end_sync_read(r10_bio, bio, r10_bio->read_slot);
2310}
2311
2312static void end_sync_request(struct r10bio *r10_bio)
2313{
2314	struct mddev *mddev = r10_bio->mddev;
2315
2316	while (atomic_dec_and_test(&r10_bio->remaining)) {
2317		if (r10_bio->master_bio == NULL) {
2318			/* the primary of several recovery bios */
2319			sector_t s = r10_bio->sectors;
2320			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2321			    test_bit(R10BIO_WriteError, &r10_bio->state))
2322				reschedule_retry(r10_bio);
2323			else
2324				put_buf(r10_bio);
2325			md_done_sync(mddev, s, 1);
2326			break;
2327		} else {
2328			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2329			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2330			    test_bit(R10BIO_WriteError, &r10_bio->state))
2331				reschedule_retry(r10_bio);
2332			else
2333				put_buf(r10_bio);
2334			r10_bio = r10_bio2;
2335		}
2336	}
2337}
2338
2339static void end_sync_write(struct bio *bio)
2340{
2341	struct r10bio *r10_bio = get_resync_r10bio(bio);
2342	struct mddev *mddev = r10_bio->mddev;
2343	struct r10conf *conf = mddev->private;
2344	int d;
2345	sector_t first_bad;
2346	int bad_sectors;
2347	int slot;
2348	int repl;
2349	struct md_rdev *rdev = NULL;
2350
2351	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2352	if (repl)
2353		rdev = conf->mirrors[d].replacement;
2354	else
2355		rdev = conf->mirrors[d].rdev;
2356
2357	if (bio->bi_status) {
2358		if (repl)
2359			md_error(mddev, rdev);
2360		else {
2361			set_bit(WriteErrorSeen, &rdev->flags);
2362			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2363				set_bit(MD_RECOVERY_NEEDED,
2364					&rdev->mddev->recovery);
2365			set_bit(R10BIO_WriteError, &r10_bio->state);
2366		}
2367	} else if (is_badblock(rdev,
2368			     r10_bio->devs[slot].addr,
2369			     r10_bio->sectors,
2370			     &first_bad, &bad_sectors))
2371		set_bit(R10BIO_MadeGood, &r10_bio->state);
2372
2373	rdev_dec_pending(rdev, mddev);
2374
2375	end_sync_request(r10_bio);
2376}
2377
2378/*
2379 * Note: sync and recovery are handled very differently for raid10.
2380 * This code is for resync.
2381 * For resync, we read through virtual addresses and read all blocks.
2382 * If there is any error, we schedule a write.  The lowest numbered
2383 * drive is authoritative.
2384 * However, requests come for physical addresses, so we need to map.
2385 * For every physical address there are raid_disks/copies virtual addresses,
2386 * which is always at least one, but is not necessarily an integer.
2387 * This means that a physical address can span multiple chunks, so we may
2388 * have to submit multiple io requests for a single sync request.
2389 */
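/*
 * For example (illustrative): with raid_disks = 4 and copies = 2 every
 * physical address maps to 4/2 = 2 virtual addresses; with raid_disks = 5
 * that ratio becomes 2.5, which is why it is not necessarily an integer.
 */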
2390/*
2391 * We check if all blocks are in-sync and only write to blocks that
2392 * aren't in sync
2393 */
2394static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2395{
2396	struct r10conf *conf = mddev->private;
2397	int i, first;
2398	struct bio *tbio, *fbio;
2399	int vcnt;
2400	struct page **tpages, **fpages;
2401
2402	atomic_set(&r10_bio->remaining, 1);
2403
2404	/* find the first device with a block */
2405	for (i=0; i<conf->copies; i++)
2406		if (!r10_bio->devs[i].bio->bi_status)
2407			break;
2408
2409	if (i == conf->copies)
2410		goto done;
2411
2412	first = i;
2413	fbio = r10_bio->devs[i].bio;
2414	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2415	fbio->bi_iter.bi_idx = 0;
2416	fpages = get_resync_pages(fbio)->pages;
2417
2418	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2419	/* now find blocks with errors */
2420	for (i=0 ; i < conf->copies ; i++) {
2421		int  j, d;
2422		struct md_rdev *rdev;
2423		struct resync_pages *rp;
2424
2425		tbio = r10_bio->devs[i].bio;
2426
2427		if (tbio->bi_end_io != end_sync_read)
2428			continue;
2429		if (i == first)
2430			continue;
2431
2432		tpages = get_resync_pages(tbio)->pages;
2433		d = r10_bio->devs[i].devnum;
2434		rdev = conf->mirrors[d].rdev;
2435		if (!r10_bio->devs[i].bio->bi_status) {
2436			/* We know that the bi_io_vec layout is the same for
2437			 * both 'first' and 'i', so we just compare them.
2438			 * All vec entries are PAGE_SIZE;
2439			 */
2440			int sectors = r10_bio->sectors;
2441			for (j = 0; j < vcnt; j++) {
2442				int len = PAGE_SIZE;
2443				if (sectors < (len / 512))
2444					len = sectors * 512;
2445				if (memcmp(page_address(fpages[j]),
2446					   page_address(tpages[j]),
2447					   len))
2448					break;
2449				sectors -= len/512;
2450			}
2451			if (j == vcnt)
2452				continue;
2453			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2454			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2455				/* Don't fix anything. */
2456				continue;
2457		} else if (test_bit(FailFast, &rdev->flags)) {
2458			/* Just give up on this device */
2459			md_error(rdev->mddev, rdev);
2460			continue;
2461		}
2462		/* Ok, we need to write this bio, either to correct an
2463		 * inconsistency or to correct an unreadable block.
2464		 * First we need to fixup bv_offset, bv_len and
2465		 * bi_vecs, as the read request might have corrupted these
2466		 */
2467		rp = get_resync_pages(tbio);
2468		bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2469
2470		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2471
2472		rp->raid_bio = r10_bio;
2473		tbio->bi_private = rp;
2474		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2475		tbio->bi_end_io = end_sync_write;
2476
2477		bio_copy_data(tbio, fbio);
2478
 
2479		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2480		atomic_inc(&r10_bio->remaining);
2481		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2482
2483		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2484			tbio->bi_opf |= MD_FAILFAST;
2485		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2486		submit_bio_noacct(tbio);
 
2487	}
2488
2489	/* Now write out to any replacement devices
2490	 * that are active
2491	 */
2492	for (i = 0; i < conf->copies; i++) {
2493		int d;
2494
2495		tbio = r10_bio->devs[i].repl_bio;
2496		if (!tbio || !tbio->bi_end_io)
2497			continue;
2498		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2499		    && r10_bio->devs[i].bio != fbio)
2500			bio_copy_data(tbio, fbio);
2501		d = r10_bio->devs[i].devnum;
2502		atomic_inc(&r10_bio->remaining);
2503		md_sync_acct(conf->mirrors[d].replacement->bdev,
2504			     bio_sectors(tbio));
2505		submit_bio_noacct(tbio);
2506	}
2507
2508done:
2509	if (atomic_dec_and_test(&r10_bio->remaining)) {
2510		md_done_sync(mddev, r10_bio->sectors, 1);
2511		put_buf(r10_bio);
2512	}
2513}
2514
2515/*
2516 * Now for the recovery code.
2517 * Recovery happens across physical sectors.
2518 * We recover all non-in_sync drives by finding the virtual address of
2519 * each, and then choose a working drive that also has that virt address.
2520 * There is a separate r10_bio for each non-in_sync drive.
2521 * Only the first two slots are in use. The first for reading,
2522 * The second for writing.
2523 *
2524 */
2525static void fix_recovery_read_error(struct r10bio *r10_bio)
2526{
2527	/* We got a read error during recovery.
2528	 * We repeat the read in smaller page-sized sections.
2529	 * If a read succeeds, write it to the new device or record
2530	 * a bad block if we cannot.
2531	 * If a read fails, record a bad block on both old and
2532	 * new devices.
2533	 */
2534	struct mddev *mddev = r10_bio->mddev;
2535	struct r10conf *conf = mddev->private;
2536	struct bio *bio = r10_bio->devs[0].bio;
2537	sector_t sect = 0;
2538	int sectors = r10_bio->sectors;
2539	int idx = 0;
2540	int dr = r10_bio->devs[0].devnum;
2541	int dw = r10_bio->devs[1].devnum;
2542	struct page **pages = get_resync_pages(bio)->pages;
2543
2544	while (sectors) {
2545		int s = sectors;
2546		struct md_rdev *rdev;
2547		sector_t addr;
2548		int ok;
2549
2550		if (s > (PAGE_SIZE>>9))
2551			s = PAGE_SIZE >> 9;
2552
2553		rdev = conf->mirrors[dr].rdev;
2554		addr = r10_bio->devs[0].addr + sect,
2555		ok = sync_page_io(rdev,
2556				  addr,
2557				  s << 9,
2558				  pages[idx],
2559				  REQ_OP_READ, false);
2560		if (ok) {
2561			rdev = conf->mirrors[dw].rdev;
2562			addr = r10_bio->devs[1].addr + sect;
2563			ok = sync_page_io(rdev,
2564					  addr,
2565					  s << 9,
2566					  pages[idx],
2567					  REQ_OP_WRITE, false);
2568			if (!ok) {
2569				set_bit(WriteErrorSeen, &rdev->flags);
2570				if (!test_and_set_bit(WantReplacement,
2571						      &rdev->flags))
2572					set_bit(MD_RECOVERY_NEEDED,
2573						&rdev->mddev->recovery);
2574			}
2575		}
2576		if (!ok) {
2577			/* We don't worry if we cannot set a bad block -
2578			 * it really is bad so there is no loss in not
2579			 * recording it yet
2580			 */
2581			rdev_set_badblocks(rdev, addr, s, 0);
2582
2583			if (rdev != conf->mirrors[dw].rdev) {
2584				/* need bad block on destination too */
2585				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2586				addr = r10_bio->devs[1].addr + sect;
2587				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2588				if (!ok) {
2589					/* just abort the recovery */
2590					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2591						  mdname(mddev));
2592
2593					conf->mirrors[dw].recovery_disabled
2594						= mddev->recovery_disabled;
2595					set_bit(MD_RECOVERY_INTR,
2596						&mddev->recovery);
2597					break;
2598				}
2599			}
2600		}
2601
2602		sectors -= s;
2603		sect += s;
2604		idx++;
2605	}
2606}
2607
2608static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2609{
2610	struct r10conf *conf = mddev->private;
2611	int d;
2612	struct bio *wbio, *wbio2;
2613
2614	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2615		fix_recovery_read_error(r10_bio);
2616		end_sync_request(r10_bio);
2617		return;
2618	}
2619
2620	/*
2621	 * share the pages with the first bio
2622	 * and submit the write request
2623	 */
2624	d = r10_bio->devs[1].devnum;
2625	wbio = r10_bio->devs[1].bio;
2626	wbio2 = r10_bio->devs[1].repl_bio;
2627	/* Need to test wbio2->bi_end_io before we call
2628	 * submit_bio_noacct as if the former is NULL,
2629	 * the latter is free to free wbio2.
2630	 */
2631	if (wbio2 && !wbio2->bi_end_io)
2632		wbio2 = NULL;
2633	if (wbio->bi_end_io) {
2634		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2635		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2636		submit_bio_noacct(wbio);
2637	}
2638	if (wbio2) {
2639		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2640		md_sync_acct(conf->mirrors[d].replacement->bdev,
2641			     bio_sectors(wbio2));
2642		submit_bio_noacct(wbio2);
2643	}
2644}
2645
2646/*
2647 * Used by fix_read_error() to decay the per rdev read_errors.
2648 * We halve the read error count for every hour that has elapsed
2649 * since the last recorded read error.
2650 *
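 * For example (illustrative): if read_errors was 40 and three full hours
 * have passed since the last recorded error, the count decays to
 * 40 >> 3 = 5.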
2651 */
2652static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2653{
2654	long cur_time_mon;
2655	unsigned long hours_since_last;
2656	unsigned int read_errors = atomic_read(&rdev->read_errors);
2657
2658	cur_time_mon = ktime_get_seconds();
2659
2660	if (rdev->last_read_error == 0) {
 
2661		/* first time we've seen a read error */
2662		rdev->last_read_error = cur_time_mon;
2663		return;
2664	}
2665
2666	hours_since_last = (long)(cur_time_mon -
2667			    rdev->last_read_error) / 3600;
2668
2669	rdev->last_read_error = cur_time_mon;
2670
2671	/*
2672	 * if hours_since_last is > the number of bits in read_errors
2673	 * just set read errors to 0. We do this to avoid
2674	 * overflowing the shift of read_errors by hours_since_last.
2675	 */
2676	if (hours_since_last >= 8 * sizeof(read_errors))
2677		atomic_set(&rdev->read_errors, 0);
2678	else
2679		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2680}
2681
2682static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2683			    int sectors, struct page *page, enum req_op op)
2684{
2685	sector_t first_bad;
2686	int bad_sectors;
2687
2688	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2689	    && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2690		return -1;
2691	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2692		/* success */
2693		return 1;
2694	if (op == REQ_OP_WRITE) {
2695		set_bit(WriteErrorSeen, &rdev->flags);
2696		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2697			set_bit(MD_RECOVERY_NEEDED,
2698				&rdev->mddev->recovery);
2699	}
2700	/* need to record an error - either for the block or the device */
2701	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2702		md_error(rdev->mddev, rdev);
2703	return 0;
2704}
2705
2706/*
2707 * This is a kernel thread which:
2708 *
2709 *	1.	Retries failed read operations on working mirrors.
2710 *	2.	Updates the raid superblock when problems are encountered.
2711 *	3.	Performs writes following reads for array synchronising.
2712 */
2713
2714static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2715{
2716	int sect = 0; /* Offset from r10_bio->sector */
2717	int sectors = r10_bio->sectors;
2718	struct md_rdev *rdev;
2719	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2720	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2721
2722	/* still own a reference to this rdev, so it cannot
2723	 * have been cleared recently.
2724	 */
2725	rdev = conf->mirrors[d].rdev;
2726
2727	if (test_bit(Faulty, &rdev->flags))
2728		/* drive has already been failed, just ignore any
2729		   more fix_read_error() attempts */
2730		return;
2731
2732	check_decay_read_errors(mddev, rdev);
2733	atomic_inc(&rdev->read_errors);
2734	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2735		pr_notice("md/raid10:%s: %pg: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2736			  mdname(mddev), rdev->bdev,
2737			  atomic_read(&rdev->read_errors), max_read_errors);
2738		pr_notice("md/raid10:%s: %pg: Failing raid device\n",
2739			  mdname(mddev), rdev->bdev);
2740		md_error(mddev, rdev);
2741		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2742		return;
2743	}
2744
2745	while(sectors) {
2746		int s = sectors;
2747		int sl = r10_bio->read_slot;
2748		int success = 0;
2749		int start;
2750
2751		if (s > (PAGE_SIZE>>9))
2752			s = PAGE_SIZE >> 9;
2753
2754		rcu_read_lock();
2755		do {
2756			sector_t first_bad;
2757			int bad_sectors;
2758
2759			d = r10_bio->devs[sl].devnum;
2760			rdev = rcu_dereference(conf->mirrors[d].rdev);
2761			if (rdev &&
2762			    test_bit(In_sync, &rdev->flags) &&
2763			    !test_bit(Faulty, &rdev->flags) &&
2764			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2765					&first_bad, &bad_sectors) == 0) {
2766				atomic_inc(&rdev->nr_pending);
2767				rcu_read_unlock();
2768				success = sync_page_io(rdev,
2769						       r10_bio->devs[sl].addr +
2770						       sect,
2771						       s<<9,
2772						       conf->tmppage,
2773						       REQ_OP_READ, false);
2774				rdev_dec_pending(rdev, mddev);
2775				rcu_read_lock();
2776				if (success)
2777					break;
2778			}
2779			sl++;
2780			if (sl == conf->copies)
2781				sl = 0;
2782		} while (!success && sl != r10_bio->read_slot);
2783		rcu_read_unlock();
2784
2785		if (!success) {
2786			/* Cannot read from anywhere, just mark the block
2787			 * as bad on the first device to discourage future
2788			 * reads.
2789			 */
2790			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2791			rdev = conf->mirrors[dn].rdev;
2792
2793			if (!rdev_set_badblocks(
2794				    rdev,
2795				    r10_bio->devs[r10_bio->read_slot].addr
2796				    + sect,
2797				    s, 0)) {
2798				md_error(mddev, rdev);
2799				r10_bio->devs[r10_bio->read_slot].bio
2800					= IO_BLOCKED;
2801			}
2802			break;
2803		}
2804
2805		start = sl;
2806		/* write it back and re-read */
2807		rcu_read_lock();
2808		while (sl != r10_bio->read_slot) {
2809			if (sl==0)
2810				sl = conf->copies;
2811			sl--;
2812			d = r10_bio->devs[sl].devnum;
2813			rdev = rcu_dereference(conf->mirrors[d].rdev);
2814			if (!rdev ||
2815			    test_bit(Faulty, &rdev->flags) ||
2816			    !test_bit(In_sync, &rdev->flags))
2817				continue;
2818
2819			atomic_inc(&rdev->nr_pending);
2820			rcu_read_unlock();
2821			if (r10_sync_page_io(rdev,
2822					     r10_bio->devs[sl].addr +
2823					     sect,
2824					     s, conf->tmppage, REQ_OP_WRITE)
2825			    == 0) {
2826				/* Well, this device is dead */
2827				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
2828					  mdname(mddev), s,
2829					  (unsigned long long)(
2830						  sect +
2831						  choose_data_offset(r10_bio,
2832								     rdev)),
2833					  rdev->bdev);
2834				pr_notice("md/raid10:%s: %pg: failing drive\n",
2835					  mdname(mddev),
2836					  rdev->bdev);
2837			}
2838			rdev_dec_pending(rdev, mddev);
2839			rcu_read_lock();
2840		}
2841		sl = start;
2842		while (sl != r10_bio->read_slot) {
2843			if (sl==0)
2844				sl = conf->copies;
2845			sl--;
2846			d = r10_bio->devs[sl].devnum;
2847			rdev = rcu_dereference(conf->mirrors[d].rdev);
2848			if (!rdev ||
2849			    test_bit(Faulty, &rdev->flags) ||
2850			    !test_bit(In_sync, &rdev->flags))
2851				continue;
2852
2853			atomic_inc(&rdev->nr_pending);
2854			rcu_read_unlock();
2855			switch (r10_sync_page_io(rdev,
2856					     r10_bio->devs[sl].addr +
2857					     sect,
2858					     s, conf->tmppage, REQ_OP_READ)) {
 
2859			case 0:
2860				/* Well, this device is dead */
2861				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
2862				       mdname(mddev), s,
2863				       (unsigned long long)(
2864					       sect +
2865					       choose_data_offset(r10_bio, rdev)),
2866				       rdev->bdev);
2867				pr_notice("md/raid10:%s: %pg: failing drive\n",
2868				       mdname(mddev),
2869				       rdev->bdev);
2870				break;
2871			case 1:
2872				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %pg)\n",
2873				       mdname(mddev), s,
2874				       (unsigned long long)(
2875					       sect +
2876					       choose_data_offset(r10_bio, rdev)),
2877				       rdev->bdev);
2878				atomic_add(s, &rdev->corrected_errors);
2879			}
2880
2881			rdev_dec_pending(rdev, mddev);
2882			rcu_read_lock();
2883		}
2884		rcu_read_unlock();
2885
2886		sectors -= s;
2887		sect += s;
2888	}
2889}
2890
2891static int narrow_write_error(struct r10bio *r10_bio, int i)
2892{
2893	struct bio *bio = r10_bio->master_bio;
2894	struct mddev *mddev = r10_bio->mddev;
2895	struct r10conf *conf = mddev->private;
2896	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2897	/* bio has the data to be written to slot 'i' where
2898	 * we just recently had a write error.
2899	 * We repeatedly clone the bio and trim down to one block,
2900	 * then try the write.  Where the write fails we record
2901	 * a bad block.
2902	 * It is conceivable that the bio doesn't exactly align with
2903	 * blocks.  We must handle this.
2904	 *
2905	 * We currently own a reference to the rdev.
2906	 */
2907
2908	int block_sectors;
2909	sector_t sector;
2910	int sectors;
2911	int sect_to_write = r10_bio->sectors;
2912	int ok = 1;
2913
2914	if (rdev->badblocks.shift < 0)
2915		return 0;
2916
2917	block_sectors = roundup(1 << rdev->badblocks.shift,
2918				bdev_logical_block_size(rdev->bdev) >> 9);
2919	sector = r10_bio->sector;
2920	sectors = ((r10_bio->sector + block_sectors)
2921		   & ~(sector_t)(block_sectors - 1))
2922		- sector;
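	/*
	 * Example (assumed values): with 8-sector badblock granularity and
	 * r10_bio->sector = 1003, the first pass writes 5 sectors to reach the
	 * next 8-sector boundary at 1008; subsequent passes then write
	 * block_sectors (8) sectors at a time.
	 */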
2923
2924	while (sect_to_write) {
2925		struct bio *wbio;
2926		sector_t wsector;
2927		if (sectors > sect_to_write)
2928			sectors = sect_to_write;
2929		/* Write at 'sector' for 'sectors' */
2930		wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2931				       &mddev->bio_set);
2932		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2933		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2934		wbio->bi_iter.bi_sector = wsector +
2935				   choose_data_offset(r10_bio, rdev);
2936		wbio->bi_opf = REQ_OP_WRITE;
2937
2938		if (submit_bio_wait(wbio) < 0)
2939			/* Failure! */
2940			ok = rdev_set_badblocks(rdev, wsector,
2941						sectors, 0)
2942				&& ok;
2943
2944		bio_put(wbio);
2945		sect_to_write -= sectors;
2946		sector += sectors;
2947		sectors = block_sectors;
2948	}
2949	return ok;
2950}
2951
2952static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2953{
2954	int slot = r10_bio->read_slot;
2955	struct bio *bio;
2956	struct r10conf *conf = mddev->private;
2957	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2958
2959	/* we got a read error. Maybe the drive is bad.  Maybe just
2960	 * the block and we can fix it.
2961	 * We freeze all other IO, and try reading the block from
2962	 * other devices.  When we find one, we re-write
2963	 * and check it that fixes the read error.
2964	 * This is all done synchronously while the array is
2965	 * frozen.
2966	 */
2967	bio = r10_bio->devs[slot].bio;
 
2968	bio_put(bio);
2969	r10_bio->devs[slot].bio = NULL;
2970
2971	if (mddev->ro)
2972		r10_bio->devs[slot].bio = IO_BLOCKED;
2973	else if (!test_bit(FailFast, &rdev->flags)) {
2974		freeze_array(conf, 1);
2975		fix_read_error(conf, mddev, r10_bio);
2976		unfreeze_array(conf);
2977	} else
2978		md_error(mddev, rdev);
2979
2980	rdev_dec_pending(rdev, mddev);
2981	allow_barrier(conf);
2982	r10_bio->state = 0;
2983	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2984}
2985
2986static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2987{
2988	/* Some sort of write request has finished and it
2989	 * succeeded in writing where we thought there was a
2990	 * bad block.  So forget the bad block.
2991	 * Or possibly if failed and we need to record
2992	 * a bad block.
2993	 */
2994	int m;
2995	struct md_rdev *rdev;
2996
2997	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2998	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2999		for (m = 0; m < conf->copies; m++) {
3000			int dev = r10_bio->devs[m].devnum;
3001			rdev = conf->mirrors[dev].rdev;
3002			if (r10_bio->devs[m].bio == NULL ||
3003				r10_bio->devs[m].bio->bi_end_io == NULL)
3004				continue;
3005			if (!r10_bio->devs[m].bio->bi_status) {
3006				rdev_clear_badblocks(
3007					rdev,
3008					r10_bio->devs[m].addr,
3009					r10_bio->sectors, 0);
3010			} else {
3011				if (!rdev_set_badblocks(
3012					    rdev,
3013					    r10_bio->devs[m].addr,
3014					    r10_bio->sectors, 0))
3015					md_error(conf->mddev, rdev);
3016			}
3017			rdev = conf->mirrors[dev].replacement;
3018			if (r10_bio->devs[m].repl_bio == NULL ||
3019				r10_bio->devs[m].repl_bio->bi_end_io == NULL)
3020				continue;
3021
3022			if (!r10_bio->devs[m].repl_bio->bi_status) {
3023				rdev_clear_badblocks(
3024					rdev,
3025					r10_bio->devs[m].addr,
3026					r10_bio->sectors, 0);
3027			} else {
3028				if (!rdev_set_badblocks(
3029					    rdev,
3030					    r10_bio->devs[m].addr,
3031					    r10_bio->sectors, 0))
3032					md_error(conf->mddev, rdev);
3033			}
3034		}
3035		put_buf(r10_bio);
3036	} else {
3037		bool fail = false;
3038		for (m = 0; m < conf->copies; m++) {
3039			int dev = r10_bio->devs[m].devnum;
3040			struct bio *bio = r10_bio->devs[m].bio;
3041			rdev = conf->mirrors[dev].rdev;
3042			if (bio == IO_MADE_GOOD) {
3043				rdev_clear_badblocks(
3044					rdev,
3045					r10_bio->devs[m].addr,
3046					r10_bio->sectors, 0);
3047				rdev_dec_pending(rdev, conf->mddev);
3048			} else if (bio != NULL && bio->bi_status) {
3049				fail = true;
3050				if (!narrow_write_error(r10_bio, m)) {
3051					md_error(conf->mddev, rdev);
3052					set_bit(R10BIO_Degraded,
3053						&r10_bio->state);
3054				}
3055				rdev_dec_pending(rdev, conf->mddev);
3056			}
3057			bio = r10_bio->devs[m].repl_bio;
3058			rdev = conf->mirrors[dev].replacement;
3059			if (rdev && bio == IO_MADE_GOOD) {
3060				rdev_clear_badblocks(
3061					rdev,
3062					r10_bio->devs[m].addr,
3063					r10_bio->sectors, 0);
3064				rdev_dec_pending(rdev, conf->mddev);
3065			}
3066		}
3067		if (fail) {
3068			spin_lock_irq(&conf->device_lock);
3069			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
3070			conf->nr_queued++;
3071			spin_unlock_irq(&conf->device_lock);
3072			/*
3073			 * In case freeze_array() is waiting for condition
3074			 * nr_pending == nr_queued + extra to be true.
3075			 */
3076			wake_up(&conf->wait_barrier);
3077			md_wakeup_thread(conf->mddev->thread);
3078		} else {
3079			if (test_bit(R10BIO_WriteError,
3080				     &r10_bio->state))
3081				close_write(r10_bio);
3082			raid_end_bio_io(r10_bio);
3083		}
3084	}
3085}
3086
3087static void raid10d(struct md_thread *thread)
3088{
3089	struct mddev *mddev = thread->mddev;
3090	struct r10bio *r10_bio;
3091	unsigned long flags;
3092	struct r10conf *conf = mddev->private;
3093	struct list_head *head = &conf->retry_list;
3094	struct blk_plug plug;
3095
3096	md_check_recovery(mddev);
3097
3098	if (!list_empty_careful(&conf->bio_end_io_list) &&
3099	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3100		LIST_HEAD(tmp);
3101		spin_lock_irqsave(&conf->device_lock, flags);
3102		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3103			while (!list_empty(&conf->bio_end_io_list)) {
3104				list_move(conf->bio_end_io_list.prev, &tmp);
3105				conf->nr_queued--;
3106			}
3107		}
3108		spin_unlock_irqrestore(&conf->device_lock, flags);
3109		while (!list_empty(&tmp)) {
3110			r10_bio = list_first_entry(&tmp, struct r10bio,
3111						   retry_list);
3112			list_del(&r10_bio->retry_list);
3113			if (mddev->degraded)
3114				set_bit(R10BIO_Degraded, &r10_bio->state);
3115
3116			if (test_bit(R10BIO_WriteError,
3117				     &r10_bio->state))
3118				close_write(r10_bio);
3119			raid_end_bio_io(r10_bio);
3120		}
3121	}
3122
3123	blk_start_plug(&plug);
3124	for (;;) {
3125
3126		flush_pending_writes(conf);
3127
3128		spin_lock_irqsave(&conf->device_lock, flags);
3129		if (list_empty(head)) {
3130			spin_unlock_irqrestore(&conf->device_lock, flags);
3131			break;
3132		}
3133		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3134		list_del(head->prev);
3135		conf->nr_queued--;
3136		spin_unlock_irqrestore(&conf->device_lock, flags);
3137
3138		mddev = r10_bio->mddev;
3139		conf = mddev->private;
3140		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3141		    test_bit(R10BIO_WriteError, &r10_bio->state))
3142			handle_write_completed(conf, r10_bio);
3143		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3144			reshape_request_write(mddev, r10_bio);
3145		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3146			sync_request_write(mddev, r10_bio);
3147		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3148			recovery_request_write(mddev, r10_bio);
3149		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3150			handle_read_error(mddev, r10_bio);
3151		else
3152			WARN_ON_ONCE(1);
3153
3154		cond_resched();
3155		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3156			md_check_recovery(mddev);
3157	}
3158	blk_finish_plug(&plug);
3159}
3160
3161static int init_resync(struct r10conf *conf)
3162{
3163	int ret, buffs, i;
 
3164
3165	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
3166	BUG_ON(mempool_initialized(&conf->r10buf_pool));
3167	conf->have_replacement = 0;
3168	for (i = 0; i < conf->geo.raid_disks; i++)
3169		if (conf->mirrors[i].replacement)
3170			conf->have_replacement = 1;
3171	ret = mempool_init(&conf->r10buf_pool, buffs,
3172			   r10buf_pool_alloc, r10buf_pool_free, conf);
3173	if (ret)
3174		return ret;
3175	conf->next_resync = 0;
3176	return 0;
3177}
3178
3179static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3180{
3181	struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3182	struct rsync_pages *rp;
3183	struct bio *bio;
3184	int nalloc;
3185	int i;
3186
3187	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3188	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3189		nalloc = conf->copies; /* resync */
3190	else
3191		nalloc = 2; /* recovery */
3192
3193	for (i = 0; i < nalloc; i++) {
3194		bio = r10bio->devs[i].bio;
3195		rp = bio->bi_private;
3196		bio_reset(bio, NULL, 0);
3197		bio->bi_private = rp;
3198		bio = r10bio->devs[i].repl_bio;
3199		if (bio) {
3200			rp = bio->bi_private;
3201			bio_reset(bio, NULL, 0);
3202			bio->bi_private = rp;
3203		}
3204	}
3205	return r10bio;
3206}
3207
3208/*
3209 * Set cluster_sync_high since we need other nodes to add the
3210 * range [cluster_sync_low, cluster_sync_high] to suspend list.
3211 */
3212static void raid10_set_cluster_sync_high(struct r10conf *conf)
3213{
3214	sector_t window_size;
3215	int extra_chunk, chunks;
3216
3217	/*
3218	 * First, here we define a "stripe" as a unit that crosses all
3219	 * member devices once, so we get the number of chunks with
3220	 * raid_disks / near_copies. Otherwise, if near_copies is
3221	 * close to raid_disks, the resync window could increase
3222	 * linearly with the number of raid_disks, which means we
3223	 * would suspend a really large IO window when it is not
3224	 * necessary. If raid_disks is not divisible by near_copies,
3225	 * an extra chunk is needed to ensure the whole "stripe" is
3226	 * covered.
3227	 */
3228
3229	chunks = conf->geo.raid_disks / conf->geo.near_copies;
3230	if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3231		extra_chunk = 0;
3232	else
3233		extra_chunk = 1;
3234	window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
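	/*
	 * Illustrative example: with raid_disks = 5, near_copies = 2 and
	 * 1024-sector chunks, chunks = 2, extra_chunk = 1 and window_size =
	 * 3 * 1024 = 3072 sectors, which is below the 32M minimum applied
	 * below, so the larger window wins.
	 */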
3235
3236	/*
3237	 * At least use a 32M window to align with raid1's resync window
3238	 */
3239	window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
3240			CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
3241
3242	conf->cluster_sync_high = conf->cluster_sync_low + window_size;
3243}
3244
3245/*
3246 * perform a "sync" on one "block"
3247 *
3248 * We need to make sure that no normal I/O request - particularly write
3249 * requests - conflict with active sync requests.
3250 *
3251 * This is achieved by tracking pending requests and a 'barrier' concept
3252 * that can be installed to exclude normal IO requests.
3253 *
3254 * Resync and recovery are handled very differently.
3255 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3256 *
3257 * For resync, we iterate over virtual addresses, read all copies,
3258 * and update if there are differences.  If only one copy is live,
3259 * skip it.
3260 * For recovery, we iterate over physical addresses, read a good
3261 * value for each non-in_sync drive, and over-write.
3262 *
3263 * So, for recovery we may have several outstanding complex requests for a
3264 * given address, one for each out-of-sync device.  We model this by allocating
3265 * a number of r10_bio structures, one for each out-of-sync device.
3266 * As we setup these structures, we collect all bio's together into a list
3267 * which we then process collectively to add pages, and then process again
3268 * to pass to submit_bio_noacct.
3269 *
3270 * The r10_bio structures are linked using a borrowed master_bio pointer.
3271 * This link is counted in ->remaining.  When the r10_bio that points to NULL
3272 * has its remaining count decremented to 0, the whole complex operation
3273 * is complete.
3274 *
3275 */
3276
3277static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3278			     int *skipped)
3279{
3280	struct r10conf *conf = mddev->private;
3281	struct r10bio *r10_bio;
3282	struct bio *biolist = NULL, *bio;
3283	sector_t max_sector, nr_sectors;
3284	int i;
3285	int max_sync;
3286	sector_t sync_blocks;
3287	sector_t sectors_skipped = 0;
3288	int chunks_skipped = 0;
3289	sector_t chunk_mask = conf->geo.chunk_mask;
3290	int page_idx = 0;
3291
3292	if (!mempool_initialized(&conf->r10buf_pool))
3293		if (init_resync(conf))
3294			return 0;
3295
3296	/*
3297	 * Allow skipping a full rebuild for incremental assembly
3298	 * of a clean array, like RAID1 does.
3299	 */
3300	if (mddev->bitmap == NULL &&
3301	    mddev->recovery_cp == MaxSector &&
3302	    mddev->reshape_position == MaxSector &&
3303	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3304	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3305	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3306	    conf->fullsync == 0) {
3307		*skipped = 1;
3308		return mddev->dev_sectors - sector_nr;
3309	}
3310
3311 skipped:
3312	max_sector = mddev->dev_sectors;
3313	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
3314	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3315		max_sector = mddev->resync_max_sectors;
3316	if (sector_nr >= max_sector) {
3317		conf->cluster_sync_low = 0;
3318		conf->cluster_sync_high = 0;
3319
3320		/* If we aborted, we need to abort the
3321		 * sync on the 'current' bitmap chunks (there can
3322		 * be several when recovering multiple devices).
3323		 * as we may have started syncing it but not finished.
3324		 * We can find the current address in
3325		 * mddev->curr_resync, but for recovery,
3326		 * we need to convert that to several
3327		 * virtual addresses.
3328		 */
3329		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3330			end_reshape(conf);
3331			close_sync(conf);
3332			return 0;
3333		}
3334
3335		if (mddev->curr_resync < max_sector) { /* aborted */
3336			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3337				md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3338						   &sync_blocks, 1);
3339			else for (i = 0; i < conf->geo.raid_disks; i++) {
3340				sector_t sect =
3341					raid10_find_virt(conf, mddev->curr_resync, i);
3342				md_bitmap_end_sync(mddev->bitmap, sect,
3343						   &sync_blocks, 1);
3344			}
3345		} else {
3346			/* completed sync */
3347			if ((!mddev->bitmap || conf->fullsync)
3348			    && conf->have_replacement
3349			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3350				/* Completed a full sync so the replacements
3351				 * are now fully recovered.
3352				 */
3353				rcu_read_lock();
3354				for (i = 0; i < conf->geo.raid_disks; i++) {
3355					struct md_rdev *rdev =
3356						rcu_dereference(conf->mirrors[i].replacement);
3357					if (rdev)
3358						rdev->recovery_offset = MaxSector;
3359				}
3360				rcu_read_unlock();
3361			}
3362			conf->fullsync = 0;
3363		}
3364		md_bitmap_close_sync(mddev->bitmap);
3365		close_sync(conf);
3366		*skipped = 1;
3367		return sectors_skipped;
3368	}
3369
3370	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3371		return reshape_request(mddev, sector_nr, skipped);
3372
3373	if (chunks_skipped >= conf->geo.raid_disks) {
3374		/* if there has been nothing to do on any drive,
3375		 * then there is nothing to do at all..
3376		 */
3377		*skipped = 1;
3378		return (max_sector - sector_nr) + sectors_skipped;
3379	}
3380
3381	if (max_sector > mddev->resync_max)
3382		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3383
3384	/* make sure whole request will fit in a chunk - if chunks
3385	 * are meaningful
3386	 */
3387	if (conf->geo.near_copies < conf->geo.raid_disks &&
3388	    max_sector > (sector_nr | chunk_mask))
3389		max_sector = (sector_nr | chunk_mask) + 1;
3390
3391	/*
3392	 * If there is non-resync activity waiting for a turn, then let it
3393	 * through before starting on this new sync request.
3394	 */
3395	if (conf->nr_waiting)
3396		schedule_timeout_uninterruptible(1);
3397
3398	/* Again, very different code for resync and recovery.
3399	 * Both must result in an r10bio with a list of bios that
3400	 * have bi_end_io, bi_sector, bi_bdev set,
3401	 * and bi_private set to the r10bio.
3402	 * For recovery, we may actually create several r10bios
3403	 * with 2 bios in each, that correspond to the bios in the main one.
3404	 * In this case, the subordinate r10bios link back through a
3405	 * borrowed master_bio pointer, and the counter in the master
3406	 * includes a ref from each subordinate.
3407	 */
3408	/* First, we decide what to do and set ->bi_end_io
3409	 * To end_sync_read if we want to read, and
3410	 * end_sync_write if we will want to write.
3411	 */
3412
3413	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3414	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3415		/* recovery... the complicated one */
3416		int j;
3417		r10_bio = NULL;
3418
3419		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3420			int still_degraded;
3421			struct r10bio *rb2;
3422			sector_t sect;
3423			int must_sync;
3424			int any_working;
3425			int need_recover = 0;
3426			int need_replace = 0;
3427			struct raid10_info *mirror = &conf->mirrors[i];
3428			struct md_rdev *mrdev, *mreplace;
3429
3430			rcu_read_lock();
3431			mrdev = rcu_dereference(mirror->rdev);
3432			mreplace = rcu_dereference(mirror->replacement);
3433
3434			if (mrdev != NULL &&
3435			    !test_bit(Faulty, &mrdev->flags) &&
3436			    !test_bit(In_sync, &mrdev->flags))
3437				need_recover = 1;
3438			if (mreplace != NULL &&
3439			    !test_bit(Faulty, &mreplace->flags))
3440				need_replace = 1;
3441
3442			if (!need_recover && !need_replace) {
3443				rcu_read_unlock();
3444				continue;
3445			}
3446
3447			still_degraded = 0;
3448			/* want to reconstruct this device */
3449			rb2 = r10_bio;
3450			sect = raid10_find_virt(conf, sector_nr, i);
3451			if (sect >= mddev->resync_max_sectors) {
3452				/* last stripe is not complete - don't
3453				 * try to recover this sector.
3454				 */
3455				rcu_read_unlock();
3456				continue;
3457			}
3458			if (mreplace && test_bit(Faulty, &mreplace->flags))
3459				mreplace = NULL;
3460			/* Unless we are doing a full sync, or a replacement
3461			 * we only need to recover the block if it is set in
3462			 * the bitmap
3463			 */
3464			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3465							 &sync_blocks, 1);
3466			if (sync_blocks < max_sync)
3467				max_sync = sync_blocks;
3468			if (!must_sync &&
3469			    mreplace == NULL &&
3470			    !conf->fullsync) {
3471				/* yep, skip the sync_blocks here, but don't assume
3472				 * that there will never be anything to do here
3473				 */
3474				chunks_skipped = -1;
3475				rcu_read_unlock();
3476				continue;
3477			}
3478			atomic_inc(&mrdev->nr_pending);
3479			if (mreplace)
3480				atomic_inc(&mreplace->nr_pending);
3481			rcu_read_unlock();
3482
3483			r10_bio = raid10_alloc_init_r10buf(conf);
3484			r10_bio->state = 0;
3485			raise_barrier(conf, rb2 != NULL);
3486			atomic_set(&r10_bio->remaining, 0);
3487
3488			r10_bio->master_bio = (struct bio*)rb2;
3489			if (rb2)
3490				atomic_inc(&rb2->remaining);
3491			r10_bio->mddev = mddev;
3492			set_bit(R10BIO_IsRecover, &r10_bio->state);
3493			r10_bio->sector = sect;
3494
3495			raid10_find_phys(conf, r10_bio);
3496
3497			/* Need to check if the array will still be
3498			 * degraded
3499			 */
3500			rcu_read_lock();
3501			for (j = 0; j < conf->geo.raid_disks; j++) {
3502				struct md_rdev *rdev = rcu_dereference(
3503					conf->mirrors[j].rdev);
3504				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3505					still_degraded = 1;
3506					break;
3507				}
3508			}
3509
3510			must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3511							 &sync_blocks, still_degraded);
3512
3513			any_working = 0;
3514			for (j=0; j<conf->copies;j++) {
3515				int k;
3516				int d = r10_bio->devs[j].devnum;
3517				sector_t from_addr, to_addr;
3518				struct md_rdev *rdev =
3519					rcu_dereference(conf->mirrors[d].rdev);
3520				sector_t sector, first_bad;
3521				int bad_sectors;
3522				if (!rdev ||
3523				    !test_bit(In_sync, &rdev->flags))
3524					continue;
3525				/* This is where we read from */
3526				any_working = 1;
 
3527				sector = r10_bio->devs[j].addr;
3528
3529				if (is_badblock(rdev, sector, max_sync,
3530						&first_bad, &bad_sectors)) {
3531					if (first_bad > sector)
3532						max_sync = first_bad - sector;
3533					else {
3534						bad_sectors -= (sector
3535								- first_bad);
3536						if (max_sync > bad_sectors)
3537							max_sync = bad_sectors;
3538						continue;
3539					}
3540				}
3541				bio = r10_bio->devs[0].bio;
 
3542				bio->bi_next = biolist;
3543				biolist = bio;
 
3544				bio->bi_end_io = end_sync_read;
3545				bio->bi_opf = REQ_OP_READ;
3546				if (test_bit(FailFast, &rdev->flags))
3547					bio->bi_opf |= MD_FAILFAST;
3548				from_addr = r10_bio->devs[j].addr;
3549				bio->bi_iter.bi_sector = from_addr +
3550					rdev->data_offset;
3551				bio_set_dev(bio, rdev->bdev);
3552				atomic_inc(&rdev->nr_pending);
3553				/* and we write to 'i' (if not in_sync) */
3554
3555				for (k=0; k<conf->copies; k++)
3556					if (r10_bio->devs[k].devnum == i)
3557						break;
3558				BUG_ON(k == conf->copies);
3559				to_addr = r10_bio->devs[k].addr;
3560				r10_bio->devs[0].devnum = d;
3561				r10_bio->devs[0].addr = from_addr;
3562				r10_bio->devs[1].devnum = i;
3563				r10_bio->devs[1].addr = to_addr;
3564
3565				if (need_recover) {
 
3566					bio = r10_bio->devs[1].bio;
 
3567					bio->bi_next = biolist;
3568					biolist = bio;
 
3569					bio->bi_end_io = end_sync_write;
3570					bio->bi_opf = REQ_OP_WRITE;
3571					bio->bi_iter.bi_sector = to_addr
3572						+ mrdev->data_offset;
3573					bio_set_dev(bio, mrdev->bdev);
3574					atomic_inc(&r10_bio->remaining);
3575				} else
3576					r10_bio->devs[1].bio->bi_end_io = NULL;
3577
3578				/* and maybe write to replacement */
3579				bio = r10_bio->devs[1].repl_bio;
3580				if (bio)
3581					bio->bi_end_io = NULL;
3582				/* Note: if need_replace, then bio
3583				 * cannot be NULL as r10buf_pool_alloc will
3584				 * have allocated it.
3585				 */
3586				if (!need_replace)
 
3587					break;
 
3588				bio->bi_next = biolist;
3589				biolist = bio;
 
3590				bio->bi_end_io = end_sync_write;
3591				bio->bi_opf = REQ_OP_WRITE;
3592				bio->bi_iter.bi_sector = to_addr +
3593					mreplace->data_offset;
3594				bio_set_dev(bio, mreplace->bdev);
3595				atomic_inc(&r10_bio->remaining);
3596				break;
3597			}
3598			rcu_read_unlock();
3599			if (j == conf->copies) {
3600				/* Cannot recover, so abort the recovery or
3601				 * record a bad block */
3602				if (any_working) {
3603					/* problem is that there are bad blocks
3604					 * on other device(s)
3605					 */
3606					int k;
3607					for (k = 0; k < conf->copies; k++)
3608						if (r10_bio->devs[k].devnum == i)
3609							break;
3610					if (!test_bit(In_sync,
3611						      &mrdev->flags)
3612					    && !rdev_set_badblocks(
3613						    mrdev,
3614						    r10_bio->devs[k].addr,
3615						    max_sync, 0))
3616						any_working = 0;
3617					if (mreplace &&
3618					    !rdev_set_badblocks(
3619						    mreplace,
3620						    r10_bio->devs[k].addr,
3621						    max_sync, 0))
3622						any_working = 0;
3623				}
3624				if (!any_working)  {
3625					if (!test_and_set_bit(MD_RECOVERY_INTR,
3626							      &mddev->recovery))
3627						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3628						       mdname(mddev));
3629					mirror->recovery_disabled
3630						= mddev->recovery_disabled;
3631				}
3632				put_buf(r10_bio);
3633				if (rb2)
3634					atomic_dec(&rb2->remaining);
3635				r10_bio = rb2;
3636				rdev_dec_pending(mrdev, mddev);
3637				if (mreplace)
3638					rdev_dec_pending(mreplace, mddev);
3639				break;
3640			}
3641			rdev_dec_pending(mrdev, mddev);
3642			if (mreplace)
3643				rdev_dec_pending(mreplace, mddev);
3644			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3645				/* Only want this if there is elsewhere to
3646				 * read from. 'j' is currently the first
3647				 * readable copy.
3648				 */
3649				int targets = 1;
3650				for (; j < conf->copies; j++) {
3651					int d = r10_bio->devs[j].devnum;
3652					if (conf->mirrors[d].rdev &&
3653					    test_bit(In_sync,
3654						      &conf->mirrors[d].rdev->flags))
3655						targets++;
3656				}
3657				if (targets == 1)
3658					r10_bio->devs[0].bio->bi_opf
3659						&= ~MD_FAILFAST;
3660			}
3661		}
3662		if (biolist == NULL) {
3663			while (r10_bio) {
3664				struct r10bio *rb2 = r10_bio;
3665				r10_bio = (struct r10bio*) rb2->master_bio;
3666				rb2->master_bio = NULL;
3667				put_buf(rb2);
3668			}
3669			goto giveup;
3670		}
3671	} else {
3672		/* resync. Schedule a read for every block at this virt offset */
3673		int count = 0;
3674
3675		/*
3676		 * Since curr_resync_completed may not be updated in time,
3677		 * and we will set cluster_sync_low based on it, check
3678		 * against "sector_nr + 2 * RESYNC_SECTORS" as a safety
3679		 * margin; this ensures curr_resync_completed is updated
3680		 * by md_bitmap_cond_end_sync.
3681		 */
3682		md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3683					mddev_is_clustered(mddev) &&
3684					(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3685
3686		if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3687					  &sync_blocks, mddev->degraded) &&
3688		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3689						 &mddev->recovery)) {
3690			/* We can skip this block */
3691			*skipped = 1;
3692			return sync_blocks + sectors_skipped;
3693		}
3694		if (sync_blocks < max_sync)
3695			max_sync = sync_blocks;
3696		r10_bio = raid10_alloc_init_r10buf(conf);
3697		r10_bio->state = 0;
3698
3699		r10_bio->mddev = mddev;
3700		atomic_set(&r10_bio->remaining, 0);
3701		raise_barrier(conf, 0);
3702		conf->next_resync = sector_nr;
3703
3704		r10_bio->master_bio = NULL;
3705		r10_bio->sector = sector_nr;
3706		set_bit(R10BIO_IsSync, &r10_bio->state);
3707		raid10_find_phys(conf, r10_bio);
3708		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3709
3710		for (i = 0; i < conf->copies; i++) {
3711			int d = r10_bio->devs[i].devnum;
3712			sector_t first_bad, sector;
3713			int bad_sectors;
3714			struct md_rdev *rdev;
3715
3716			if (r10_bio->devs[i].repl_bio)
3717				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3718
3719			bio = r10_bio->devs[i].bio;
3720			bio->bi_status = BLK_STS_IOERR;
3721			rcu_read_lock();
3722			rdev = rcu_dereference(conf->mirrors[d].rdev);
3723			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3724				rcu_read_unlock();
3725				continue;
3726			}
3727			sector = r10_bio->devs[i].addr;
3728			if (is_badblock(rdev, sector, max_sync,
3729					&first_bad, &bad_sectors)) {
3730				if (first_bad > sector)
3731					max_sync = first_bad - sector;
3732				else {
3733					bad_sectors -= (sector - first_bad);
3734					if (max_sync > bad_sectors)
3735						max_sync = bad_sectors;
3736					rcu_read_unlock();
3737					continue;
3738				}
3739			}
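			/*
			 * Editor's example (illustrative values, not from the
			 * original source): with sector = 100, a bad range at
			 * first_bad = 80 with bad_sectors = 30 still covers this
			 * sector, so max_sync is clamped to the 10 remaining bad
			 * sectors and this copy is skipped; with first_bad = 120,
			 * max_sync is clamped to 20 so the read stops just before
			 * the bad range.
			 */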
3740			atomic_inc(&rdev->nr_pending);
3741			atomic_inc(&r10_bio->remaining);
3742			bio->bi_next = biolist;
3743			biolist = bio;
 
3744			bio->bi_end_io = end_sync_read;
3745			bio->bi_opf = REQ_OP_READ;
3746			if (test_bit(FailFast, &rdev->flags))
3747				bio->bi_opf |= MD_FAILFAST;
3748			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3749			bio_set_dev(bio, rdev->bdev);
3750			count++;
3751
3752			rdev = rcu_dereference(conf->mirrors[d].replacement);
3753			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3754				rcu_read_unlock();
3755				continue;
3756			}
3757			atomic_inc(&rdev->nr_pending);
3758
3759			/* Need to set up for writing to the replacement */
3760			bio = r10_bio->devs[i].repl_bio;
3761			bio->bi_status = BLK_STS_IOERR;
 
3762
3763			sector = r10_bio->devs[i].addr;
 
3764			bio->bi_next = biolist;
3765			biolist = bio;
 
3766			bio->bi_end_io = end_sync_write;
3767			bio->bi_opf = REQ_OP_WRITE;
3768			if (test_bit(FailFast, &rdev->flags))
3769				bio->bi_opf |= MD_FAILFAST;
3770			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3771			bio_set_dev(bio, rdev->bdev);
3772			count++;
3773			rcu_read_unlock();
3774		}
3775
3776		if (count < 2) {
3777			for (i=0; i<conf->copies; i++) {
3778				int d = r10_bio->devs[i].devnum;
3779				if (r10_bio->devs[i].bio->bi_end_io)
3780					rdev_dec_pending(conf->mirrors[d].rdev,
3781							 mddev);
3782				if (r10_bio->devs[i].repl_bio &&
3783				    r10_bio->devs[i].repl_bio->bi_end_io)
3784					rdev_dec_pending(
3785						conf->mirrors[d].replacement,
3786						mddev);
3787			}
3788			put_buf(r10_bio);
3789			biolist = NULL;
3790			goto giveup;
3791		}
3792	}
3793
3794	nr_sectors = 0;
3795	if (sector_nr + max_sync < max_sector)
3796		max_sector = sector_nr + max_sync;
3797	do {
3798		struct page *page;
3799		int len = PAGE_SIZE;
3800		if (sector_nr + (len>>9) > max_sector)
3801			len = (max_sector - sector_nr) << 9;
3802		if (len == 0)
3803			break;
3804		for (bio= biolist ; bio ; bio=bio->bi_next) {
3805			struct resync_pages *rp = get_resync_pages(bio);
3806			page = resync_fetch_page(rp, page_idx);
3807			/*
3808			 * won't fail because the vec table is big enough
3809			 * to hold all these pages
3810			 */
3811			bio_add_page(bio, page, len, 0);
3812		}
3813		nr_sectors += len>>9;
3814		sector_nr += len>>9;
3815	} while (++page_idx < RESYNC_PAGES);
 
3816	r10_bio->sectors = nr_sectors;
3817
3818	if (mddev_is_clustered(mddev) &&
3819	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3820		/* It is resync not recovery */
3821		if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3822			conf->cluster_sync_low = mddev->curr_resync_completed;
3823			raid10_set_cluster_sync_high(conf);
3824			/* Send resync message */
3825			md_cluster_ops->resync_info_update(mddev,
3826						conf->cluster_sync_low,
3827						conf->cluster_sync_high);
3828		}
3829	} else if (mddev_is_clustered(mddev)) {
3830		/* This is recovery not resync */
3831		sector_t sect_va1, sect_va2;
3832		bool broadcast_msg = false;
3833
3834		for (i = 0; i < conf->geo.raid_disks; i++) {
3835			/*
3836			 * sector_nr is a device address for recovery, so we
3837			 * need to translate it to an array address before
3838			 * comparing it with cluster_sync_high.
3839			 */
3840			sect_va1 = raid10_find_virt(conf, sector_nr, i);
3841
3842			if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3843				broadcast_msg = true;
3844				/*
3845				 * curr_resync_completed is similar to
3846				 * sector_nr, so make the translation too.
3847				 */
3848				sect_va2 = raid10_find_virt(conf,
3849					mddev->curr_resync_completed, i);
3850
3851				if (conf->cluster_sync_low == 0 ||
3852				    conf->cluster_sync_low > sect_va2)
3853					conf->cluster_sync_low = sect_va2;
3854			}
3855		}
3856		if (broadcast_msg) {
3857			raid10_set_cluster_sync_high(conf);
3858			md_cluster_ops->resync_info_update(mddev,
3859						conf->cluster_sync_low,
3860						conf->cluster_sync_high);
3861		}
3862	}
3863
3864	while (biolist) {
3865		bio = biolist;
3866		biolist = biolist->bi_next;
3867
3868		bio->bi_next = NULL;
3869		r10_bio = get_resync_r10bio(bio);
3870		r10_bio->sectors = nr_sectors;
3871
3872		if (bio->bi_end_io == end_sync_read) {
3873			md_sync_acct_bio(bio, nr_sectors);
3874			bio->bi_status = 0;
3875			submit_bio_noacct(bio);
3876		}
3877	}
3878
3879	if (sectors_skipped)
3880		/* pretend they weren't skipped, it makes
3881		 * no important difference in this case
3882		 */
3883		md_done_sync(mddev, sectors_skipped, 1);
3884
3885	return sectors_skipped + nr_sectors;
3886 giveup:
3887	/* There is nowhere to write, so all non-sync
3888	 * drives must be failed or in resync, or all drives
3889	 * have a bad block, so try the next chunk...
3890	 */
3891	if (sector_nr + max_sync < max_sector)
3892		max_sector = sector_nr + max_sync;
3893
3894	sectors_skipped += (max_sector - sector_nr);
3895	chunks_skipped ++;
3896	sector_nr = max_sector;
3897	goto skipped;
3898}
3899
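/*
 * Editor's worked example (not part of the original source; the figures
 * are illustrative): for a 4-device near-2 array (near_copies = 2,
 * far_copies = 1) with 1024-sector chunks and dev_sectors = 10240000,
 * raid10_size() computes
 *     (10240000 >> 10) / 1 * 4 / 2 << 10 = 20480000
 * i.e. usable capacity is raid_disks / (near_copies * far_copies) times
 * the per-device size, here twice one device, as expected for a striped
 * pair of mirrors.
 */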
3900static sector_t
3901raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3902{
3903	sector_t size;
3904	struct r10conf *conf = mddev->private;
3905
3906	if (!raid_disks)
3907		raid_disks = min(conf->geo.raid_disks,
3908				 conf->prev.raid_disks);
3909	if (!sectors)
3910		sectors = conf->dev_sectors;
3911
3912	size = sectors >> conf->geo.chunk_shift;
3913	sector_div(size, conf->geo.far_copies);
3914	size = size * raid_disks;
3915	sector_div(size, conf->geo.near_copies);
3916
3917	return size << conf->geo.chunk_shift;
3918}
3919
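/*
 * Editor's worked example (illustrative, not from the original source):
 * for a 4-device far-2 array (near_copies = 1, far_copies = 2) with
 * 1024-sector chunks and 10240000 sectors per device, calc_sectors()
 * finds 20000 data chunks in the array, 10000 used chunks per device
 * (dev_sectors = 10240000), and, since far_offset is not set, a stride
 * of 5120000 sectors, so each far copy occupies half of every device.
 */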
3920static void calc_sectors(struct r10conf *conf, sector_t size)
3921{
3922	/* Calculate the number of sectors-per-device that will
3923	 * actually be used, and set conf->dev_sectors and
3924	 * conf->stride
3925	 */
3926
3927	size = size >> conf->geo.chunk_shift;
3928	sector_div(size, conf->geo.far_copies);
3929	size = size * conf->geo.raid_disks;
3930	sector_div(size, conf->geo.near_copies);
3931	/* 'size' is now the number of chunks in the array */
3932	/* calculate "used chunks per device" */
3933	size = size * conf->copies;
3934
3935	/* We need to round up when dividing by raid_disks to
3936	 * get the stride size.
3937	 */
3938	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3939
3940	conf->dev_sectors = size << conf->geo.chunk_shift;
3941
3942	if (conf->geo.far_offset)
3943		conf->geo.stride = 1 << conf->geo.chunk_shift;
3944	else {
3945		sector_div(size, conf->geo.far_copies);
3946		conf->geo.stride = size << conf->geo.chunk_shift;
3947	}
3948}
3949
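/*
 * Editor's note (example values, not from the original source): the
 * common mdadm "near=2" geometry is layout 0x102, which setup_geo()
 * decodes as
 *     nc = 0x102 & 255        = 2   (near_copies)
 *     fc = (0x102 >> 8) & 255 = 1   (far_copies)
 *     fo = 0x102 & (1 << 16)  = 0   (not far_offset)
 * and it returns nc * fc = 2 copies of every chunk.
 */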
3950enum geo_type {geo_new, geo_old, geo_start};
3951static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3952{
3953	int nc, fc, fo;
3954	int layout, chunk, disks;
3955	switch (new) {
3956	case geo_old:
3957		layout = mddev->layout;
3958		chunk = mddev->chunk_sectors;
3959		disks = mddev->raid_disks - mddev->delta_disks;
3960		break;
3961	case geo_new:
3962		layout = mddev->new_layout;
3963		chunk = mddev->new_chunk_sectors;
3964		disks = mddev->raid_disks;
3965		break;
3966	default: /* avoid 'may be unused' warnings */
3967	case geo_start: /* new when starting reshape - raid_disks not
3968			 * updated yet. */
3969		layout = mddev->new_layout;
3970		chunk = mddev->new_chunk_sectors;
3971		disks = mddev->raid_disks + mddev->delta_disks;
3972		break;
3973	}
3974	if (layout >> 19)
3975		return -1;
3976	if (chunk < (PAGE_SIZE >> 9) ||
3977	    !is_power_of_2(chunk))
3978		return -2;
3979	nc = layout & 255;
3980	fc = (layout >> 8) & 255;
3981	fo = layout & (1<<16);
3982	geo->raid_disks = disks;
3983	geo->near_copies = nc;
3984	geo->far_copies = fc;
3985	geo->far_offset = fo;
3986	switch (layout >> 17) {
3987	case 0:	/* original layout.  simple but not always optimal */
3988		geo->far_set_size = disks;
3989		break;
3990	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3991		 * actually using this, but leave code here just in case.*/
3992		geo->far_set_size = disks/fc;
3993		WARN(geo->far_set_size < fc,
3994		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3995		break;
3996	case 2: /* "improved" layout fixed to match documentation */
3997		geo->far_set_size = fc * nc;
3998		break;
3999	default: /* Not a valid layout */
4000		return -1;
4001	}
4002	geo->chunk_mask = chunk - 1;
4003	geo->chunk_shift = ffz(~chunk);
4004	return nc*fc;
4005}
4006
4007static struct r10conf *setup_conf(struct mddev *mddev)
4008{
4009	struct r10conf *conf = NULL;
4010	int err = -EINVAL;
4011	struct geom geo;
4012	int copies;
4013
4014	copies = setup_geo(&geo, mddev, geo_new);
4015
4016	if (copies == -2) {
4017		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
4018			mdname(mddev), PAGE_SIZE);
 
4019		goto out;
4020	}
4021
4022	if (copies < 2 || copies > mddev->raid_disks) {
4023		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
4024			mdname(mddev), mddev->new_layout);
4025		goto out;
4026	}
4027
4028	err = -ENOMEM;
4029	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
4030	if (!conf)
4031		goto out;
4032
4033	/* FIXME calc properly */
4034	conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
4035				sizeof(struct raid10_info),
4036				GFP_KERNEL);
4037	if (!conf->mirrors)
4038		goto out;
4039
4040	conf->tmppage = alloc_page(GFP_KERNEL);
4041	if (!conf->tmppage)
4042		goto out;
4043
4044	conf->geo = geo;
4045	conf->copies = copies;
4046	err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
4047			   rbio_pool_free, conf);
4048	if (err)
4049		goto out;
4050
4051	err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
4052	if (err)
4053		goto out;
4054
4055	calc_sectors(conf, mddev->dev_sectors);
4056	if (mddev->reshape_position == MaxSector) {
4057		conf->prev = conf->geo;
4058		conf->reshape_progress = MaxSector;
4059	} else {
4060		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
4061			err = -EINVAL;
4062			goto out;
4063		}
4064		conf->reshape_progress = mddev->reshape_position;
4065		if (conf->prev.far_offset)
4066			conf->prev.stride = 1 << conf->prev.chunk_shift;
4067		else
4068			/* far_copies must be 1 */
4069			conf->prev.stride = conf->dev_sectors;
4070	}
4071	conf->reshape_safe = conf->reshape_progress;
4072	spin_lock_init(&conf->device_lock);
4073	INIT_LIST_HEAD(&conf->retry_list);
4074	INIT_LIST_HEAD(&conf->bio_end_io_list);
4075
4076	seqlock_init(&conf->resync_lock);
4077	init_waitqueue_head(&conf->wait_barrier);
4078	atomic_set(&conf->nr_pending, 0);
4079
4080	err = -ENOMEM;
4081	conf->thread = md_register_thread(raid10d, mddev, "raid10");
4082	if (!conf->thread)
4083		goto out;
4084
4085	conf->mddev = mddev;
4086	return conf;
4087
4088 out:
4089	if (conf) {
4090		mempool_exit(&conf->r10bio_pool);
4091		kfree(conf->mirrors);
4092		safe_put_page(conf->tmppage);
4093		bioset_exit(&conf->bio_split);
4094		kfree(conf);
4095	}
4096	return ERR_PTR(err);
4097}
4098
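/*
 * Editor's note (illustrative figures, not from the original source):
 * for a near-2 layout over 4 devices with 1024-sector (512 KiB) chunks,
 * raid_disks % near_copies == 0, so io_opt is advertised as
 * (1024 << 9) * (4 / 2) bytes = 1 MiB, i.e. one full data stripe.
 */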
4099static void raid10_set_io_opt(struct r10conf *conf)
4100{
4101	int raid_disks = conf->geo.raid_disks;
4102
4103	if (!(conf->geo.raid_disks % conf->geo.near_copies))
4104		raid_disks /= conf->geo.near_copies;
4105	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4106			 raid_disks);
4107}
4108
4109static int raid10_run(struct mddev *mddev)
4110{
4111	struct r10conf *conf;
4112	int i, disk_idx;
4113	struct raid10_info *disk;
4114	struct md_rdev *rdev;
4115	sector_t size;
4116	sector_t min_offset_diff = 0;
4117	int first = 1;
4118
4119	if (mddev_init_writes_pending(mddev) < 0)
4120		return -ENOMEM;
4121
4122	if (mddev->private == NULL) {
4123		conf = setup_conf(mddev);
4124		if (IS_ERR(conf))
4125			return PTR_ERR(conf);
4126		mddev->private = conf;
4127	}
4128	conf = mddev->private;
4129	if (!conf)
4130		goto out;
4131
4132	if (mddev_is_clustered(conf->mddev)) {
4133		int fc, fo;
4134
4135		fc = (mddev->layout >> 8) & 255;
4136		fo = mddev->layout & (1<<16);
4137		if (fc > 1 || fo > 0) {
4138			pr_err("only near layout is supported by clustered"
4139				" raid10\n");
4140			goto out_free_conf;
4141		}
4142	}
4143
4144	mddev->thread = conf->thread;
4145	conf->thread = NULL;
4146
 
4147	if (mddev->queue) {
4148		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
4149		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
4150		raid10_set_io_opt(conf);
4151	}
4152
4153	rdev_for_each(rdev, mddev) {
4154		long long diff;
 
4155
4156		disk_idx = rdev->raid_disk;
4157		if (disk_idx < 0)
4158			continue;
4159		if (disk_idx >= conf->geo.raid_disks &&
4160		    disk_idx >= conf->prev.raid_disks)
4161			continue;
4162		disk = conf->mirrors + disk_idx;
4163
4164		if (test_bit(Replacement, &rdev->flags)) {
4165			if (disk->replacement)
4166				goto out_free_conf;
4167			disk->replacement = rdev;
4168		} else {
4169			if (disk->rdev)
4170				goto out_free_conf;
4171			disk->rdev = rdev;
4172		}
 
4173		diff = (rdev->new_data_offset - rdev->data_offset);
4174		if (!mddev->reshape_backwards)
4175			diff = -diff;
4176		if (diff < 0)
4177			diff = 0;
4178		if (first || diff < min_offset_diff)
4179			min_offset_diff = diff;
4180
4181		if (mddev->gendisk)
4182			disk_stack_limits(mddev->gendisk, rdev->bdev,
4183					  rdev->data_offset << 9);
4184
4185		disk->head_position = 0;
4186		first = 0;
4187	}
4188
4189	/* need to check that every block has at least one working mirror */
4190	if (!enough(conf, -1)) {
4191		pr_err("md/raid10:%s: not enough operational mirrors.\n",
4192		       mdname(mddev));
4193		goto out_free_conf;
4194	}
4195
4196	if (conf->reshape_progress != MaxSector) {
4197		/* must ensure that shape change is supported */
4198		if (conf->geo.far_copies != 1 &&
4199		    conf->geo.far_offset == 0)
4200			goto out_free_conf;
4201		if (conf->prev.far_copies != 1 &&
4202		    conf->prev.far_offset == 0)
4203			goto out_free_conf;
4204	}
4205
4206	mddev->degraded = 0;
4207	for (i = 0;
4208	     i < conf->geo.raid_disks
4209		     || i < conf->prev.raid_disks;
4210	     i++) {
4211
4212		disk = conf->mirrors + i;
4213
4214		if (!disk->rdev && disk->replacement) {
4215			/* The replacement is all we have - use it */
4216			disk->rdev = disk->replacement;
4217			disk->replacement = NULL;
4218			clear_bit(Replacement, &disk->rdev->flags);
4219		}
4220
4221		if (!disk->rdev ||
4222		    !test_bit(In_sync, &disk->rdev->flags)) {
4223			disk->head_position = 0;
4224			mddev->degraded++;
4225			if (disk->rdev &&
4226			    disk->rdev->saved_raid_disk < 0)
4227				conf->fullsync = 1;
4228		}
4229
4230		if (disk->replacement &&
4231		    !test_bit(In_sync, &disk->replacement->flags) &&
4232		    disk->replacement->saved_raid_disk < 0) {
4233			conf->fullsync = 1;
4234		}
4235
4236		disk->recovery_disabled = mddev->recovery_disabled - 1;
4237	}
4238
4239	if (mddev->recovery_cp != MaxSector)
4240		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
4241			  mdname(mddev));
4242	pr_info("md/raid10:%s: active with %d out of %d devices\n",
4243		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4244		conf->geo.raid_disks);
4245	/*
4246	 * Ok, everything is just fine now
4247	 */
4248	mddev->dev_sectors = conf->dev_sectors;
4249	size = raid10_size(mddev, 0, 0);
4250	md_set_array_sectors(mddev, size);
4251	mddev->resync_max_sectors = size;
4252	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4253
4254	if (md_integrity_register(mddev))
4255		goto out_free_conf;
4256
4257	if (conf->reshape_progress != MaxSector) {
4258		unsigned long before_length, after_length;
4259
4260		before_length = ((1 << conf->prev.chunk_shift) *
4261				 conf->prev.far_copies);
4262		after_length = ((1 << conf->geo.chunk_shift) *
4263				conf->geo.far_copies);
4264
4265		if (max(before_length, after_length) > min_offset_diff) {
4266			/* This cannot work */
4267			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
4268			goto out_free_conf;
4269		}
4270		conf->offset_diff = min_offset_diff;
4271
4272		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4273		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4274		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4275		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4276		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4277							"reshape");
4278		if (!mddev->sync_thread)
4279			goto out_free_conf;
4280	}
4281
4282	return 0;
4283
4284out_free_conf:
4285	md_unregister_thread(&mddev->thread);
4286	mempool_exit(&conf->r10bio_pool);
4287	safe_put_page(conf->tmppage);
4288	kfree(conf->mirrors);
4289	kfree(conf);
4290	mddev->private = NULL;
4291out:
4292	return -EIO;
4293}
4294
4295static void raid10_free(struct mddev *mddev, void *priv)
4296{
4297	struct r10conf *conf = priv;
4298
4299	mempool_exit(&conf->r10bio_pool);
4300	safe_put_page(conf->tmppage);
4301	kfree(conf->mirrors);
4302	kfree(conf->mirrors_old);
4303	kfree(conf->mirrors_new);
4304	bioset_exit(&conf->bio_split);
4305	kfree(conf);
4306}
4307
4308static void raid10_quiesce(struct mddev *mddev, int quiesce)
4309{
4310	struct r10conf *conf = mddev->private;
4311
4312	if (quiesce)
4313		raise_barrier(conf, 0);
4314	else
4315		lower_barrier(conf);
4316}
4317
4318static int raid10_resize(struct mddev *mddev, sector_t sectors)
4319{
4320	/* Resize of 'far' arrays is not supported.
4321	 * For 'near' and 'offset' arrays we can set the
4322	 * number of sectors used to be an appropriate multiple
4323	 * of the chunk size.
4324	 * For 'offset', this is far_copies*chunksize.
4325	 * For 'near' the multiplier is the LCM of
4326	 * near_copies and raid_disks.
4327	 * So if far_copies > 1 && !far_offset, fail.
4328	 * Else find LCM(raid_disks, near_copy)*far_copies and
4329	 * multiply by chunk_size.  Then round to this number.
4330	 * This is mostly done by raid10_size()
4331	 */
4332	struct r10conf *conf = mddev->private;
4333	sector_t oldsize, size;
4334
4335	if (mddev->reshape_position != MaxSector)
4336		return -EBUSY;
4337
4338	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4339		return -EINVAL;
4340
4341	oldsize = raid10_size(mddev, 0, 0);
4342	size = raid10_size(mddev, sectors, 0);
4343	if (mddev->external_size &&
4344	    mddev->array_sectors > size)
4345		return -EINVAL;
4346	if (mddev->bitmap) {
4347		int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4348		if (ret)
4349			return ret;
4350	}
4351	md_set_array_sectors(mddev, size);
4352	if (sectors > mddev->dev_sectors &&
4353	    mddev->recovery_cp > oldsize) {
4354		mddev->recovery_cp = oldsize;
4355		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4356	}
4357	calc_sectors(conf, sectors);
4358	mddev->dev_sectors = conf->dev_sectors;
4359	mddev->resync_max_sectors = size;
4360	return 0;
4361}
4362
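/*
 * Editor's note (hypothetical example, not from the original source):
 * taking over a 2-drive raid0 doubles raid_disks to 4 and selects
 * layout 0x102 (near_copies = 2, far_copies = 1); each raid0 member
 * keeps its data in slot rdev->raid_disk * 2 and the odd slots stay
 * empty so that a later recovery can populate the mirrors.
 */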
4363static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4364{
4365	struct md_rdev *rdev;
4366	struct r10conf *conf;
4367
4368	if (mddev->degraded > 0) {
4369		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
4370			mdname(mddev));
4371		return ERR_PTR(-EINVAL);
4372	}
4373	sector_div(size, devs);
4374
4375	/* Set new parameters */
4376	mddev->new_level = 10;
4377	/* new layout: far_copies = 1, near_copies = 2 */
4378	mddev->new_layout = (1<<8) + 2;
4379	mddev->new_chunk_sectors = mddev->chunk_sectors;
4380	mddev->delta_disks = mddev->raid_disks;
4381	mddev->raid_disks *= 2;
4382	/* make sure it will not be marked as dirty */
4383	mddev->recovery_cp = MaxSector;
4384	mddev->dev_sectors = size;
4385
4386	conf = setup_conf(mddev);
4387	if (!IS_ERR(conf)) {
4388		rdev_for_each(rdev, mddev)
4389			if (rdev->raid_disk >= 0) {
4390				rdev->new_raid_disk = rdev->raid_disk * 2;
4391				rdev->sectors = size;
4392			}
4393		WRITE_ONCE(conf->barrier, 1);
4394	}
4395
4396	return conf;
4397}
4398
4399static void *raid10_takeover(struct mddev *mddev)
4400{
4401	struct r0conf *raid0_conf;
4402
4403	/* raid10 can take over:
4404	 *  raid0 - provided it has only a single zone
4405	 */
4406	if (mddev->level == 0) {
4407		/* for raid0 takeover only one zone is supported */
4408		raid0_conf = mddev->private;
4409		if (raid0_conf->nr_strip_zones > 1) {
4410			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
4411				mdname(mddev));
 
4412			return ERR_PTR(-EINVAL);
4413		}
4414		return raid10_takeover_raid0(mddev,
4415			raid0_conf->strip_zone->zone_end,
4416			raid0_conf->strip_zone->nb_dev);
4417	}
4418	return ERR_PTR(-EINVAL);
4419}
4420
4421static int raid10_check_reshape(struct mddev *mddev)
4422{
4423	/* Called when there is a request to change
4424	 * - layout (to ->new_layout)
4425	 * - chunk size (to ->new_chunk_sectors)
4426	 * - raid_disks (by delta_disks)
4427	 * or when trying to restart a reshape that was ongoing.
4428	 *
4429	 * We need to validate the request and possibly allocate
4430	 * space if that might be an issue later.
4431	 *
4432	 * Currently we reject any reshape of a 'far' mode array,
4433	 * allow chunk size to change if new is generally acceptable,
4434	 * allow raid_disks to increase, and allow
4435	 * a switch between 'near' mode and 'offset' mode.
4436	 */
4437	struct r10conf *conf = mddev->private;
4438	struct geom geo;
4439
4440	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4441		return -EINVAL;
4442
4443	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4444		/* mustn't change number of copies */
4445		return -EINVAL;
4446	if (geo.far_copies > 1 && !geo.far_offset)
4447		/* Cannot switch to 'far' mode */
4448		return -EINVAL;
4449
4450	if (mddev->array_sectors & geo.chunk_mask)
4451			/* not factor of array size */
4452			return -EINVAL;
4453
4454	if (!enough(conf, -1))
4455		return -EINVAL;
4456
4457	kfree(conf->mirrors_new);
4458	conf->mirrors_new = NULL;
4459	if (mddev->delta_disks > 0) {
4460		/* allocate new 'mirrors' list */
4461		conf->mirrors_new =
4462			kcalloc(mddev->raid_disks + mddev->delta_disks,
4463				sizeof(struct raid10_info),
4464				GFP_KERNEL);
 
4465		if (!conf->mirrors_new)
4466			return -ENOMEM;
4467	}
4468	return 0;
4469}
4470
4471/*
4472 * Need to check if array has failed when deciding whether to:
4473 *  - start an array
4474 *  - remove non-faulty devices
4475 *  - add a spare
4476 *  - allow a reshape
4477 * This determination is simple when no reshape is happening.
4478 * However if there is a reshape, we need to carefully check
4479 * both the before and after sections.
4480 * This is because some failed devices may only affect one
4481 * of the two sections, and some non-in_sync devices may
4482 * be insync in the section most affected by failed devices.
4483 */
4484static int calc_degraded(struct r10conf *conf)
4485{
4486	int degraded, degraded2;
4487	int i;
4488
4489	rcu_read_lock();
4490	degraded = 0;
4491	/* 'prev' section first */
4492	for (i = 0; i < conf->prev.raid_disks; i++) {
4493		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4494		if (!rdev || test_bit(Faulty, &rdev->flags))
4495			degraded++;
4496		else if (!test_bit(In_sync, &rdev->flags))
4497			/* When we can reduce the number of devices in
4498			 * an array, this might not contribute to
4499			 * 'degraded'.  It does now.
4500			 */
4501			degraded++;
4502	}
4503	rcu_read_unlock();
4504	if (conf->geo.raid_disks == conf->prev.raid_disks)
4505		return degraded;
4506	rcu_read_lock();
4507	degraded2 = 0;
4508	for (i = 0; i < conf->geo.raid_disks; i++) {
4509		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4510		if (!rdev || test_bit(Faulty, &rdev->flags))
4511			degraded2++;
4512		else if (!test_bit(In_sync, &rdev->flags)) {
4513			/* If reshape is increasing the number of devices,
4514			 * this section has already been recovered, so
4515			 * it doesn't contribute to degraded.
4516			 * else it does.
4517			 */
4518			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4519				degraded2++;
4520		}
4521	}
4522	rcu_read_unlock();
4523	if (degraded2 > degraded)
4524		return degraded2;
4525	return degraded;
4526}
4527
4528static int raid10_start_reshape(struct mddev *mddev)
4529{
4530	/* A 'reshape' has been requested. This commits
4531	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
4532	 * This also checks if there are enough spares and adds them
4533	 * to the array.
4534	 * We currently require enough spares to make the final
4535	 * array non-degraded.  We also require that the difference
4536	 * between old and new data_offset - on each device - is
4537	 * enough that we never risk over-writing.
4538	 */
4539
4540	unsigned long before_length, after_length;
4541	sector_t min_offset_diff = 0;
4542	int first = 1;
4543	struct geom new;
4544	struct r10conf *conf = mddev->private;
4545	struct md_rdev *rdev;
4546	int spares = 0;
4547	int ret;
4548
4549	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4550		return -EBUSY;
4551
4552	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4553		return -EINVAL;
4554
4555	before_length = ((1 << conf->prev.chunk_shift) *
4556			 conf->prev.far_copies);
4557	after_length = ((1 << conf->geo.chunk_shift) *
4558			conf->geo.far_copies);
4559
4560	rdev_for_each(rdev, mddev) {
4561		if (!test_bit(In_sync, &rdev->flags)
4562		    && !test_bit(Faulty, &rdev->flags))
4563			spares++;
4564		if (rdev->raid_disk >= 0) {
4565			long long diff = (rdev->new_data_offset
4566					  - rdev->data_offset);
4567			if (!mddev->reshape_backwards)
4568				diff = -diff;
4569			if (diff < 0)
4570				diff = 0;
4571			if (first || diff < min_offset_diff)
4572				min_offset_diff = diff;
4573			first = 0;
4574		}
4575	}
4576
4577	if (max(before_length, after_length) > min_offset_diff)
4578		return -EINVAL;
4579
4580	if (spares < mddev->delta_disks)
4581		return -EINVAL;
4582
4583	conf->offset_diff = min_offset_diff;
4584	spin_lock_irq(&conf->device_lock);
4585	if (conf->mirrors_new) {
4586		memcpy(conf->mirrors_new, conf->mirrors,
4587		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4588		smp_mb();
4589		kfree(conf->mirrors_old);
4590		conf->mirrors_old = conf->mirrors;
4591		conf->mirrors = conf->mirrors_new;
4592		conf->mirrors_new = NULL;
4593	}
4594	setup_geo(&conf->geo, mddev, geo_start);
4595	smp_mb();
4596	if (mddev->reshape_backwards) {
4597		sector_t size = raid10_size(mddev, 0, 0);
4598		if (size < mddev->array_sectors) {
4599			spin_unlock_irq(&conf->device_lock);
4600			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4601				mdname(mddev));
4602			return -EINVAL;
4603		}
4604		mddev->resync_max_sectors = size;
4605		conf->reshape_progress = size;
4606	} else
4607		conf->reshape_progress = 0;
4608	conf->reshape_safe = conf->reshape_progress;
4609	spin_unlock_irq(&conf->device_lock);
4610
4611	if (mddev->delta_disks && mddev->bitmap) {
4612		struct mdp_superblock_1 *sb = NULL;
4613		sector_t oldsize, newsize;
4614
4615		oldsize = raid10_size(mddev, 0, 0);
4616		newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4617
4618		if (!mddev_is_clustered(mddev)) {
4619			ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4620			if (ret)
4621				goto abort;
4622			else
4623				goto out;
4624		}
4625
4626		rdev_for_each(rdev, mddev) {
4627			if (rdev->raid_disk > -1 &&
4628			    !test_bit(Faulty, &rdev->flags))
4629				sb = page_address(rdev->sb_page);
4630		}
4631
4632		/*
4633		 * some node is already performing the reshape, so there is no
4634		 * need to call md_bitmap_resize again since it will be called
4635		 * when the BITMAP_RESIZE msg is received
4636		 */
4637		if ((sb && (le32_to_cpu(sb->feature_map) &
4638			    MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
4639			goto out;
4640
4641		ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4642		if (ret)
4643			goto abort;
4644
4645		ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4646		if (ret) {
4647			md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4648			goto abort;
4649		}
4650	}
4651out:
4652	if (mddev->delta_disks > 0) {
4653		rdev_for_each(rdev, mddev)
4654			if (rdev->raid_disk < 0 &&
4655			    !test_bit(Faulty, &rdev->flags)) {
4656				if (raid10_add_disk(mddev, rdev) == 0) {
4657					if (rdev->raid_disk >=
4658					    conf->prev.raid_disks)
4659						set_bit(In_sync, &rdev->flags);
4660					else
4661						rdev->recovery_offset = 0;
4662
4663					/* Failure here is OK */
4664					sysfs_link_rdev(mddev, rdev);
4665				}
4666			} else if (rdev->raid_disk >= conf->prev.raid_disks
4667				   && !test_bit(Faulty, &rdev->flags)) {
4668				/* This is a spare that was manually added */
4669				set_bit(In_sync, &rdev->flags);
4670			}
4671	}
4672	/* When a reshape changes the number of devices,
4673	 * ->degraded is measured against the larger of the
4674	 * pre and  post numbers.
4675	 */
4676	spin_lock_irq(&conf->device_lock);
4677	mddev->degraded = calc_degraded(conf);
4678	spin_unlock_irq(&conf->device_lock);
4679	mddev->raid_disks = conf->geo.raid_disks;
4680	mddev->reshape_position = conf->reshape_progress;
4681	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4682
4683	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4684	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4685	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4686	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4687	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4688
4689	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4690						"reshape");
4691	if (!mddev->sync_thread) {
4692		ret = -EAGAIN;
4693		goto abort;
4694	}
4695	conf->reshape_checkpoint = jiffies;
4696	md_wakeup_thread(mddev->sync_thread);
4697	md_new_event();
4698	return 0;
4699
4700abort:
4701	mddev->recovery = 0;
4702	spin_lock_irq(&conf->device_lock);
4703	conf->geo = conf->prev;
4704	mddev->raid_disks = conf->geo.raid_disks;
4705	rdev_for_each(rdev, mddev)
4706		rdev->new_data_offset = rdev->data_offset;
4707	smp_wmb();
4708	conf->reshape_progress = MaxSector;
4709	conf->reshape_safe = MaxSector;
4710	mddev->reshape_position = MaxSector;
4711	spin_unlock_irq(&conf->device_lock);
4712	return ret;
4713}
4714
4715/* Calculate the last device-address that could contain
4716 * any block from the chunk that includes the array-address 's'
4717 * and report the next address.
4718 * i.e. the address returned will be chunk-aligned and after
4719 * any data that is in the chunk containing 's'.
4720 */
4721static sector_t last_dev_address(sector_t s, struct geom *geo)
4722{
4723	s = (s | geo->chunk_mask) + 1;
4724	s >>= geo->chunk_shift;
4725	s *= geo->near_copies;
4726	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4727	s *= geo->far_copies;
4728	s <<= geo->chunk_shift;
4729	return s;
4730}
4731
4732/* Calculate the first device-address that could contain
4733 * any block from the chunk that includes the array-address 's'.
4734 * This too will be the start of a chunk
4735 */
4736static sector_t first_dev_address(sector_t s, struct geom *geo)
4737{
4738	s >>= geo->chunk_shift;
4739	s *= geo->near_copies;
4740	sector_div(s, geo->raid_disks);
4741	s *= geo->far_copies;
4742	s <<= geo->chunk_shift;
4743	return s;
4744}
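/*
 * Editor's worked example (illustrative, not from the original source):
 * on a 4-device near-2 array with 1024-sector chunks, array address
 * 5000 lies in array chunk 4, which is stored in device chunk 2 of two
 * of the devices; first_dev_address(5000) = 2048 and
 * last_dev_address(5000) = 3072 bracket that device chunk.
 */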
4745
4746static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4747				int *skipped)
4748{
4749	/* We simply copy at most one chunk (smallest of old and new)
4750	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4751	 * or we hit a bad block or something.
4752	 * This might mean we pause for normal IO in the middle of
4753	 * a chunk, but that is not a problem as mddev->reshape_position
4754	 * can record any location.
4755	 *
4756	 * If we will want to write to a location that isn't
4757	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4758	 * we need to flush all reshape requests and update the metadata.
4759	 *
4760	 * When reshaping forwards (e.g. to more devices), we interpret
4761	 * 'safe' as the earliest block which might not have been copied
4762	 * down yet.  We divide this by previous stripe size and multiply
4763	 * by previous stripe length to get lowest device offset that we
4764	 * cannot write to yet.
4765	 * We interpret 'sector_nr' as an address that we want to write to.
4766	 * From this we use last_dev_address() to find where we might
4767	 * write to, and first_dev_address() on the 'safe' position.
4768	 * If this 'next' write position is after the 'safe' position,
4769	 * we must update the metadata to increase the 'safe' position.
4770	 *
4771	 * When reshaping backwards, we round in the opposite direction
4772	 * and perform the reverse test:  next write position must not be
4773	 * less than current safe position.
4774	 *
4775	 * In all this the minimum difference in data offsets
4776	 * (conf->offset_diff - always positive) allows a bit of slack,
4777	 * so next can be after 'safe', but not by more than offset_diff
4778	 *
4779	 * We need to prepare all the bios here before we start any IO
4780	 * to ensure the size we choose is acceptable to all devices.
4781	 * That means one for each copy for write-out and an extra one for
4782	 * read-in.
4783	 * We store the read-in bio in ->master_bio and the others in
4784	 * ->devs[x].bio and ->devs[x].repl_bio.
4785	 */
4786	struct r10conf *conf = mddev->private;
4787	struct r10bio *r10_bio;
4788	sector_t next, safe, last;
4789	int max_sectors;
4790	int nr_sectors;
4791	int s;
4792	struct md_rdev *rdev;
4793	int need_flush = 0;
4794	struct bio *blist;
4795	struct bio *bio, *read_bio;
4796	int sectors_done = 0;
4797	struct page **pages;
4798
4799	if (sector_nr == 0) {
4800		/* If restarting in the middle, skip the initial sectors */
4801		if (mddev->reshape_backwards &&
4802		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4803			sector_nr = (raid10_size(mddev, 0, 0)
4804				     - conf->reshape_progress);
4805		} else if (!mddev->reshape_backwards &&
4806			   conf->reshape_progress > 0)
4807			sector_nr = conf->reshape_progress;
4808		if (sector_nr) {
4809			mddev->curr_resync_completed = sector_nr;
4810			sysfs_notify_dirent_safe(mddev->sysfs_completed);
4811			*skipped = 1;
4812			return sector_nr;
4813		}
4814	}
4815
4816	/* We don't use sector_nr to track where we are up to
4817	 * as that doesn't work well for ->reshape_backwards.
4818	 * So just use ->reshape_progress.
4819	 */
4820	if (mddev->reshape_backwards) {
4821		/* 'next' is the earliest device address that we might
4822		 * write to for this chunk in the new layout
4823		 */
4824		next = first_dev_address(conf->reshape_progress - 1,
4825					 &conf->geo);
4826
4827		/* 'safe' is the last device address that we might read from
4828		 * in the old layout after a restart
4829		 */
4830		safe = last_dev_address(conf->reshape_safe - 1,
4831					&conf->prev);
4832
4833		if (next + conf->offset_diff < safe)
4834			need_flush = 1;
4835
4836		last = conf->reshape_progress - 1;
4837		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4838					       & conf->prev.chunk_mask);
4839		if (sector_nr + RESYNC_SECTORS < last)
4840			sector_nr = last + 1 - RESYNC_SECTORS;
4841	} else {
4842		/* 'next' is after the last device address that we
4843		 * might write to for this chunk in the new layout
4844		 */
4845		next = last_dev_address(conf->reshape_progress, &conf->geo);
4846
4847		/* 'safe' is the earliest device address that we might
4848		 * read from in the old layout after a restart
4849		 */
4850		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4851
4852		/* Need to update metadata if 'next' might be beyond 'safe'
4853		 * as that would possibly corrupt data
4854		 */
4855		if (next > safe + conf->offset_diff)
4856			need_flush = 1;
4857
4858		sector_nr = conf->reshape_progress;
4859		last  = sector_nr | (conf->geo.chunk_mask
4860				     & conf->prev.chunk_mask);
4861
4862		if (sector_nr + RESYNC_SECTORS <= last)
4863			last = sector_nr + RESYNC_SECTORS - 1;
4864	}
4865
4866	if (need_flush ||
4867	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4868		/* Need to update reshape_position in metadata */
4869		wait_barrier(conf, false);
4870		mddev->reshape_position = conf->reshape_progress;
4871		if (mddev->reshape_backwards)
4872			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4873				- conf->reshape_progress;
4874		else
4875			mddev->curr_resync_completed = conf->reshape_progress;
4876		conf->reshape_checkpoint = jiffies;
4877		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4878		md_wakeup_thread(mddev->thread);
4879		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4880			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4881		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4882			allow_barrier(conf);
4883			return sectors_done;
4884		}
4885		conf->reshape_safe = mddev->reshape_position;
4886		allow_barrier(conf);
4887	}
4888
4889	raise_barrier(conf, 0);
4890read_more:
4891	/* Now schedule reads for blocks from sector_nr to last */
4892	r10_bio = raid10_alloc_init_r10buf(conf);
4893	r10_bio->state = 0;
4894	raise_barrier(conf, 1);
4895	atomic_set(&r10_bio->remaining, 0);
4896	r10_bio->mddev = mddev;
4897	r10_bio->sector = sector_nr;
4898	set_bit(R10BIO_IsReshape, &r10_bio->state);
4899	r10_bio->sectors = last - sector_nr + 1;
4900	rdev = read_balance(conf, r10_bio, &max_sectors);
4901	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4902
4903	if (!rdev) {
4904		/* Cannot read from here, so need to record bad blocks
4905		 * on all the target devices.
4906		 */
4907		// FIXME
4908		mempool_free(r10_bio, &conf->r10buf_pool);
4909		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4910		return sectors_done;
4911	}
4912
4913	read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4914				    GFP_KERNEL, &mddev->bio_set);
 
4915	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4916			       + rdev->data_offset);
4917	read_bio->bi_private = r10_bio;
4918	read_bio->bi_end_io = end_reshape_read;
4919	r10_bio->master_bio = read_bio;
4920	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4921
4922	/*
4923	 * Broadcast a RESYNC message to the other nodes, so that no node
4924	 * writes to this region and causes a conflict.
4925	 */
4926	if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4927		struct mdp_superblock_1 *sb = NULL;
4928		int sb_reshape_pos = 0;
4929
4930		conf->cluster_sync_low = sector_nr;
4931		conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4932		sb = page_address(rdev->sb_page);
4933		if (sb) {
4934			sb_reshape_pos = le64_to_cpu(sb->reshape_position);
4935			/*
4936			 * Set cluster_sync_low again if the next address for the
4937			 * array reshape is less than cluster_sync_low, since we can't
4938			 * update cluster_sync_low until that region finishes reshaping.
4939			 */
4940			if (sb_reshape_pos < conf->cluster_sync_low)
4941				conf->cluster_sync_low = sb_reshape_pos;
4942		}
4943
4944		md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4945							  conf->cluster_sync_high);
4946	}
4947
4948	/* Now find the locations in the new layout */
4949	__raid10_find_phys(&conf->geo, r10_bio);
4950
4951	blist = read_bio;
4952	read_bio->bi_next = NULL;
4953
4954	rcu_read_lock();
4955	for (s = 0; s < conf->copies*2; s++) {
4956		struct bio *b;
4957		int d = r10_bio->devs[s/2].devnum;
4958		struct md_rdev *rdev2;
4959		if (s&1) {
4960			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4961			b = r10_bio->devs[s/2].repl_bio;
4962		} else {
4963			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4964			b = r10_bio->devs[s/2].bio;
4965		}
4966		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4967			continue;
4968
4969		bio_set_dev(b, rdev2->bdev);
 
4970		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4971			rdev2->new_data_offset;
 
4972		b->bi_end_io = end_reshape_write;
4973		b->bi_opf = REQ_OP_WRITE;
4974		b->bi_next = blist;
4975		blist = b;
4976	}
4977
4978	/* Now add as many pages as possible to all of these bios. */
4979
4980	nr_sectors = 0;
4981	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4982	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4983		struct page *page = pages[s / (PAGE_SIZE >> 9)];
4984		int len = (max_sectors - s) << 9;
4985		if (len > PAGE_SIZE)
4986			len = PAGE_SIZE;
4987		for (bio = blist; bio ; bio = bio->bi_next) {
4988			/*
4989			 * won't fail because the vec table is big enough
4990			 * to hold all these pages
4991			 */
4992			bio_add_page(bio, page, len, 0);
4993		}
4994		sector_nr += len >> 9;
4995		nr_sectors += len >> 9;
4996	}
4997	rcu_read_unlock();
4998	r10_bio->sectors = nr_sectors;
4999
5000	/* Now submit the read */
5001	md_sync_acct_bio(read_bio, r10_bio->sectors);
5002	atomic_inc(&r10_bio->remaining);
5003	read_bio->bi_next = NULL;
5004	submit_bio_noacct(read_bio);
 
5005	sectors_done += nr_sectors;
5006	if (sector_nr <= last)
5007		goto read_more;
5008
5009	lower_barrier(conf);
5010
5011	/* Now that we have done the whole section we can
5012	 * update reshape_progress
5013	 */
5014	if (mddev->reshape_backwards)
5015		conf->reshape_progress -= sectors_done;
5016	else
5017		conf->reshape_progress += sectors_done;
5018
5019	return sectors_done;
5020}
5021
5022static void end_reshape_request(struct r10bio *r10_bio);
5023static int handle_reshape_read_error(struct mddev *mddev,
5024				     struct r10bio *r10_bio);
5025static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
5026{
5027	/* Reshape read completed.  Hopefully we have a block
5028	 * to write out.
5029	 * If we got a read error then we do sync 1-page reads from
5030	 * elsewhere until we find the data - or give up.
5031	 */
5032	struct r10conf *conf = mddev->private;
5033	int s;
5034
5035	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
5036		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
5037			/* Reshape has been aborted */
5038			md_done_sync(mddev, r10_bio->sectors, 0);
5039			return;
5040		}
5041
5042	/* We definitely have the data in the pages, schedule the
5043	 * writes.
5044	 */
5045	atomic_set(&r10_bio->remaining, 1);
5046	for (s = 0; s < conf->copies*2; s++) {
5047		struct bio *b;
5048		int d = r10_bio->devs[s/2].devnum;
5049		struct md_rdev *rdev;
5050		rcu_read_lock();
5051		if (s&1) {
5052			rdev = rcu_dereference(conf->mirrors[d].replacement);
5053			b = r10_bio->devs[s/2].repl_bio;
5054		} else {
5055			rdev = rcu_dereference(conf->mirrors[d].rdev);
5056			b = r10_bio->devs[s/2].bio;
5057		}
5058		if (!rdev || test_bit(Faulty, &rdev->flags)) {
5059			rcu_read_unlock();
5060			continue;
5061		}
5062		atomic_inc(&rdev->nr_pending);
5063		rcu_read_unlock();
5064		md_sync_acct_bio(b, r10_bio->sectors);
5065		atomic_inc(&r10_bio->remaining);
5066		b->bi_next = NULL;
5067		submit_bio_noacct(b);
5068	}
5069	end_reshape_request(r10_bio);
5070}
5071
5072static void end_reshape(struct r10conf *conf)
5073{
5074	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
5075		return;
5076
5077	spin_lock_irq(&conf->device_lock);
5078	conf->prev = conf->geo;
5079	md_finish_reshape(conf->mddev);
5080	smp_wmb();
5081	conf->reshape_progress = MaxSector;
5082	conf->reshape_safe = MaxSector;
5083	spin_unlock_irq(&conf->device_lock);
5084
5085	if (conf->mddev->queue)
5086		raid10_set_io_opt(conf);
5087	conf->fullsync = 0;
5088}
5089
5090static void raid10_update_reshape_pos(struct mddev *mddev)
5091{
5092	struct r10conf *conf = mddev->private;
5093	sector_t lo, hi;
5094
5095	md_cluster_ops->resync_info_get(mddev, &lo, &hi);
5096	if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
5097	    || mddev->reshape_position == MaxSector)
5098		conf->reshape_progress = mddev->reshape_position;
5099	else
5100		WARN_ON_ONCE(1);
5101}
5102
5103static int handle_reshape_read_error(struct mddev *mddev,
5104				     struct r10bio *r10_bio)
5105{
5106	/* Use sync reads to get the blocks from somewhere else */
5107	int sectors = r10_bio->sectors;
5108	struct r10conf *conf = mddev->private;
5109	struct r10bio *r10b;
5110	int slot = 0;
5111	int idx = 0;
5112	struct page **pages;
5113
5114	r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
5115	if (!r10b) {
5116		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5117		return -ENOMEM;
5118	}
5119
5120	/* reshape IOs share pages from .devs[0].bio */
5121	pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5122
5123	r10b->sector = r10_bio->sector;
5124	__raid10_find_phys(&conf->prev, r10b);
5125
5126	while (sectors) {
5127		int s = sectors;
5128		int success = 0;
5129		int first_slot = slot;
5130
5131		if (s > (PAGE_SIZE >> 9))
5132			s = PAGE_SIZE >> 9;
5133
5134		rcu_read_lock();
5135		while (!success) {
5136			int d = r10b->devs[slot].devnum;
5137			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5138			sector_t addr;
5139			if (rdev == NULL ||
5140			    test_bit(Faulty, &rdev->flags) ||
5141			    !test_bit(In_sync, &rdev->flags))
5142				goto failed;
5143
5144			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
5145			atomic_inc(&rdev->nr_pending);
5146			rcu_read_unlock();
5147			success = sync_page_io(rdev,
5148					       addr,
5149					       s << 9,
5150					       pages[idx],
5151					       REQ_OP_READ, false);
5152			rdev_dec_pending(rdev, mddev);
5153			rcu_read_lock();
5154			if (success)
5155				break;
5156		failed:
5157			slot++;
5158			if (slot >= conf->copies)
5159				slot = 0;
5160			if (slot == first_slot)
5161				break;
5162		}
5163		rcu_read_unlock();
5164		if (!success) {
5165			/* couldn't read this block, must give up */
5166			set_bit(MD_RECOVERY_INTR,
5167				&mddev->recovery);
5168			kfree(r10b);
5169			return -EIO;
5170		}
5171		sectors -= s;
5172		idx++;
5173	}
5174	kfree(r10b);
5175	return 0;
5176}
5177
5178static void end_reshape_write(struct bio *bio)
5179{
5180	struct r10bio *r10_bio = get_resync_r10bio(bio);
5181	struct mddev *mddev = r10_bio->mddev;
5182	struct r10conf *conf = mddev->private;
5183	int d;
5184	int slot;
5185	int repl;
5186	struct md_rdev *rdev = NULL;
5187
5188	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5189	if (repl)
5190		rdev = conf->mirrors[d].replacement;
5191	if (!rdev) {
5192		smp_mb();
5193		rdev = conf->mirrors[d].rdev;
5194	}
5195
5196	if (bio->bi_status) {
5197		/* FIXME should record badblock */
5198		md_error(mddev, rdev);
5199	}
5200
5201	rdev_dec_pending(rdev, mddev);
5202	end_reshape_request(r10_bio);
5203}
5204
5205static void end_reshape_request(struct r10bio *r10_bio)
5206{
5207	if (!atomic_dec_and_test(&r10_bio->remaining))
5208		return;
5209	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5210	bio_put(r10_bio->master_bio);
5211	put_buf(r10_bio);
5212}
5213
5214static void raid10_finish_reshape(struct mddev *mddev)
5215{
5216	struct r10conf *conf = mddev->private;
5217
5218	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5219		return;
5220
5221	if (mddev->delta_disks > 0) {
5222		if (mddev->recovery_cp > mddev->resync_max_sectors) {
5223			mddev->recovery_cp = mddev->resync_max_sectors;
5224			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5225		}
5226		mddev->resync_max_sectors = mddev->array_sectors;
5227	} else {
5228		int d;
5229		rcu_read_lock();
5230		for (d = conf->geo.raid_disks ;
5231		     d < conf->geo.raid_disks - mddev->delta_disks;
5232		     d++) {
5233			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5234			if (rdev)
5235				clear_bit(In_sync, &rdev->flags);
5236			rdev = rcu_dereference(conf->mirrors[d].replacement);
5237			if (rdev)
5238				clear_bit(In_sync, &rdev->flags);
5239		}
5240		rcu_read_unlock();
5241	}
5242	mddev->layout = mddev->new_layout;
5243	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5244	mddev->reshape_position = MaxSector;
5245	mddev->delta_disks = 0;
5246	mddev->reshape_backwards = 0;
5247}
5248
5249static struct md_personality raid10_personality =
5250{
5251	.name		= "raid10",
5252	.level		= 10,
5253	.owner		= THIS_MODULE,
5254	.make_request	= raid10_make_request,
5255	.run		= raid10_run,
5256	.free		= raid10_free,
5257	.status		= raid10_status,
5258	.error_handler	= raid10_error,
5259	.hot_add_disk	= raid10_add_disk,
5260	.hot_remove_disk= raid10_remove_disk,
5261	.spare_active	= raid10_spare_active,
5262	.sync_request	= raid10_sync_request,
5263	.quiesce	= raid10_quiesce,
5264	.size		= raid10_size,
5265	.resize		= raid10_resize,
5266	.takeover	= raid10_takeover,
5267	.check_reshape	= raid10_check_reshape,
5268	.start_reshape	= raid10_start_reshape,
5269	.finish_reshape	= raid10_finish_reshape,
5270	.update_reshape_pos = raid10_update_reshape_pos,
5271};
5272
5273static int __init raid_init(void)
5274{
5275	return register_md_personality(&raid10_personality);
5276}
5277
5278static void raid_exit(void)
5279{
5280	unregister_md_personality(&raid10_personality);
5281}
5282
5283module_init(raid_init);
5284module_exit(raid_exit);
5285MODULE_LICENSE("GPL");
5286MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
5287MODULE_ALIAS("md-personality-9"); /* RAID10 */
5288MODULE_ALIAS("md-raid10");
5289MODULE_ALIAS("md-level-10");
v4.6
 
   1/*
   2 * raid10.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 2000-2004 Neil Brown
   5 *
   6 * RAID-10 support for md.
   7 *
   8 * Base on code in raid1.c.  See raid1.c for further copyright information.
   9 *
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21#include <linux/slab.h>
  22#include <linux/delay.h>
  23#include <linux/blkdev.h>
  24#include <linux/module.h>
  25#include <linux/seq_file.h>
  26#include <linux/ratelimit.h>
  27#include <linux/kthread.h>
 
 
  28#include "md.h"
  29#include "raid10.h"
  30#include "raid0.h"
  31#include "bitmap.h"
  32
  33/*
  34 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  35 * The layout of data is defined by
  36 *    chunk_size
  37 *    raid_disks
  38 *    near_copies (stored in low byte of layout)
  39 *    far_copies (stored in second byte of layout)
  40 *    far_offset (stored in bit 16 of layout )
  41 *    use_far_sets (stored in bit 17 of layout )
  42 *    use_far_sets_bugfixed (stored in bit 18 of layout )
  43 *
  44 * The data to be stored is divided into chunks using chunksize.  Each device
  45 * is divided into far_copies sections.   In each section, chunks are laid out
  46 * in a style similar to raid0, but near_copies copies of each chunk is stored
  47 * (each on a different drive).  The starting device for each section is offset
  48 * near_copies from the starting device of the previous section.  Thus there
  49 * are (near_copies * far_copies) of each chunk, and each is on a different
  50 * drive.  near_copies and far_copies must be at least one, and their product
  51 * is at most raid_disks.
  52 *
  53 * If far_offset is true, then the far_copies are handled a bit differently.
  54 * The copies are still in different stripes, but instead of being very far
  55 * apart on disk, there are adjacent stripes.
  56 *
  57 * The far and offset algorithms are handled slightly differently if
  58 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
  59 * sets that are (near_copies * far_copies) in size.  The far copied stripes
  60 * are still shifted by 'near_copies' devices, but this shifting stays confined
  61 * to the set rather than the entire array.  This is done to improve the number
  62 * of device combinations that can fail without causing the array to fail.
  63 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
  64 * on a device):
  65 *    A B C D    A B C D E
  66 *      ...         ...
  67 *    D A B C    E A B C D
  68 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
  69 *    [A B] [C D]    [A B] [C D E]
  70 *    |...| |...|    |...| | ... |
  71 *    [B A] [D C]    [B A] [E C D]
  72 */
  73
  74/*
  75 * Number of guaranteed r10bios in case of extreme VM load:
  76 */
  77#define	NR_RAID10_BIOS 256
  78
  79/* when we get a read error on a read-only array, we redirect to another
  80 * device without failing the first device, or trying to over-write to
  81 * correct the read error.  To keep track of bad blocks on a per-bio
  82 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  83 */
  84#define IO_BLOCKED ((struct bio *)1)
  85/* When we successfully write to a known bad-block, we need to remove the
  86 * bad-block marking which must be done from process context.  So we record
  87 * the success by setting devs[n].bio to IO_MADE_GOOD
  88 */
  89#define IO_MADE_GOOD ((struct bio *)2)
  90
  91#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
  92
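/*
 * Standalone userspace sketch (illustration only, not driver code): the two
 * macros above reuse the per-device bio pointer slots as cheap state markers,
 * and BIO_SPECIAL() filters those sentinels out before any real bio is
 * dereferenced or freed.  The classify() helper and the fake pointer value
 * below are made up for the example.
 */
#include <stdio.h>

struct bio;				/* opaque, as in the driver */
#define IO_BLOCKED	((struct bio *)1)
#define IO_MADE_GOOD	((struct bio *)2)
#define BIO_SPECIAL(bio)	((unsigned long)(bio) <= 2)

static const char *classify(struct bio *bio)
{
	if (!bio)
		return "no request recorded";
	if (bio == IO_BLOCKED)
		return "read blocked by a known bad block";
	if (bio == IO_MADE_GOOD)
		return "write succeeded over a known bad block";
	return "real bio -- safe to submit or bio_put()";
}

int main(void)
{
	struct bio *real = (struct bio *)0x1000;	/* stand-in for a real pointer */

	printf("%d %s\n", BIO_SPECIAL(IO_BLOCKED), classify(IO_BLOCKED));
	printf("%d %s\n", BIO_SPECIAL(IO_MADE_GOOD), classify(IO_MADE_GOOD));
	printf("%d %s\n", BIO_SPECIAL(real), classify(real));
	return 0;
}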
  93/* When there are this many requests queued to be written by
  94 * the raid10 thread, we become 'congested' to provide back-pressure
  95 * for writeback.
  96 */
  97static int max_queued_requests = 1024;
  98
  99static void allow_barrier(struct r10conf *conf);
 100static void lower_barrier(struct r10conf *conf);
 101static int _enough(struct r10conf *conf, int previous, int ignore);
 
 102static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 103				int *skipped);
 104static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
 105static void end_reshape_write(struct bio *bio);
 106static void end_reshape(struct r10conf *conf);
 107
 108static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 109{
 110	struct r10conf *conf = data;
 111	int size = offsetof(struct r10bio, devs[conf->copies]);
 112
  113	/* allocate a r10bio with room for conf->copies entries in the
  114	 * devs array */
 115	return kzalloc(size, gfp_flags);
 116}
 117
 118static void r10bio_pool_free(void *r10_bio, void *data)
 119{
 120	kfree(r10_bio);
 121}
 122
 123/* Maximum size of each resync request */
 124#define RESYNC_BLOCK_SIZE (64*1024)
 125#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 126/* amount of memory to reserve for resync requests */
 127#define RESYNC_WINDOW (1024*1024)
 128/* maximum number of concurrent requests, memory permitting */
 129#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
 
 
 130
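/*
 * Worked numbers for the macros above, assuming the common PAGE_SIZE of
 * 4096 bytes (the values shift accordingly on other page sizes):
 *   RESYNC_PAGES = (64*1024 + 4096 - 1) / 4096 = 16 pages per resync request
 *   RESYNC_WINDOW / RESYNC_BLOCK_SIZE = 1 MiB / 64 KiB = 16 requests' worth
 *   of reserved memory
 *   RESYNC_DEPTH = 32 MiB / 64 KiB = 512, the barrier cap tested in
 *   raise_barrier() below
 */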
 131/*
 132 * When performing a resync, we need to read and compare, so
  133 * we need as many pages as there are copies.
 134 * When performing a recovery, we need 2 bios, one for read,
 135 * one for write (we recover only one drive per r10buf)
 136 *
 137 */
 138static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 139{
 140	struct r10conf *conf = data;
 141	struct page *page;
 142	struct r10bio *r10_bio;
 143	struct bio *bio;
 144	int i, j;
 145	int nalloc;
 
 146
 147	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 148	if (!r10_bio)
 149		return NULL;
 150
 151	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
 152	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
 153		nalloc = conf->copies; /* resync */
 154	else
 155		nalloc = 2; /* recovery */
 156
 157	/*
 158	 * Allocate bios.
 159	 */
 160	for (j = nalloc ; j-- ; ) {
 161		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 162		if (!bio)
 163			goto out_free_bio;
 
 164		r10_bio->devs[j].bio = bio;
 165		if (!conf->have_replacement)
 166			continue;
 167		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 168		if (!bio)
 169			goto out_free_bio;
 
 170		r10_bio->devs[j].repl_bio = bio;
 171	}
 172	/*
 173	 * Allocate RESYNC_PAGES data pages and attach them
 174	 * where needed.
 175	 */
 176	for (j = 0 ; j < nalloc; j++) {
 177		struct bio *rbio = r10_bio->devs[j].repl_bio;
 178		bio = r10_bio->devs[j].bio;
 179		for (i = 0; i < RESYNC_PAGES; i++) {
 180			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
 181					       &conf->mddev->recovery)) {
 182				/* we can share bv_page's during recovery
 183				 * and reshape */
 184				struct bio *rbio = r10_bio->devs[0].bio;
 185				page = rbio->bi_io_vec[i].bv_page;
 186				get_page(page);
 187			} else
 188				page = alloc_page(gfp_flags);
 189			if (unlikely(!page))
 190				goto out_free_pages;
 
 
 
 
 191
 192			bio->bi_io_vec[i].bv_page = page;
 193			if (rbio)
 194				rbio->bi_io_vec[i].bv_page = page;
 
 
 195		}
 196	}
 197
 198	return r10_bio;
 199
 200out_free_pages:
 201	for ( ; i > 0 ; i--)
 202		safe_put_page(bio->bi_io_vec[i-1].bv_page);
 203	while (j--)
 204		for (i = 0; i < RESYNC_PAGES ; i++)
 205			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 206	j = 0;
 207out_free_bio:
 208	for ( ; j < nalloc; j++) {
 209		if (r10_bio->devs[j].bio)
 210			bio_put(r10_bio->devs[j].bio);
 
 211		if (r10_bio->devs[j].repl_bio)
 212			bio_put(r10_bio->devs[j].repl_bio);
 
 213	}
 214	r10bio_pool_free(r10_bio, conf);
 
 
 215	return NULL;
 216}
 217
 218static void r10buf_pool_free(void *__r10_bio, void *data)
 219{
 220	int i;
 221	struct r10conf *conf = data;
 222	struct r10bio *r10bio = __r10_bio;
 223	int j;
 
 224
 225	for (j=0; j < conf->copies; j++) {
 226		struct bio *bio = r10bio->devs[j].bio;
 
 227		if (bio) {
 228			for (i = 0; i < RESYNC_PAGES; i++) {
 229				safe_put_page(bio->bi_io_vec[i].bv_page);
 230				bio->bi_io_vec[i].bv_page = NULL;
 231			}
 232			bio_put(bio);
 233		}
 
 234		bio = r10bio->devs[j].repl_bio;
 235		if (bio)
 236			bio_put(bio);
 
 
 237	}
 238	r10bio_pool_free(r10bio, conf);
 
 
 
 
 239}
 240
 241static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 242{
 243	int i;
 244
 245	for (i = 0; i < conf->copies; i++) {
 246		struct bio **bio = & r10_bio->devs[i].bio;
 247		if (!BIO_SPECIAL(*bio))
 248			bio_put(*bio);
 249		*bio = NULL;
 250		bio = &r10_bio->devs[i].repl_bio;
 251		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 252			bio_put(*bio);
 253		*bio = NULL;
 254	}
 255}
 256
 257static void free_r10bio(struct r10bio *r10_bio)
 258{
 259	struct r10conf *conf = r10_bio->mddev->private;
 260
 261	put_all_bios(conf, r10_bio);
 262	mempool_free(r10_bio, conf->r10bio_pool);
 263}
 264
 265static void put_buf(struct r10bio *r10_bio)
 266{
 267	struct r10conf *conf = r10_bio->mddev->private;
 268
 269	mempool_free(r10_bio, conf->r10buf_pool);
 270
 271	lower_barrier(conf);
 272}
 273
 274static void reschedule_retry(struct r10bio *r10_bio)
 275{
 276	unsigned long flags;
 277	struct mddev *mddev = r10_bio->mddev;
 278	struct r10conf *conf = mddev->private;
 279
 280	spin_lock_irqsave(&conf->device_lock, flags);
 281	list_add(&r10_bio->retry_list, &conf->retry_list);
 282	conf->nr_queued ++;
 283	spin_unlock_irqrestore(&conf->device_lock, flags);
 284
 285	/* wake up frozen array... */
 286	wake_up(&conf->wait_barrier);
 287
 288	md_wakeup_thread(mddev->thread);
 289}
 290
 291/*
 292 * raid_end_bio_io() is called when we have finished servicing a mirrored
 293 * operation and are ready to return a success/failure code to the buffer
 294 * cache layer.
 295 */
 296static void raid_end_bio_io(struct r10bio *r10_bio)
 297{
 298	struct bio *bio = r10_bio->master_bio;
 299	int done;
 300	struct r10conf *conf = r10_bio->mddev->private;
 301
 302	if (bio->bi_phys_segments) {
 303		unsigned long flags;
 304		spin_lock_irqsave(&conf->device_lock, flags);
 305		bio->bi_phys_segments--;
 306		done = (bio->bi_phys_segments == 0);
 307		spin_unlock_irqrestore(&conf->device_lock, flags);
 308	} else
 309		done = 1;
 310	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 311		bio->bi_error = -EIO;
 312	if (done) {
 313		bio_endio(bio);
 314		/*
 315		 * Wake up any possible resync thread that waits for the device
 316		 * to go idle.
 317		 */
 318		allow_barrier(conf);
 319	}
 
 
 320	free_r10bio(r10_bio);
 321}
 322
 323/*
 324 * Update disk head position estimator based on IRQ completion info.
 325 */
 326static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 327{
 328	struct r10conf *conf = r10_bio->mddev->private;
 329
 330	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 331		r10_bio->devs[slot].addr + (r10_bio->sectors);
 332}
 333
 334/*
 335 * Find the disk number which triggered given bio
 336 */
 337static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 338			 struct bio *bio, int *slotp, int *replp)
 339{
 340	int slot;
 341	int repl = 0;
 342
 343	for (slot = 0; slot < conf->copies; slot++) {
 344		if (r10_bio->devs[slot].bio == bio)
 345			break;
 346		if (r10_bio->devs[slot].repl_bio == bio) {
 347			repl = 1;
 348			break;
 349		}
 350	}
 351
 352	BUG_ON(slot == conf->copies);
 353	update_head_pos(slot, r10_bio);
 354
 355	if (slotp)
 356		*slotp = slot;
 357	if (replp)
 358		*replp = repl;
 359	return r10_bio->devs[slot].devnum;
 360}
 361
 362static void raid10_end_read_request(struct bio *bio)
 363{
 364	int uptodate = !bio->bi_error;
 365	struct r10bio *r10_bio = bio->bi_private;
 366	int slot, dev;
 367	struct md_rdev *rdev;
 368	struct r10conf *conf = r10_bio->mddev->private;
 369
 370	slot = r10_bio->read_slot;
 371	dev = r10_bio->devs[slot].devnum;
 372	rdev = r10_bio->devs[slot].rdev;
 373	/*
 374	 * this branch is our 'one mirror IO has finished' event handler:
 375	 */
 376	update_head_pos(slot, r10_bio);
 377
 378	if (uptodate) {
 379		/*
 380		 * Set R10BIO_Uptodate in our master bio, so that
 381		 * we will return a good error code to the higher
 382		 * levels even if IO on some other mirrored buffer fails.
 383		 *
 384		 * The 'master' represents the composite IO operation to
 385		 * user-side. So if something waits for IO, then it will
 386		 * wait for the 'master' bio.
 387		 */
 388		set_bit(R10BIO_Uptodate, &r10_bio->state);
 389	} else {
 390		/* If all other devices that store this block have
 391		 * failed, we want to return the error upwards rather
 392		 * than fail the last device.  Here we redefine
 393		 * "uptodate" to mean "Don't want to retry"
 394		 */
 395		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
 396			     rdev->raid_disk))
 397			uptodate = 1;
 398	}
 399	if (uptodate) {
 400		raid_end_bio_io(r10_bio);
 401		rdev_dec_pending(rdev, conf->mddev);
 402	} else {
 403		/*
 404		 * oops, read error - keep the refcount on the rdev
 405		 */
 406		char b[BDEVNAME_SIZE];
 407		printk_ratelimited(KERN_ERR
 408				   "md/raid10:%s: %s: rescheduling sector %llu\n",
 409				   mdname(conf->mddev),
 410				   bdevname(rdev->bdev, b),
 411				   (unsigned long long)r10_bio->sector);
 412		set_bit(R10BIO_ReadError, &r10_bio->state);
 413		reschedule_retry(r10_bio);
 414	}
 415}
 416
 417static void close_write(struct r10bio *r10_bio)
 418{
 419	/* clear the bitmap if all writes complete successfully */
 420	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 421			r10_bio->sectors,
 422			!test_bit(R10BIO_Degraded, &r10_bio->state),
 423			0);
 424	md_write_end(r10_bio->mddev);
 425}
 426
 427static void one_write_done(struct r10bio *r10_bio)
 428{
 429	if (atomic_dec_and_test(&r10_bio->remaining)) {
 430		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 431			reschedule_retry(r10_bio);
 432		else {
 433			close_write(r10_bio);
 434			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 435				reschedule_retry(r10_bio);
 436			else
 437				raid_end_bio_io(r10_bio);
 438		}
 439	}
 440}
 441
 442static void raid10_end_write_request(struct bio *bio)
 443{
 444	struct r10bio *r10_bio = bio->bi_private;
 445	int dev;
 446	int dec_rdev = 1;
 447	struct r10conf *conf = r10_bio->mddev->private;
 448	int slot, repl;
 449	struct md_rdev *rdev = NULL;
 
 
 
 
 450
 451	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 452
 453	if (repl)
 454		rdev = conf->mirrors[dev].replacement;
 455	if (!rdev) {
 456		smp_rmb();
 457		repl = 0;
 458		rdev = conf->mirrors[dev].rdev;
 459	}
 460	/*
 461	 * this branch is our 'one mirror IO has finished' event handler:
 462	 */
 463	if (bio->bi_error) {
 464		if (repl)
 465			/* Never record new bad blocks to replacement,
 466			 * just fail it.
 467			 */
 468			md_error(rdev->mddev, rdev);
 469		else {
 470			set_bit(WriteErrorSeen,	&rdev->flags);
 471			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 472				set_bit(MD_RECOVERY_NEEDED,
 473					&rdev->mddev->recovery);
 474			set_bit(R10BIO_WriteError, &r10_bio->state);
 475			dec_rdev = 0;
 476		}
 477	} else {
 478		/*
 479		 * Set R10BIO_Uptodate in our master bio, so that
  480		 * we will return a good error code to the higher
 481		 * levels even if IO on some other mirrored buffer fails.
 482		 *
 483		 * The 'master' represents the composite IO operation to
 484		 * user-side. So if something waits for IO, then it will
 485		 * wait for the 'master' bio.
 486		 */
 487		sector_t first_bad;
 488		int bad_sectors;
 489
 490		/*
 491		 * Do not set R10BIO_Uptodate if the current device is
 492		 * rebuilding or Faulty. This is because we cannot use
 493		 * such device for properly reading the data back (we could
  494		 * potentially use it, if the current write fell
  495		 * before rdev->recovery_offset, but for simplicity we don't
  496		 * check this here).
 497		 */
 498		if (test_bit(In_sync, &rdev->flags) &&
 499		    !test_bit(Faulty, &rdev->flags))
 500			set_bit(R10BIO_Uptodate, &r10_bio->state);
 501
 502		/* Maybe we can clear some bad blocks. */
 503		if (is_badblock(rdev,
 504				r10_bio->devs[slot].addr,
 505				r10_bio->sectors,
 506				&first_bad, &bad_sectors)) {
 507			bio_put(bio);
 508			if (repl)
 509				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 510			else
 511				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 512			dec_rdev = 0;
 513			set_bit(R10BIO_MadeGood, &r10_bio->state);
 514		}
 515	}
 516
 517	/*
 518	 *
 519	 * Let's see if all mirrored write operations have finished
 520	 * already.
 521	 */
 522	one_write_done(r10_bio);
 523	if (dec_rdev)
 524		rdev_dec_pending(rdev, conf->mddev);
 
 
 525}
 526
 527/*
 528 * RAID10 layout manager
 529 * As well as the chunksize and raid_disks count, there are two
 530 * parameters: near_copies and far_copies.
 531 * near_copies * far_copies must be <= raid_disks.
 532 * Normally one of these will be 1.
 533 * If both are 1, we get raid0.
 534 * If near_copies == raid_disks, we get raid1.
 535 *
 536 * Chunks are laid out in raid0 style with near_copies copies of the
 537 * first chunk, followed by near_copies copies of the next chunk and
 538 * so on.
 539 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 540 * as described above, we start again with a device offset of near_copies.
 541 * So we effectively have another copy of the whole array further down all
 542 * the drives, but with blocks on different drives.
  543 * With this layout, a block is never stored twice on the same device.
 544 *
 545 * raid10_find_phys finds the sector offset of a given virtual sector
 546 * on each device that it is on.
 547 *
 548 * raid10_find_virt does the reverse mapping, from a device and a
 549 * sector offset to a virtual address
 550 */
 551
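/*
 * Standalone userspace sketch (illustration only, not driver code): it mimics
 * the "raid0 style with near_copies copies of each chunk" rule described
 * above for a hypothetical geometry of raid_disks = 4, near_copies = 2 and
 * far_copies = 1.  Far copies, far sets and the offset layout are
 * deliberately left out; the authoritative mapping is __raid10_find_phys()
 * below.
 */
#include <stdio.h>

#define RAID_DISKS	4
#define NEAR_COPIES	2

int main(void)
{
	int chunk, n;

	for (chunk = 0; chunk < 8; chunk++) {
		int dev    = (chunk * NEAR_COPIES) % RAID_DISKS;  /* first device */
		int stripe = (chunk * NEAR_COPIES) / RAID_DISKS;  /* chunk offset on it */

		printf("logical chunk %d:", chunk);
		for (n = 0; n < NEAR_COPIES; n++) {
			int d = dev + n, off = stripe;

			if (d >= RAID_DISKS) {	/* wrapping moves to the next stripe */
				d -= RAID_DISKS;
				off++;
			}
			printf("  dev %d @ chunk %d", d, off);
		}
		printf("\n");
	}
	return 0;
}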
 552static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
 553{
 554	int n,f;
 555	sector_t sector;
 556	sector_t chunk;
 557	sector_t stripe;
 558	int dev;
 559	int slot = 0;
 560	int last_far_set_start, last_far_set_size;
 561
 562	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 563	last_far_set_start *= geo->far_set_size;
 564
 565	last_far_set_size = geo->far_set_size;
 566	last_far_set_size += (geo->raid_disks % geo->far_set_size);
 567
 568	/* now calculate first sector/dev */
 569	chunk = r10bio->sector >> geo->chunk_shift;
 570	sector = r10bio->sector & geo->chunk_mask;
 571
 572	chunk *= geo->near_copies;
 573	stripe = chunk;
 574	dev = sector_div(stripe, geo->raid_disks);
 575	if (geo->far_offset)
 576		stripe *= geo->far_copies;
 577
 578	sector += stripe << geo->chunk_shift;
 579
 580	/* and calculate all the others */
 581	for (n = 0; n < geo->near_copies; n++) {
 582		int d = dev;
 583		int set;
 584		sector_t s = sector;
 585		r10bio->devs[slot].devnum = d;
 586		r10bio->devs[slot].addr = s;
 587		slot++;
 588
 589		for (f = 1; f < geo->far_copies; f++) {
 590			set = d / geo->far_set_size;
 591			d += geo->near_copies;
 592
 593			if ((geo->raid_disks % geo->far_set_size) &&
 594			    (d > last_far_set_start)) {
 595				d -= last_far_set_start;
 596				d %= last_far_set_size;
 597				d += last_far_set_start;
 598			} else {
 599				d %= geo->far_set_size;
 600				d += geo->far_set_size * set;
 601			}
 602			s += geo->stride;
 603			r10bio->devs[slot].devnum = d;
 604			r10bio->devs[slot].addr = s;
 605			slot++;
 606		}
 607		dev++;
 608		if (dev >= geo->raid_disks) {
 609			dev = 0;
 610			sector += (geo->chunk_mask + 1);
 611		}
 612	}
 613}
 614
 615static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 616{
 617	struct geom *geo = &conf->geo;
 618
 619	if (conf->reshape_progress != MaxSector &&
 620	    ((r10bio->sector >= conf->reshape_progress) !=
 621	     conf->mddev->reshape_backwards)) {
 622		set_bit(R10BIO_Previous, &r10bio->state);
 623		geo = &conf->prev;
 624	} else
 625		clear_bit(R10BIO_Previous, &r10bio->state);
 626
 627	__raid10_find_phys(geo, r10bio);
 628}
 629
 630static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 631{
 632	sector_t offset, chunk, vchunk;
 633	/* Never use conf->prev as this is only called during resync
 634	 * or recovery, so reshape isn't happening
 635	 */
 636	struct geom *geo = &conf->geo;
 637	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
 638	int far_set_size = geo->far_set_size;
 639	int last_far_set_start;
 640
 641	if (geo->raid_disks % geo->far_set_size) {
 642		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 643		last_far_set_start *= geo->far_set_size;
 644
 645		if (dev >= last_far_set_start) {
 646			far_set_size = geo->far_set_size;
 647			far_set_size += (geo->raid_disks % geo->far_set_size);
 648			far_set_start = last_far_set_start;
 649		}
 650	}
 651
 652	offset = sector & geo->chunk_mask;
 653	if (geo->far_offset) {
 654		int fc;
 655		chunk = sector >> geo->chunk_shift;
 656		fc = sector_div(chunk, geo->far_copies);
 657		dev -= fc * geo->near_copies;
 658		if (dev < far_set_start)
 659			dev += far_set_size;
 660	} else {
 661		while (sector >= geo->stride) {
 662			sector -= geo->stride;
 663			if (dev < (geo->near_copies + far_set_start))
 664				dev += far_set_size - geo->near_copies;
 665			else
 666				dev -= geo->near_copies;
 667		}
 668		chunk = sector >> geo->chunk_shift;
 669	}
 670	vchunk = chunk * geo->raid_disks + dev;
 671	sector_div(vchunk, geo->near_copies);
 672	return (vchunk << geo->chunk_shift) + offset;
 673}
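/*
 * Worked example for raid10_find_virt() above, using the same hypothetical
 * geometry (4 disks, near_copies = 2, far_copies = 1): a sector in device 2's
 * second chunk (device chunk 1) maps back to
 *	vchunk = (1 * raid_disks + 2) / near_copies = 6 / 2 = 3,
 * i.e. logical chunk 3 -- which raid10_find_phys() places on devices 2 and 3
 * at device chunk 1, so the two mappings are consistent.
 */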
 674
 675/*
 676 * This routine returns the disk from which the requested read should
 677 * be done. There is a per-array 'next expected sequential IO' sector
 678 * number - if this matches on the next IO then we use the last disk.
  679 * There is also a per-disk 'last known head position' sector that is
  680 * maintained from IRQ contexts; both the normal and the resync IO
 681 * completion handlers update this position correctly. If there is no
 682 * perfect sequential match then we pick the disk whose head is closest.
 683 *
 684 * If there are 2 mirrors in the same 2 devices, performance degrades
 685 * because position is mirror, not device based.
 686 *
 687 * The rdev for the device selected will have nr_pending incremented.
 688 */
 689
 690/*
 691 * FIXME: possibly should rethink readbalancing and do it differently
 692 * depending on near_copies / far_copies geometry.
 693 */
 694static struct md_rdev *read_balance(struct r10conf *conf,
 695				    struct r10bio *r10_bio,
 696				    int *max_sectors)
 697{
 698	const sector_t this_sector = r10_bio->sector;
 699	int disk, slot;
 700	int sectors = r10_bio->sectors;
 701	int best_good_sectors;
 702	sector_t new_distance, best_dist;
 703	struct md_rdev *best_rdev, *rdev = NULL;
 704	int do_balance;
 705	int best_slot;
 
 
 706	struct geom *geo = &conf->geo;
 707
 708	raid10_find_phys(conf, r10_bio);
 709	rcu_read_lock();
 710retry:
 711	sectors = r10_bio->sectors;
 712	best_slot = -1;
 713	best_rdev = NULL;
 714	best_dist = MaxSector;
 715	best_good_sectors = 0;
 716	do_balance = 1;
 
 717	/*
 718	 * Check if we can balance. We can balance on the whole
 719	 * device if no resync is going on (recovery is ok), or below
 720	 * the resync window. We take the first readable disk when
 721	 * above the resync window.
 722	 */
 723	if (conf->mddev->recovery_cp < MaxSector
 724	    && (this_sector + sectors >= conf->next_resync))
 
 
 
 725		do_balance = 0;
 726
 727	for (slot = 0; slot < conf->copies ; slot++) {
 728		sector_t first_bad;
 729		int bad_sectors;
 730		sector_t dev_sector;
 
 
 731
 732		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 733			continue;
 734		disk = r10_bio->devs[slot].devnum;
 735		rdev = rcu_dereference(conf->mirrors[disk].replacement);
 736		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 737		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 738			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 739		if (rdev == NULL ||
 740		    test_bit(Faulty, &rdev->flags))
 741			continue;
 742		if (!test_bit(In_sync, &rdev->flags) &&
 743		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 744			continue;
 745
 746		dev_sector = r10_bio->devs[slot].addr;
 747		if (is_badblock(rdev, dev_sector, sectors,
 748				&first_bad, &bad_sectors)) {
 749			if (best_dist < MaxSector)
 750				/* Already have a better slot */
 751				continue;
 752			if (first_bad <= dev_sector) {
 753				/* Cannot read here.  If this is the
 754				 * 'primary' device, then we must not read
 755				 * beyond 'bad_sectors' from another device.
 756				 */
 757				bad_sectors -= (dev_sector - first_bad);
 758				if (!do_balance && sectors > bad_sectors)
 759					sectors = bad_sectors;
 760				if (best_good_sectors > sectors)
 761					best_good_sectors = sectors;
 762			} else {
 763				sector_t good_sectors =
 764					first_bad - dev_sector;
 765				if (good_sectors > best_good_sectors) {
 766					best_good_sectors = good_sectors;
 767					best_slot = slot;
 768					best_rdev = rdev;
 769				}
 770				if (!do_balance)
 771					/* Must read from here */
 772					break;
 773			}
 774			continue;
 775		} else
 776			best_good_sectors = sectors;
 777
 778		if (!do_balance)
 779			break;
 780
 781		/* This optimisation is debatable, and completely destroys
 782		 * sequential read speed for 'far copies' arrays.  So only
 783		 * keep it for 'near' arrays, and review those later.
 784		 */
 785		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
 786			break;
 787
 788		/* for far > 1 always use the lowest address */
 789		if (geo->far_copies > 1)
 790			new_distance = r10_bio->devs[slot].addr;
 791		else
 792			new_distance = abs(r10_bio->devs[slot].addr -
 793					   conf->mirrors[disk].head_position);
 
 794		if (new_distance < best_dist) {
 795			best_dist = new_distance;
 796			best_slot = slot;
 797			best_rdev = rdev;
 798		}
 799	}
 800	if (slot >= conf->copies) {
 801		slot = best_slot;
 802		rdev = best_rdev;
 803	}
 804
 805	if (slot >= 0) {
 806		atomic_inc(&rdev->nr_pending);
 807		if (test_bit(Faulty, &rdev->flags)) {
 808			/* Cannot risk returning a device that failed
 809			 * before we inc'ed nr_pending
 810			 */
 811			rdev_dec_pending(rdev, conf->mddev);
 812			goto retry;
 813		}
 814		r10_bio->read_slot = slot;
 815	} else
 816		rdev = NULL;
 817	rcu_read_unlock();
 818	*max_sectors = best_good_sectors;
 819
 820	return rdev;
 821}
 822
 823static int raid10_congested(struct mddev *mddev, int bits)
 824{
 825	struct r10conf *conf = mddev->private;
 826	int i, ret = 0;
 827
 828	if ((bits & (1 << WB_async_congested)) &&
 829	    conf->pending_count >= max_queued_requests)
 830		return 1;
 831
 832	rcu_read_lock();
 833	for (i = 0;
 834	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
 835		     && ret == 0;
 836	     i++) {
 837		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 838		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 839			struct request_queue *q = bdev_get_queue(rdev->bdev);
 840
 841			ret |= bdi_congested(&q->backing_dev_info, bits);
 842		}
 843	}
 844	rcu_read_unlock();
 845	return ret;
 846}
 847
 848static void flush_pending_writes(struct r10conf *conf)
 849{
 850	/* Any writes that have been queued but are awaiting
 851	 * bitmap updates get flushed here.
 852	 */
 853	spin_lock_irq(&conf->device_lock);
 854
 855	if (conf->pending_bio_list.head) {
 
 856		struct bio *bio;
 
 857		bio = bio_list_get(&conf->pending_bio_list);
 858		conf->pending_count = 0;
 859		spin_unlock_irq(&conf->device_lock);
 860		/* flush any pending bitmap writes to disk
 861		 * before proceeding w/ I/O */
 862		bitmap_unplug(conf->mddev->bitmap);
 863		wake_up(&conf->wait_barrier);
 864
 865		while (bio) { /* submit pending writes */
 866			struct bio *next = bio->bi_next;
 
 867			bio->bi_next = NULL;
 868			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 869			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 
 
 
 870				/* Just ignore it */
 871				bio_endio(bio);
 872			else
 873				generic_make_request(bio);
 874			bio = next;
 875		}
 
 876	} else
 877		spin_unlock_irq(&conf->device_lock);
 878}
 879
 880/* Barriers....
 881 * Sometimes we need to suspend IO while we do something else,
 882 * either some resync/recovery, or reconfigure the array.
 883 * To do this we raise a 'barrier'.
 884 * The 'barrier' is a counter that can be raised multiple times
 885 * to count how many activities are happening which preclude
 886 * normal IO.
 887 * We can only raise the barrier if there is no pending IO.
 888 * i.e. if nr_pending == 0.
 889 * We choose only to raise the barrier if no-one is waiting for the
 890 * barrier to go down.  This means that as soon as an IO request
 891 * is ready, no other operations which require a barrier will start
 892 * until the IO request has had a chance.
 893 *
 894 * So: regular IO calls 'wait_barrier'.  When that returns there
  895 *    is no background IO happening.  It must arrange to call
  896 *    allow_barrier when it has finished its IO.
  897 * background IO calls must call raise_barrier.  Once that returns
  898 *    there is no normal IO happening.  It must arrange to call
 899 *    lower_barrier when the particular background IO completes.
 900 */
 901
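/*
 * Userspace sketch of the counting scheme described above (illustration only;
 * the names mirror the driver but this is pthread code, not the kernel
 * implementation, and it omits the RESYNC_DEPTH limit and the nr_waiting
 * special case handled below).
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int barrier;		/* background activities excluding normal IO */
static int nr_pending;		/* normal IO requests in flight */

static void wait_barrier_sketch(void)	/* before issuing normal IO */
{
	pthread_mutex_lock(&lock);
	while (barrier)
		pthread_cond_wait(&wq, &lock);
	nr_pending++;
	pthread_mutex_unlock(&lock);
}

static void allow_barrier_sketch(void)	/* when that IO has finished */
{
	pthread_mutex_lock(&lock);
	nr_pending--;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
}

static void raise_barrier_sketch(void)	/* resync/recovery side */
{
	pthread_mutex_lock(&lock);
	barrier++;			/* block new normal IO ... */
	while (nr_pending)		/* ... then drain what is pending */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
}

static void lower_barrier_sketch(void)
{
	pthread_mutex_lock(&lock);
	barrier--;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	wait_barrier_sketch();	/* normal IO starts ... */
	allow_barrier_sketch();	/* ... and completes   */
	raise_barrier_sketch();	/* resync now runs with the array quiesced */
	lower_barrier_sketch();
	return 0;
}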
 902static void raise_barrier(struct r10conf *conf, int force)
 903{
 
 904	BUG_ON(force && !conf->barrier);
 905	spin_lock_irq(&conf->resync_lock);
 906
 907	/* Wait until no block IO is waiting (unless 'force') */
 908	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 909			    conf->resync_lock);
 910
 911	/* block any new IO from starting */
 912	conf->barrier++;
 913
 914	/* Now wait for all pending IO to complete */
 915	wait_event_lock_irq(conf->wait_barrier,
 916			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
 917			    conf->resync_lock);
 918
 919	spin_unlock_irq(&conf->resync_lock);
 920}
 921
 922static void lower_barrier(struct r10conf *conf)
 923{
 924	unsigned long flags;
 925	spin_lock_irqsave(&conf->resync_lock, flags);
 926	conf->barrier--;
 927	spin_unlock_irqrestore(&conf->resync_lock, flags);
 
 928	wake_up(&conf->wait_barrier);
 929}
 930
 931static void wait_barrier(struct r10conf *conf)
 932{
 933	spin_lock_irq(&conf->resync_lock);
 934	if (conf->barrier) {
 935		conf->nr_waiting++;
 936		/* Wait for the barrier to drop.
 937		 * However if there are already pending
 938		 * requests (preventing the barrier from
 939		 * rising completely), and the
 940		 * pre-process bio queue isn't empty,
 941		 * then don't wait, as we need to empty
 942		 * that queue to get the nr_pending
 943		 * count down.
 944		 */
 945		wait_event_lock_irq(conf->wait_barrier,
 946				    !conf->barrier ||
 947				    (conf->nr_pending &&
 948				     current->bio_list &&
 949				     !bio_list_empty(current->bio_list)),
 950				    conf->resync_lock);
 951		conf->nr_waiting--;
 952	}
 953	conf->nr_pending++;
 954	spin_unlock_irq(&conf->resync_lock);
 955}
 956
 957static void allow_barrier(struct r10conf *conf)
 958{
 959	unsigned long flags;
 960	spin_lock_irqsave(&conf->resync_lock, flags);
 961	conf->nr_pending--;
 962	spin_unlock_irqrestore(&conf->resync_lock, flags);
 963	wake_up(&conf->wait_barrier);
 964}
 965
 966static void freeze_array(struct r10conf *conf, int extra)
 967{
 968	/* stop syncio and normal IO and wait for everything to
 969	 * go quiet.
 970	 * We increment barrier and nr_waiting, and then
  971	 * wait until nr_pending matches nr_queued+extra
 972	 * This is called in the context of one normal IO request
 973	 * that has failed. Thus any sync request that might be pending
 974	 * will be blocked by nr_pending, and we need to wait for
 975	 * pending IO requests to complete or be queued for re-try.
 976	 * Thus the number queued (nr_queued) plus this request (extra)
 977	 * must match the number of pending IOs (nr_pending) before
 978	 * we continue.
 979	 */
 980	spin_lock_irq(&conf->resync_lock);
 981	conf->barrier++;
 
 982	conf->nr_waiting++;
 983	wait_event_lock_irq_cmd(conf->wait_barrier,
 984				conf->nr_pending == conf->nr_queued+extra,
 985				conf->resync_lock,
 986				flush_pending_writes(conf));
 987
 988	spin_unlock_irq(&conf->resync_lock);
 989}
 990
 991static void unfreeze_array(struct r10conf *conf)
 992{
 993	/* reverse the effect of the freeze */
 994	spin_lock_irq(&conf->resync_lock);
 995	conf->barrier--;
 996	conf->nr_waiting--;
 997	wake_up(&conf->wait_barrier);
 998	spin_unlock_irq(&conf->resync_lock);
 999}
1000
1001static sector_t choose_data_offset(struct r10bio *r10_bio,
1002				   struct md_rdev *rdev)
1003{
1004	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1005	    test_bit(R10BIO_Previous, &r10_bio->state))
1006		return rdev->data_offset;
1007	else
1008		return rdev->new_data_offset;
1009}
1010
1011struct raid10_plug_cb {
1012	struct blk_plug_cb	cb;
1013	struct bio_list		pending;
1014	int			pending_cnt;
1015};
1016
1017static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1018{
1019	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1020						   cb);
1021	struct mddev *mddev = plug->cb.data;
1022	struct r10conf *conf = mddev->private;
1023	struct bio *bio;
1024
1025	if (from_schedule || current->bio_list) {
1026		spin_lock_irq(&conf->device_lock);
1027		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1028		conf->pending_count += plug->pending_cnt;
1029		spin_unlock_irq(&conf->device_lock);
1030		wake_up(&conf->wait_barrier);
1031		md_wakeup_thread(mddev->thread);
1032		kfree(plug);
1033		return;
1034	}
1035
1036	/* we aren't scheduling, so we can do the write-out directly. */
1037	bio = bio_list_get(&plug->pending);
1038	bitmap_unplug(mddev->bitmap);
1039	wake_up(&conf->wait_barrier);
1040
1041	while (bio) { /* submit pending writes */
1042		struct bio *next = bio->bi_next;
 
1043		bio->bi_next = NULL;
1044		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
1045		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 
 
 
1046			/* Just ignore it */
1047			bio_endio(bio);
1048		else
1049			generic_make_request(bio);
1050		bio = next;
1051	}
1052	kfree(plug);
1053}
1054
1055static void __make_request(struct mddev *mddev, struct bio *bio)
1056{
1057	struct r10conf *conf = mddev->private;
1058	struct r10bio *r10_bio;
1059	struct bio *read_bio;
1060	int i;
1061	const int rw = bio_data_dir(bio);
1062	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1063	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1064	const unsigned long do_discard = (bio->bi_rw
1065					  & (REQ_DISCARD | REQ_SECURE));
1066	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
1067	unsigned long flags;
1068	struct md_rdev *blocked_rdev;
1069	struct blk_plug_cb *cb;
1070	struct raid10_plug_cb *plug = NULL;
1071	int sectors_handled;
1072	int max_sectors;
1073	int sectors;
1074
1075	/*
1076	 * Register the new request and wait if the reconstruction
1077	 * thread has put up a bar for new requests.
1078	 * Continue immediately if no resync is active currently.
1079	 */
1080	wait_barrier(conf);
1081
1082	sectors = bio_sectors(bio);
1083	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1084	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1085	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1086		/* IO spans the reshape position.  Need to wait for
1087		 * reshape to pass
1088		 */
1089		allow_barrier(conf);
1090		wait_event(conf->wait_barrier,
1091			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1092			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1093			   sectors);
1094		wait_barrier(conf);
1095	}
1096	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1097	    bio_data_dir(bio) == WRITE &&
1098	    (mddev->reshape_backwards
1099	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1100		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1101	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1102		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1103		/* Need to update reshape_position in metadata */
1104		mddev->reshape_position = conf->reshape_progress;
1105		set_bit(MD_CHANGE_DEVS, &mddev->flags);
1106		set_bit(MD_CHANGE_PENDING, &mddev->flags);
1107		md_wakeup_thread(mddev->thread);
1108		wait_event(mddev->sb_wait,
1109			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1110
1111		conf->reshape_safe = mddev->reshape_position;
1112	}
1113
1114	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1115
1116	r10_bio->master_bio = bio;
1117	r10_bio->sectors = sectors;
1118
1119	r10_bio->mddev = mddev;
1120	r10_bio->sector = bio->bi_iter.bi_sector;
1121	r10_bio->state = 0;
1122
1123	/* We might need to issue multiple reads to different
1124	 * devices if there are bad blocks around, so we keep
1125	 * track of the number of reads in bio->bi_phys_segments.
1126	 * If this is 0, there is only one r10_bio and no locking
1127	 * will be needed when the request completes.  If it is
1128	 * non-zero, then it is the number of not-completed requests.
1129	 */
1130	bio->bi_phys_segments = 0;
1131	bio_clear_flag(bio, BIO_SEG_VALID);
1132
1133	if (rw == READ) {
1134		/*
1135		 * read balancing logic:
1136		 */
1137		struct md_rdev *rdev;
1138		int slot;
1139
1140read_again:
1141		rdev = read_balance(conf, r10_bio, &max_sectors);
1142		if (!rdev) {
1143			raid_end_bio_io(r10_bio);
1144			return;
1145		}
1146		slot = r10_bio->read_slot;
1147
1148		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1149		bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1150			 max_sectors);
1151
1152		r10_bio->devs[slot].bio = read_bio;
1153		r10_bio->devs[slot].rdev = rdev;
1154
1155		read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1156			choose_data_offset(r10_bio, rdev);
1157		read_bio->bi_bdev = rdev->bdev;
1158		read_bio->bi_end_io = raid10_end_read_request;
1159		read_bio->bi_rw = READ | do_sync;
1160		read_bio->bi_private = r10_bio;
1161
1162		if (max_sectors < r10_bio->sectors) {
1163			/* Could not read all from this device, so we will
1164			 * need another r10_bio.
1165			 */
1166			sectors_handled = (r10_bio->sector + max_sectors
1167					   - bio->bi_iter.bi_sector);
1168			r10_bio->sectors = max_sectors;
1169			spin_lock_irq(&conf->device_lock);
1170			if (bio->bi_phys_segments == 0)
1171				bio->bi_phys_segments = 2;
1172			else
1173				bio->bi_phys_segments++;
1174			spin_unlock_irq(&conf->device_lock);
1175			/* Cannot call generic_make_request directly
1176			 * as that will be queued in __generic_make_request
1177			 * and subsequent mempool_alloc might block
 1178			 * waiting for it.  So hand the bio over to raid10d.
1179			 */
1180			reschedule_retry(r10_bio);
1181
1182			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1183
1184			r10_bio->master_bio = bio;
1185			r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1186			r10_bio->state = 0;
1187			r10_bio->mddev = mddev;
1188			r10_bio->sector = bio->bi_iter.bi_sector +
1189				sectors_handled;
1190			goto read_again;
1191		} else
1192			generic_make_request(read_bio);
1193		return;
1194	}
1195
1196	/*
1197	 * WRITE:
1198	 */
1199	if (conf->pending_count >= max_queued_requests) {
1200		md_wakeup_thread(mddev->thread);
1201		wait_event(conf->wait_barrier,
1202			   conf->pending_count < max_queued_requests);
1203	}
 
1204	/* first select target devices under rcu_lock and
1205	 * inc refcount on their rdev.  Record them by setting
1206	 * bios[x] to bio
1207	 * If there are known/acknowledged bad blocks on any device
1208	 * on which we have seen a write error, we want to avoid
1209	 * writing to those blocks.  This potentially requires several
1210	 * writes to write around the bad blocks.  Each set of writes
1211	 * gets its own r10_bio with a set of bios attached.  The number
 1212	 * of r10_bios is recorded in bio->bi_phys_segments just as with
1213	 * the read case.
1214	 */
1215
1216	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1217	raid10_find_phys(conf, r10_bio);
1218retry_write:
1219	blocked_rdev = NULL;
 
1220	rcu_read_lock();
1221	max_sectors = r10_bio->sectors;
1222
1223	for (i = 0;  i < conf->copies; i++) {
1224		int d = r10_bio->devs[i].devnum;
1225		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1226		struct md_rdev *rrdev = rcu_dereference(
1227			conf->mirrors[d].replacement);
1228		if (rdev == rrdev)
1229			rrdev = NULL;
1230		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1231			atomic_inc(&rdev->nr_pending);
1232			blocked_rdev = rdev;
1233			break;
1234		}
1235		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1236			atomic_inc(&rrdev->nr_pending);
1237			blocked_rdev = rrdev;
1238			break;
1239		}
1240		if (rdev && (test_bit(Faulty, &rdev->flags)))
1241			rdev = NULL;
1242		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1243			rrdev = NULL;
1244
1245		r10_bio->devs[i].bio = NULL;
1246		r10_bio->devs[i].repl_bio = NULL;
1247
1248		if (!rdev && !rrdev) {
1249			set_bit(R10BIO_Degraded, &r10_bio->state);
1250			continue;
1251		}
1252		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1253			sector_t first_bad;
1254			sector_t dev_sector = r10_bio->devs[i].addr;
1255			int bad_sectors;
1256			int is_bad;
1257
1258			is_bad = is_badblock(rdev, dev_sector,
1259					     max_sectors,
1260					     &first_bad, &bad_sectors);
1261			if (is_bad < 0) {
1262				/* Mustn't write here until the bad block
1263				 * is acknowledged
1264				 */
1265				atomic_inc(&rdev->nr_pending);
1266				set_bit(BlockedBadBlocks, &rdev->flags);
1267				blocked_rdev = rdev;
1268				break;
1269			}
1270			if (is_bad && first_bad <= dev_sector) {
1271				/* Cannot write here at all */
1272				bad_sectors -= (dev_sector - first_bad);
1273				if (bad_sectors < max_sectors)
1274					/* Mustn't write more than bad_sectors
1275					 * to other devices yet
1276					 */
1277					max_sectors = bad_sectors;
1278				/* We don't set R10BIO_Degraded as that
1279				 * only applies if the disk is missing,
1280				 * so it might be re-added, and we want to
1281				 * know to recover this chunk.
1282				 * In this case the device is here, and the
1283				 * fact that this chunk is not in-sync is
1284				 * recorded in the bad block log.
1285				 */
1286				continue;
1287			}
1288			if (is_bad) {
1289				int good_sectors = first_bad - dev_sector;
1290				if (good_sectors < max_sectors)
1291					max_sectors = good_sectors;
1292			}
1293		}
1294		if (rdev) {
1295			r10_bio->devs[i].bio = bio;
1296			atomic_inc(&rdev->nr_pending);
1297		}
1298		if (rrdev) {
1299			r10_bio->devs[i].repl_bio = bio;
1300			atomic_inc(&rrdev->nr_pending);
1301		}
1302	}
1303	rcu_read_unlock();
1304
1305	if (unlikely(blocked_rdev)) {
1306		/* Have to wait for this device to get unblocked, then retry */
1307		int j;
1308		int d;
1309
1310		for (j = 0; j < i; j++) {
1311			if (r10_bio->devs[j].bio) {
1312				d = r10_bio->devs[j].devnum;
1313				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1314			}
1315			if (r10_bio->devs[j].repl_bio) {
1316				struct md_rdev *rdev;
1317				d = r10_bio->devs[j].devnum;
1318				rdev = conf->mirrors[d].replacement;
1319				if (!rdev) {
1320					/* Race with remove_disk */
1321					smp_mb();
1322					rdev = conf->mirrors[d].rdev;
1323				}
1324				rdev_dec_pending(rdev, mddev);
1325			}
1326		}
1327		allow_barrier(conf);
1328		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1329		wait_barrier(conf);
1330		goto retry_write;
 
1331	}
1332
1333	if (max_sectors < r10_bio->sectors) {
1334		/* We are splitting this into multiple parts, so
1335		 * we need to prepare for allocating another r10_bio.
1336		 */
1337		r10_bio->sectors = max_sectors;
1338		spin_lock_irq(&conf->device_lock);
1339		if (bio->bi_phys_segments == 0)
1340			bio->bi_phys_segments = 2;
1341		else
1342			bio->bi_phys_segments++;
1343		spin_unlock_irq(&conf->device_lock);
1344	}
1345	sectors_handled = r10_bio->sector + max_sectors -
1346		bio->bi_iter.bi_sector;
1347
1348	atomic_set(&r10_bio->remaining, 1);
1349	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1350
1351	for (i = 0; i < conf->copies; i++) {
1352		struct bio *mbio;
1353		int d = r10_bio->devs[i].devnum;
1354		if (r10_bio->devs[i].bio) {
1355			struct md_rdev *rdev = conf->mirrors[d].rdev;
1356			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1357			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1358				 max_sectors);
1359			r10_bio->devs[i].bio = mbio;
1360
1361			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
1362					   choose_data_offset(r10_bio,
1363							      rdev));
1364			mbio->bi_bdev = rdev->bdev;
1365			mbio->bi_end_io	= raid10_end_write_request;
1366			mbio->bi_rw =
1367				WRITE | do_sync | do_fua | do_discard | do_same;
1368			mbio->bi_private = r10_bio;
1369
1370			atomic_inc(&r10_bio->remaining);
 
 
 
1371
1372			cb = blk_check_plugged(raid10_unplug, mddev,
1373					       sizeof(*plug));
1374			if (cb)
1375				plug = container_of(cb, struct raid10_plug_cb,
1376						    cb);
1377			else
1378				plug = NULL;
1379			spin_lock_irqsave(&conf->device_lock, flags);
1380			if (plug) {
1381				bio_list_add(&plug->pending, mbio);
1382				plug->pending_cnt++;
1383			} else {
1384				bio_list_add(&conf->pending_bio_list, mbio);
1385				conf->pending_count++;
1386			}
1387			spin_unlock_irqrestore(&conf->device_lock, flags);
1388			if (!plug)
1389				md_wakeup_thread(mddev->thread);
1390		}
1391
1392		if (r10_bio->devs[i].repl_bio) {
1393			struct md_rdev *rdev = conf->mirrors[d].replacement;
1394			if (rdev == NULL) {
1395				/* Replacement just got moved to main 'rdev' */
1396				smp_mb();
1397				rdev = conf->mirrors[d].rdev;
1398			}
1399			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1400			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1401				 max_sectors);
1402			r10_bio->devs[i].repl_bio = mbio;
1403
1404			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
1405					   choose_data_offset(
1406						   r10_bio, rdev));
1407			mbio->bi_bdev = rdev->bdev;
1408			mbio->bi_end_io	= raid10_end_write_request;
1409			mbio->bi_rw =
1410				WRITE | do_sync | do_fua | do_discard | do_same;
1411			mbio->bi_private = r10_bio;
1412
1413			atomic_inc(&r10_bio->remaining);
1414			spin_lock_irqsave(&conf->device_lock, flags);
1415			bio_list_add(&conf->pending_bio_list, mbio);
1416			conf->pending_count++;
1417			spin_unlock_irqrestore(&conf->device_lock, flags);
1418			if (!mddev_check_plugged(mddev))
1419				md_wakeup_thread(mddev->thread);
 
 
1420		}
1421	}
1422
1423	/* Don't remove the bias on 'remaining' (one_write_done) until
1424	 * after checking if we need to go around again.
1425	 */
 
 
1426
1427	if (sectors_handled < bio_sectors(bio)) {
1428		one_write_done(r10_bio);
1429		/* We need another r10_bio.  It has already been counted
1430		 * in bio->bi_phys_segments.
 
 
 
 
1431		 */
1432		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1433
1434		r10_bio->master_bio = bio;
1435		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1436
1437		r10_bio->mddev = mddev;
1438		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1439		r10_bio->state = 0;
1440		goto retry_write;
1441	}
1442	one_write_done(r10_bio);
1443}
1444
1445static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1446{
1447	struct r10conf *conf = mddev->private;
1448	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1449	int chunk_sects = chunk_mask + 1;
1450
1451	struct bio *split;
1452
1453	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1454		md_flush_request(mddev, bio);
1455		return;
1456	}
 
1457
1458	md_write_start(mddev, bio);
 
 
 
1459
1460	do {
1461
1462		/*
1463		 * If this request crosses a chunk boundary, we need to split
1464		 * it.
 
 
1465		 */
1466		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1467			     bio_sectors(bio) > chunk_sects
1468			     && (conf->geo.near_copies < conf->geo.raid_disks
1469				 || conf->prev.near_copies <
1470				 conf->prev.raid_disks))) {
1471			split = bio_split(bio, chunk_sects -
1472					  (bio->bi_iter.bi_sector &
1473					   (chunk_sects - 1)),
1474					  GFP_NOIO, fs_bio_set);
1475			bio_chain(split, bio);
1476		} else {
1477			split = bio;
1478		}
 
1479
1480		__make_request(mddev, split);
1481	} while (split != bio);
1482
1483	/* In case raid10d snuck in to freeze_array */
1484	wake_up(&conf->wait_barrier);
 
1485}
1486
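/*
 * Worked example for the chunk-boundary split above (hypothetical numbers):
 * with chunk_sects = 128 and a 64-sector write starting at sector 100,
 * (100 & 127) + 64 = 164 crosses the boundary, so bio_split() cuts off
 * 128 - (100 & 127) = 28 sectors for this chunk and the loop comes around
 * again for the remaining 36 sectors in the next chunk.
 */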
1487static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1488{
1489	struct r10conf *conf = mddev->private;
1490	int i;
1491
1492	if (conf->geo.near_copies < conf->geo.raid_disks)
1493		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1494	if (conf->geo.near_copies > 1)
1495		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1496	if (conf->geo.far_copies > 1) {
1497		if (conf->geo.far_offset)
1498			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1499		else
1500			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1501		if (conf->geo.far_set_size != conf->geo.raid_disks)
1502			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1503	}
1504	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1505					conf->geo.raid_disks - mddev->degraded);
1506	for (i = 0; i < conf->geo.raid_disks; i++)
1507		seq_printf(seq, "%s",
1508			      conf->mirrors[i].rdev &&
1509			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
 
 
1510	seq_printf(seq, "]");
1511}
1512
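/*
 * Example of what raid10_status() above contributes to /proc/mdstat for a
 * hypothetical healthy 4-disk array with 512K chunks (chunk_sectors = 1024)
 * and near_copies = 2:
 *	 512K chunks 2 near-copies [4/4] [UUUU]
 * With one failed member the tail becomes [4/3] and the missing disk shows
 * as '_' in the bracketed map.
 */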
1513/* check if there are enough drives for
 1514 * every block to appear on at least one.  Don't consider
1515 * Don't consider the device numbered 'ignore'
1516 * as we might be about to remove it.
1517 */
1518static int _enough(struct r10conf *conf, int previous, int ignore)
1519{
1520	int first = 0;
1521	int has_enough = 0;
1522	int disks, ncopies;
1523	if (previous) {
1524		disks = conf->prev.raid_disks;
1525		ncopies = conf->prev.near_copies;
1526	} else {
1527		disks = conf->geo.raid_disks;
1528		ncopies = conf->geo.near_copies;
1529	}
1530
1531	rcu_read_lock();
1532	do {
1533		int n = conf->copies;
1534		int cnt = 0;
1535		int this = first;
1536		while (n--) {
1537			struct md_rdev *rdev;
1538			if (this != ignore &&
1539			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1540			    test_bit(In_sync, &rdev->flags))
1541				cnt++;
1542			this = (this+1) % disks;
1543		}
1544		if (cnt == 0)
1545			goto out;
1546		first = (first + ncopies) % disks;
1547	} while (first != 0);
1548	has_enough = 1;
1549out:
1550	rcu_read_unlock();
1551	return has_enough;
1552}
1553
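/*
 * Worked example for _enough() above (hypothetical 4-disk, near_copies = 2,
 * far_copies = 1 array): copies of any block live on an adjacent pair of
 * devices, {0,1} or {2,3}.  Losing one device from each pair (say 0 and 2)
 * still leaves every block readable, so the check passes; losing both
 * members of one pair (0 and 1) leaves some blocks with no copy and the
 * check fails.
 */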
1554static int enough(struct r10conf *conf, int ignore)
1555{
1556	/* when calling 'enough', both 'prev' and 'geo' must
1557	 * be stable.
1558	 * This is ensured if ->reconfig_mutex or ->device_lock
1559	 * is held.
1560	 */
1561	return _enough(conf, 0, ignore) &&
1562		_enough(conf, 1, ignore);
1563}
1564
1565static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1566{
1567	char b[BDEVNAME_SIZE];
1568	struct r10conf *conf = mddev->private;
1569	unsigned long flags;
1570
1571	/*
1572	 * If it is not operational, then we have already marked it as dead
 1573	 * else if it is the last working disk, ignore the error, let the
1574	 * next level up know.
1575	 * else mark the drive as failed
1576	 */
1577	spin_lock_irqsave(&conf->device_lock, flags);
1578	if (test_bit(In_sync, &rdev->flags)
1579	    && !enough(conf, rdev->raid_disk)) {
1580		/*
1581		 * Don't fail the drive, just return an IO error.
1582		 */
1583		spin_unlock_irqrestore(&conf->device_lock, flags);
1584		return;
 
1585	}
1586	if (test_and_clear_bit(In_sync, &rdev->flags))
1587		mddev->degraded++;
1588	/*
1589	 * If recovery is running, make sure it aborts.
1590	 */
1591	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1592	set_bit(Blocked, &rdev->flags);
1593	set_bit(Faulty, &rdev->flags);
1594	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1595	set_bit(MD_CHANGE_PENDING, &mddev->flags);
1596	spin_unlock_irqrestore(&conf->device_lock, flags);
1597	printk(KERN_ALERT
1598	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
1599	       "md/raid10:%s: Operation continuing on %d devices.\n",
1600	       mdname(mddev), bdevname(rdev->bdev, b),
1601	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1602}
1603
1604static void print_conf(struct r10conf *conf)
1605{
1606	int i;
1607	struct raid10_info *tmp;
1608
1609	printk(KERN_DEBUG "RAID10 conf printout:\n");
1610	if (!conf) {
1611		printk(KERN_DEBUG "(!conf)\n");
1612		return;
1613	}
1614	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1615		conf->geo.raid_disks);
1616
 
 
1617	for (i = 0; i < conf->geo.raid_disks; i++) {
1618		char b[BDEVNAME_SIZE];
1619		tmp = conf->mirrors + i;
1620		if (tmp->rdev)
1621			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1622				i, !test_bit(In_sync, &tmp->rdev->flags),
1623			        !test_bit(Faulty, &tmp->rdev->flags),
1624				bdevname(tmp->rdev->bdev,b));
1625	}
1626}
1627
1628static void close_sync(struct r10conf *conf)
1629{
1630	wait_barrier(conf);
1631	allow_barrier(conf);
1632
1633	mempool_destroy(conf->r10buf_pool);
1634	conf->r10buf_pool = NULL;
1635}
1636
1637static int raid10_spare_active(struct mddev *mddev)
1638{
1639	int i;
1640	struct r10conf *conf = mddev->private;
1641	struct raid10_info *tmp;
1642	int count = 0;
1643	unsigned long flags;
1644
1645	/*
1646	 * Find all non-in_sync disks within the RAID10 configuration
1647	 * and mark them in_sync
1648	 */
1649	for (i = 0; i < conf->geo.raid_disks; i++) {
1650		tmp = conf->mirrors + i;
1651		if (tmp->replacement
1652		    && tmp->replacement->recovery_offset == MaxSector
1653		    && !test_bit(Faulty, &tmp->replacement->flags)
1654		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1655			/* Replacement has just become active */
1656			if (!tmp->rdev
1657			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1658				count++;
1659			if (tmp->rdev) {
1660				/* Replaced device not technically faulty,
1661				 * but we need to be sure it gets removed
1662				 * and never re-added.
1663				 */
1664				set_bit(Faulty, &tmp->rdev->flags);
1665				sysfs_notify_dirent_safe(
1666					tmp->rdev->sysfs_state);
1667			}
1668			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1669		} else if (tmp->rdev
1670			   && tmp->rdev->recovery_offset == MaxSector
1671			   && !test_bit(Faulty, &tmp->rdev->flags)
1672			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1673			count++;
1674			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1675		}
1676	}
1677	spin_lock_irqsave(&conf->device_lock, flags);
1678	mddev->degraded -= count;
1679	spin_unlock_irqrestore(&conf->device_lock, flags);
1680
1681	print_conf(conf);
1682	return count;
1683}
1684
1685static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1686{
1687	struct r10conf *conf = mddev->private;
1688	int err = -EEXIST;
1689	int mirror;
1690	int first = 0;
1691	int last = conf->geo.raid_disks - 1;
1692
1693	if (mddev->recovery_cp < MaxSector)
1694		/* only hot-add to in-sync arrays, as recovery is
1695		 * very different from resync
1696		 */
1697		return -EBUSY;
1698	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1699		return -EINVAL;
1700
1701	if (md_integrity_add_rdev(rdev, mddev))
1702		return -ENXIO;
1703
1704	if (rdev->raid_disk >= 0)
1705		first = last = rdev->raid_disk;
1706
1707	if (rdev->saved_raid_disk >= first &&
 
1708	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1709		mirror = rdev->saved_raid_disk;
1710	else
1711		mirror = first;
1712	for ( ; mirror <= last ; mirror++) {
1713		struct raid10_info *p = &conf->mirrors[mirror];
1714		if (p->recovery_disabled == mddev->recovery_disabled)
1715			continue;
1716		if (p->rdev) {
1717			if (!test_bit(WantReplacement, &p->rdev->flags) ||
1718			    p->replacement != NULL)
1719				continue;
1720			clear_bit(In_sync, &rdev->flags);
1721			set_bit(Replacement, &rdev->flags);
1722			rdev->raid_disk = mirror;
1723			err = 0;
1724			if (mddev->gendisk)
1725				disk_stack_limits(mddev->gendisk, rdev->bdev,
1726						  rdev->data_offset << 9);
1727			conf->fullsync = 1;
1728			rcu_assign_pointer(p->replacement, rdev);
1729			break;
1730		}
1731
1732		if (mddev->gendisk)
1733			disk_stack_limits(mddev->gendisk, rdev->bdev,
1734					  rdev->data_offset << 9);
1735
1736		p->head_position = 0;
1737		p->recovery_disabled = mddev->recovery_disabled - 1;
1738		rdev->raid_disk = mirror;
1739		err = 0;
1740		if (rdev->saved_raid_disk != mirror)
1741			conf->fullsync = 1;
1742		rcu_assign_pointer(p->rdev, rdev);
1743		break;
1744	}
1745	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1746		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1747
1748	print_conf(conf);
1749	return err;
1750}
1751
1752static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1753{
1754	struct r10conf *conf = mddev->private;
1755	int err = 0;
1756	int number = rdev->raid_disk;
1757	struct md_rdev **rdevp;
1758	struct raid10_info *p = conf->mirrors + number;
1759
1760	print_conf(conf);
 
 
 
1761	if (rdev == p->rdev)
1762		rdevp = &p->rdev;
1763	else if (rdev == p->replacement)
1764		rdevp = &p->replacement;
1765	else
1766		return 0;
1767
1768	if (test_bit(In_sync, &rdev->flags) ||
1769	    atomic_read(&rdev->nr_pending)) {
1770		err = -EBUSY;
1771		goto abort;
1772	}
1773	/* Only remove non-faulty devices if recovery
1774	 * is not possible.
1775	 */
1776	if (!test_bit(Faulty, &rdev->flags) &&
1777	    mddev->recovery_disabled != p->recovery_disabled &&
1778	    (!p->replacement || p->replacement == rdev) &&
1779	    number < conf->geo.raid_disks &&
1780	    enough(conf, -1)) {
1781		err = -EBUSY;
1782		goto abort;
1783	}
1784	*rdevp = NULL;
1785	synchronize_rcu();
1786	if (atomic_read(&rdev->nr_pending)) {
1787		/* lost the race, try later */
1788		err = -EBUSY;
1789		*rdevp = rdev;
1790		goto abort;
1791	} else if (p->replacement) {
 
 
 
1792		/* We must have just cleared 'rdev' */
1793		p->rdev = p->replacement;
1794		clear_bit(Replacement, &p->replacement->flags);
1795		smp_mb(); /* Make sure other CPUs may see both as identical
1796			   * but will never see neither set -- if they are careful.
1797			   */
1798		p->replacement = NULL;
1799		clear_bit(WantReplacement, &rdev->flags);
1800	} else
1801		/* We might have just removed the Replacement as faulty.
1802		 * Clear the flag just in case
1803		 */
1804		clear_bit(WantReplacement, &rdev->flags);
1805
 
1806	err = md_integrity_register(mddev);
1807
1808abort:
1809
1810	print_conf(conf);
1811	return err;
1812}
1813
1814static void end_sync_read(struct bio *bio)
1815{
1816	struct r10bio *r10_bio = bio->bi_private;
1817	struct r10conf *conf = r10_bio->mddev->private;
1818	int d;
1819
1820	if (bio == r10_bio->master_bio) {
1821		/* this is a reshape read */
1822		d = r10_bio->read_slot; /* really the read dev */
1823	} else
1824		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1825
1826	if (!bio->bi_error)
1827		set_bit(R10BIO_Uptodate, &r10_bio->state);
1828	else
1829		/* The write handler will notice the lack of
1830		 * R10BIO_Uptodate and record any errors etc
1831		 */
1832		atomic_add(r10_bio->sectors,
1833			   &conf->mirrors[d].rdev->corrected_errors);
1834
1835	/* for reconstruct, we always reschedule after a read.
1836	 * for resync, only after all reads
1837	 */
1838	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1839	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1840	    atomic_dec_and_test(&r10_bio->remaining)) {
1841		/* we have read all the blocks,
1842		 * do the comparison in process context in raid10d
1843		 */
1844		reschedule_retry(r10_bio);
1845	}
1846}
1847
1848static void end_sync_request(struct r10bio *r10_bio)
1849{
1850	struct mddev *mddev = r10_bio->mddev;
1851
1852	while (atomic_dec_and_test(&r10_bio->remaining)) {
1853		if (r10_bio->master_bio == NULL) {
1854			/* the primary of several recovery bios */
1855			sector_t s = r10_bio->sectors;
1856			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1857			    test_bit(R10BIO_WriteError, &r10_bio->state))
1858				reschedule_retry(r10_bio);
1859			else
1860				put_buf(r10_bio);
1861			md_done_sync(mddev, s, 1);
1862			break;
1863		} else {
1864			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1865			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1866			    test_bit(R10BIO_WriteError, &r10_bio->state))
1867				reschedule_retry(r10_bio);
1868			else
1869				put_buf(r10_bio);
1870			r10_bio = r10_bio2;
1871		}
1872	}
1873}
1874
1875static void end_sync_write(struct bio *bio)
1876{
1877	struct r10bio *r10_bio = bio->bi_private;
1878	struct mddev *mddev = r10_bio->mddev;
1879	struct r10conf *conf = mddev->private;
1880	int d;
1881	sector_t first_bad;
1882	int bad_sectors;
1883	int slot;
1884	int repl;
1885	struct md_rdev *rdev = NULL;
1886
1887	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1888	if (repl)
1889		rdev = conf->mirrors[d].replacement;
1890	else
1891		rdev = conf->mirrors[d].rdev;
1892
1893	if (bio->bi_error) {
1894		if (repl)
1895			md_error(mddev, rdev);
1896		else {
1897			set_bit(WriteErrorSeen, &rdev->flags);
1898			if (!test_and_set_bit(WantReplacement, &rdev->flags))
1899				set_bit(MD_RECOVERY_NEEDED,
1900					&rdev->mddev->recovery);
1901			set_bit(R10BIO_WriteError, &r10_bio->state);
1902		}
1903	} else if (is_badblock(rdev,
1904			     r10_bio->devs[slot].addr,
1905			     r10_bio->sectors,
1906			     &first_bad, &bad_sectors))
1907		set_bit(R10BIO_MadeGood, &r10_bio->state);
1908
1909	rdev_dec_pending(rdev, mddev);
1910
1911	end_sync_request(r10_bio);
1912}
1913
1914/*
1915 * Note: sync and recovery are handled very differently for raid10.
1916 * This code is for resync.
1917 * For resync, we read through virtual addresses and read all blocks.
1918 * If there is any error, we schedule a write.  The lowest numbered
1919 * drive is authoritative.
1920 * However, requests come in for physical addresses, so we need to map.
1921 * For every physical address there are raid_disks/copies virtual addresses,
1922 * which is always at least one, but is not necessarily an integer.
1923 * This means that a physical address can span multiple chunks, so we may
1924 * have to submit multiple io requests for a single sync request.
1925 */
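/*
 * Illustrative example of the mapping above (a sketch only): with
 * raid_disks = 3 and 2 copies of each chunk there are 3/2 = 1.5 virtual
 * addresses per physical address, so a chunk-sized physical range can
 * straddle two virtual chunks and the sync may have to be split into
 * more than one request.
 */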
1926/*
1927 * We check if all blocks are in-sync and only write to blocks that
1928 * aren't in sync
1929 */
1930static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1931{
1932	struct r10conf *conf = mddev->private;
1933	int i, first;
1934	struct bio *tbio, *fbio;
1935	int vcnt;
 
1936
1937	atomic_set(&r10_bio->remaining, 1);
1938
1939	/* find the first device with a block */
1940	for (i=0; i<conf->copies; i++)
1941		if (!r10_bio->devs[i].bio->bi_error)
1942			break;
1943
1944	if (i == conf->copies)
1945		goto done;
1946
1947	first = i;
1948	fbio = r10_bio->devs[i].bio;
1949	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
1950	fbio->bi_iter.bi_idx = 0;
 
1951
1952	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
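	/*
	 * Illustrative note: vcnt is the number of PAGE_SIZE bio_vec entries
	 * needed to cover r10_bio->sectors, rounded up.  With 4KiB pages
	 * (8 sectors per page), 1001 sectors give (1001 + 7) >> 3 = 126 pages.
	 */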
1953	/* now find blocks with errors */
1954	for (i=0 ; i < conf->copies ; i++) {
1955		int  j, d;
 
 
1956
1957		tbio = r10_bio->devs[i].bio;
1958
1959		if (tbio->bi_end_io != end_sync_read)
1960			continue;
1961		if (i == first)
1962			continue;
1963		if (!r10_bio->devs[i].bio->bi_error) {
 
 
 
 
1964			/* We know that the bi_io_vec layout is the same for
1965			 * both 'first' and 'i', so we just compare them.
1966			 * All vec entries are PAGE_SIZE;
1967			 */
1968			int sectors = r10_bio->sectors;
1969			for (j = 0; j < vcnt; j++) {
1970				int len = PAGE_SIZE;
1971				if (sectors < (len / 512))
1972					len = sectors * 512;
1973				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1974					   page_address(tbio->bi_io_vec[j].bv_page),
1975					   len))
1976					break;
1977				sectors -= len/512;
1978			}
1979			if (j == vcnt)
1980				continue;
1981			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
1982			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1983				/* Don't fix anything. */
1984				continue;
 
 
 
 
1985		}
1986		/* Ok, we need to write this bio, either to correct an
1987		 * inconsistency or to correct an unreadable block.
1988		 * First we need to fixup bv_offset, bv_len and
1989		 * bi_vecs, as the read request might have corrupted these
1990		 */
1991		bio_reset(tbio);
 
 
 
1992
1993		tbio->bi_vcnt = vcnt;
1994		tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
1995		tbio->bi_rw = WRITE;
1996		tbio->bi_private = r10_bio;
1997		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
1998		tbio->bi_end_io = end_sync_write;
1999
2000		bio_copy_data(tbio, fbio);
2001
2002		d = r10_bio->devs[i].devnum;
2003		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2004		atomic_inc(&r10_bio->remaining);
2005		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2006
 
 
2007		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2008		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2009		generic_make_request(tbio);
2010	}
2011
2012	/* Now write out to any replacement devices
2013	 * that are active
2014	 */
2015	for (i = 0; i < conf->copies; i++) {
2016		int d;
2017
2018		tbio = r10_bio->devs[i].repl_bio;
2019		if (!tbio || !tbio->bi_end_io)
2020			continue;
2021		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2022		    && r10_bio->devs[i].bio != fbio)
2023			bio_copy_data(tbio, fbio);
2024		d = r10_bio->devs[i].devnum;
2025		atomic_inc(&r10_bio->remaining);
2026		md_sync_acct(conf->mirrors[d].replacement->bdev,
2027			     bio_sectors(tbio));
2028		generic_make_request(tbio);
2029	}
2030
2031done:
2032	if (atomic_dec_and_test(&r10_bio->remaining)) {
2033		md_done_sync(mddev, r10_bio->sectors, 1);
2034		put_buf(r10_bio);
2035	}
2036}
2037
2038/*
2039 * Now for the recovery code.
2040 * Recovery happens across physical sectors.
2041 * We recover all non-in_sync drives by finding the virtual address of
2042 * each, and then choose a working drive that also has that virt address.
2043 * There is a separate r10_bio for each non-in_sync drive.
2044 * Only the first two slots are in use. The first for reading,
2045 * The second for writing.
2046 *
2047 */
2048static void fix_recovery_read_error(struct r10bio *r10_bio)
2049{
2050	/* We got a read error during recovery.
2051	 * We repeat the read in smaller page-sized sections.
2052	 * If a read succeeds, write it to the new device or record
2053	 * a bad block if we cannot.
2054	 * If a read fails, record a bad block on both old and
2055	 * new devices.
2056	 */
2057	struct mddev *mddev = r10_bio->mddev;
2058	struct r10conf *conf = mddev->private;
2059	struct bio *bio = r10_bio->devs[0].bio;
2060	sector_t sect = 0;
2061	int sectors = r10_bio->sectors;
2062	int idx = 0;
2063	int dr = r10_bio->devs[0].devnum;
2064	int dw = r10_bio->devs[1].devnum;
 
2065
2066	while (sectors) {
2067		int s = sectors;
2068		struct md_rdev *rdev;
2069		sector_t addr;
2070		int ok;
2071
2072		if (s > (PAGE_SIZE>>9))
2073			s = PAGE_SIZE >> 9;
2074
2075		rdev = conf->mirrors[dr].rdev;
2076		addr = r10_bio->devs[0].addr + sect;
2077		ok = sync_page_io(rdev,
2078				  addr,
2079				  s << 9,
2080				  bio->bi_io_vec[idx].bv_page,
2081				  READ, false);
2082		if (ok) {
2083			rdev = conf->mirrors[dw].rdev;
2084			addr = r10_bio->devs[1].addr + sect;
2085			ok = sync_page_io(rdev,
2086					  addr,
2087					  s << 9,
2088					  bio->bi_io_vec[idx].bv_page,
2089					  WRITE, false);
2090			if (!ok) {
2091				set_bit(WriteErrorSeen, &rdev->flags);
2092				if (!test_and_set_bit(WantReplacement,
2093						      &rdev->flags))
2094					set_bit(MD_RECOVERY_NEEDED,
2095						&rdev->mddev->recovery);
2096			}
2097		}
2098		if (!ok) {
2099			/* We don't worry if we cannot set a bad block -
2100			 * it really is bad so there is no loss in not
2101			 * recording it yet
2102			 */
2103			rdev_set_badblocks(rdev, addr, s, 0);
2104
2105			if (rdev != conf->mirrors[dw].rdev) {
2106				/* need bad block on destination too */
2107				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2108				addr = r10_bio->devs[1].addr + sect;
2109				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2110				if (!ok) {
2111					/* just abort the recovery */
2112					printk(KERN_NOTICE
2113					       "md/raid10:%s: recovery aborted"
2114					       " due to read error\n",
2115					       mdname(mddev));
2116
2117					conf->mirrors[dw].recovery_disabled
2118						= mddev->recovery_disabled;
2119					set_bit(MD_RECOVERY_INTR,
2120						&mddev->recovery);
2121					break;
2122				}
2123			}
2124		}
2125
2126		sectors -= s;
2127		sect += s;
2128		idx++;
2129	}
2130}
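/*
 * Illustrative note on the loop above: each iteration handles at most
 * PAGE_SIZE >> 9 sectors (8 with 4KiB pages), so a 256-sector recovery
 * read error is retried in 32 page-sized pieces, each read from the old
 * device and, if successful, written to the device being rebuilt.
 */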
2131
2132static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2133{
2134	struct r10conf *conf = mddev->private;
2135	int d;
2136	struct bio *wbio, *wbio2;
2137
2138	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2139		fix_recovery_read_error(r10_bio);
2140		end_sync_request(r10_bio);
2141		return;
2142	}
2143
2144	/*
2145	 * share the pages with the first bio
2146	 * and submit the write request
2147	 */
2148	d = r10_bio->devs[1].devnum;
2149	wbio = r10_bio->devs[1].bio;
2150	wbio2 = r10_bio->devs[1].repl_bio;
2151	/* Need to test wbio2->bi_end_io before we call
2152	 * generic_make_request as if the former is NULL,
2153	 * the latter is free to free wbio2.
2154	 */
2155	if (wbio2 && !wbio2->bi_end_io)
2156		wbio2 = NULL;
2157	if (wbio->bi_end_io) {
2158		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2159		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2160		generic_make_request(wbio);
2161	}
2162	if (wbio2) {
2163		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2164		md_sync_acct(conf->mirrors[d].replacement->bdev,
2165			     bio_sectors(wbio2));
2166		generic_make_request(wbio2);
2167	}
2168}
2169
2170/*
2171 * Used by fix_read_error() to decay the per rdev read_errors.
2172 * We halve the read error count for every hour that has elapsed
2173 * since the last recorded read error.
2174 *
2175 */
2176static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2177{
2178	struct timespec cur_time_mon;
2179	unsigned long hours_since_last;
2180	unsigned int read_errors = atomic_read(&rdev->read_errors);
2181
2182	ktime_get_ts(&cur_time_mon);
2183
2184	if (rdev->last_read_error.tv_sec == 0 &&
2185	    rdev->last_read_error.tv_nsec == 0) {
2186		/* first time we've seen a read error */
2187		rdev->last_read_error = cur_time_mon;
2188		return;
2189	}
2190
2191	hours_since_last = (cur_time_mon.tv_sec -
2192			    rdev->last_read_error.tv_sec) / 3600;
2193
2194	rdev->last_read_error = cur_time_mon;
2195
2196	/*
2197	 * if hours_since_last is > the number of bits in read_errors
2198	 * just set read errors to 0. We do this to avoid
2199	 * overflowing the shift of read_errors by hours_since_last.
2200	 */
2201	if (hours_since_last >= 8 * sizeof(read_errors))
2202		atomic_set(&rdev->read_errors, 0);
2203	else
2204		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2205}
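/*
 * A minimal sketch of the decay policy implemented above, kept under
 * "#if 0" because it is illustrative only and not part of the driver:
 * the error count is halved once per elapsed hour, saturating at zero
 * when the shift would exceed the width of the counter.
 */
#if 0
static unsigned int example_decayed_read_errors(unsigned int errors,
						unsigned long hours_since_last)
{
	if (hours_since_last >= 8 * sizeof(errors))
		return 0;			/* shift would overflow */
	return errors >> hours_since_last;	/* e.g. 40 errors, 3h -> 5 */
}
#endif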
2206
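/*
 * Return values of r10_sync_page_io() below (summary): -1 means the range
 * overlaps a known bad block (and, for writes, the device has already seen
 * a write error) so the I/O is not attempted, 1 means the I/O succeeded,
 * and 0 means it failed and a bad block (or a device failure) has been
 * recorded.
 */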
2207static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2208			    int sectors, struct page *page, int rw)
2209{
2210	sector_t first_bad;
2211	int bad_sectors;
2212
2213	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2214	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2215		return -1;
2216	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2217		/* success */
2218		return 1;
2219	if (rw == WRITE) {
2220		set_bit(WriteErrorSeen, &rdev->flags);
2221		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2222			set_bit(MD_RECOVERY_NEEDED,
2223				&rdev->mddev->recovery);
2224	}
2225	/* need to record an error - either for the block or the device */
2226	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2227		md_error(rdev->mddev, rdev);
2228	return 0;
2229}
2230
2231/*
2232 * This is a kernel thread which:
2233 *
2234 *	1.	Retries failed read operations on working mirrors.
2235 *	2.	Updates the raid superblock when problems are encountered.
2236 *	3.	Performs writes following reads for array synchronising.
2237 */
2238
2239static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2240{
2241	int sect = 0; /* Offset from r10_bio->sector */
2242	int sectors = r10_bio->sectors;
2243	struct md_rdev *rdev;
2244	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2245	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2246
2247	/* still own a reference to this rdev, so it cannot
2248	 * have been cleared recently.
2249	 */
2250	rdev = conf->mirrors[d].rdev;
2251
2252	if (test_bit(Faulty, &rdev->flags))
2253		/* drive has already been failed, just ignore any
2254		   more fix_read_error() attempts */
2255		return;
2256
2257	check_decay_read_errors(mddev, rdev);
2258	atomic_inc(&rdev->read_errors);
2259	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2260		char b[BDEVNAME_SIZE];
2261		bdevname(rdev->bdev, b);
2262
2263		printk(KERN_NOTICE
2264		       "md/raid10:%s: %s: Raid device exceeded "
2265		       "read_error threshold [cur %d:max %d]\n",
2266		       mdname(mddev), b,
2267		       atomic_read(&rdev->read_errors), max_read_errors);
2268		printk(KERN_NOTICE
2269		       "md/raid10:%s: %s: Failing raid device\n",
2270		       mdname(mddev), b);
2271		md_error(mddev, conf->mirrors[d].rdev);
2272		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2273		return;
2274	}
2275
2276	while(sectors) {
2277		int s = sectors;
2278		int sl = r10_bio->read_slot;
2279		int success = 0;
2280		int start;
2281
2282		if (s > (PAGE_SIZE>>9))
2283			s = PAGE_SIZE >> 9;
2284
2285		rcu_read_lock();
2286		do {
2287			sector_t first_bad;
2288			int bad_sectors;
2289
2290			d = r10_bio->devs[sl].devnum;
2291			rdev = rcu_dereference(conf->mirrors[d].rdev);
2292			if (rdev &&
2293			    test_bit(In_sync, &rdev->flags) &&
 
2294			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2295					&first_bad, &bad_sectors) == 0) {
2296				atomic_inc(&rdev->nr_pending);
2297				rcu_read_unlock();
2298				success = sync_page_io(rdev,
2299						       r10_bio->devs[sl].addr +
2300						       sect,
2301						       s<<9,
2302						       conf->tmppage, READ, false);
 
2303				rdev_dec_pending(rdev, mddev);
2304				rcu_read_lock();
2305				if (success)
2306					break;
2307			}
2308			sl++;
2309			if (sl == conf->copies)
2310				sl = 0;
2311		} while (!success && sl != r10_bio->read_slot);
2312		rcu_read_unlock();
2313
2314		if (!success) {
2315			/* Cannot read from anywhere, just mark the block
2316			 * as bad on the first device to discourage future
2317			 * reads.
2318			 */
2319			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2320			rdev = conf->mirrors[dn].rdev;
2321
2322			if (!rdev_set_badblocks(
2323				    rdev,
2324				    r10_bio->devs[r10_bio->read_slot].addr
2325				    + sect,
2326				    s, 0)) {
2327				md_error(mddev, rdev);
2328				r10_bio->devs[r10_bio->read_slot].bio
2329					= IO_BLOCKED;
2330			}
2331			break;
2332		}
2333
2334		start = sl;
2335		/* write it back and re-read */
2336		rcu_read_lock();
2337		while (sl != r10_bio->read_slot) {
2338			char b[BDEVNAME_SIZE];
2339
2340			if (sl==0)
2341				sl = conf->copies;
2342			sl--;
2343			d = r10_bio->devs[sl].devnum;
2344			rdev = rcu_dereference(conf->mirrors[d].rdev);
2345			if (!rdev ||
 
2346			    !test_bit(In_sync, &rdev->flags))
2347				continue;
2348
2349			atomic_inc(&rdev->nr_pending);
2350			rcu_read_unlock();
2351			if (r10_sync_page_io(rdev,
2352					     r10_bio->devs[sl].addr +
2353					     sect,
2354					     s, conf->tmppage, WRITE)
2355			    == 0) {
2356				/* Well, this device is dead */
2357				printk(KERN_NOTICE
2358				       "md/raid10:%s: read correction "
2359				       "write failed"
2360				       " (%d sectors at %llu on %s)\n",
2361				       mdname(mddev), s,
2362				       (unsigned long long)(
2363					       sect +
2364					       choose_data_offset(r10_bio,
2365								  rdev)),
2366				       bdevname(rdev->bdev, b));
2367				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2368				       "drive\n",
2369				       mdname(mddev),
2370				       bdevname(rdev->bdev, b));
2371			}
2372			rdev_dec_pending(rdev, mddev);
2373			rcu_read_lock();
2374		}
2375		sl = start;
2376		while (sl != r10_bio->read_slot) {
2377			char b[BDEVNAME_SIZE];
2378
2379			if (sl==0)
2380				sl = conf->copies;
2381			sl--;
2382			d = r10_bio->devs[sl].devnum;
2383			rdev = rcu_dereference(conf->mirrors[d].rdev);
2384			if (!rdev ||
 
2385			    !test_bit(In_sync, &rdev->flags))
2386				continue;
2387
2388			atomic_inc(&rdev->nr_pending);
2389			rcu_read_unlock();
2390			switch (r10_sync_page_io(rdev,
2391					     r10_bio->devs[sl].addr +
2392					     sect,
2393					     s, conf->tmppage,
2394						 READ)) {
2395			case 0:
2396				/* Well, this device is dead */
2397				printk(KERN_NOTICE
2398				       "md/raid10:%s: unable to read back "
2399				       "corrected sectors"
2400				       " (%d sectors at %llu on %s)\n",
2401				       mdname(mddev), s,
2402				       (unsigned long long)(
2403					       sect +
2404					       choose_data_offset(r10_bio, rdev)),
2405				       bdevname(rdev->bdev, b));
2406				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2407				       "drive\n",
2408				       mdname(mddev),
2409				       bdevname(rdev->bdev, b));
2410				break;
2411			case 1:
2412				printk(KERN_INFO
2413				       "md/raid10:%s: read error corrected"
2414				       " (%d sectors at %llu on %s)\n",
2415				       mdname(mddev), s,
2416				       (unsigned long long)(
2417					       sect +
2418					       choose_data_offset(r10_bio, rdev)),
2419				       bdevname(rdev->bdev, b));
2420				atomic_add(s, &rdev->corrected_errors);
2421			}
2422
2423			rdev_dec_pending(rdev, mddev);
2424			rcu_read_lock();
2425		}
2426		rcu_read_unlock();
2427
2428		sectors -= s;
2429		sect += s;
2430	}
2431}
2432
2433static int narrow_write_error(struct r10bio *r10_bio, int i)
2434{
2435	struct bio *bio = r10_bio->master_bio;
2436	struct mddev *mddev = r10_bio->mddev;
2437	struct r10conf *conf = mddev->private;
2438	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2439	/* bio has the data to be written to slot 'i' where
2440	 * we just recently had a write error.
2441	 * We repeatedly clone the bio and trim down to one block,
2442	 * then try the write.  Where the write fails we record
2443	 * a bad block.
2444	 * It is conceivable that the bio doesn't exactly align with
2445	 * blocks.  We must handle this.
2446	 *
2447	 * We currently own a reference to the rdev.
2448	 */
2449
2450	int block_sectors;
2451	sector_t sector;
2452	int sectors;
2453	int sect_to_write = r10_bio->sectors;
2454	int ok = 1;
2455
2456	if (rdev->badblocks.shift < 0)
2457		return 0;
2458
2459	block_sectors = roundup(1 << rdev->badblocks.shift,
2460				bdev_logical_block_size(rdev->bdev) >> 9);
2461	sector = r10_bio->sector;
2462	sectors = ((r10_bio->sector + block_sectors)
2463		   & ~(sector_t)(block_sectors - 1))
2464		- sector;
2465
2466	while (sect_to_write) {
2467		struct bio *wbio;
 
2468		if (sectors > sect_to_write)
2469			sectors = sect_to_write;
2470		/* Write at 'sector' for 'sectors' */
2471		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 
2472		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2473		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
2474				   choose_data_offset(r10_bio, rdev) +
2475				   (sector - r10_bio->sector));
2476		wbio->bi_bdev = rdev->bdev;
2477		if (submit_bio_wait(WRITE, wbio) < 0)
 
2478			/* Failure! */
2479			ok = rdev_set_badblocks(rdev, sector,
2480						sectors, 0)
2481				&& ok;
2482
2483		bio_put(wbio);
2484		sect_to_write -= sectors;
2485		sector += sectors;
2486		sectors = block_sectors;
2487	}
2488	return ok;
2489}
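/*
 * Worked example of the splitting above (illustrative, assuming 512-byte
 * logical blocks): with badblocks.shift = 3 the bad-block granularity is
 * 8 sectors; for r10_bio->sector = 1234 the first cloned write covers 6
 * sectors (up to the 1240 boundary) and each subsequent clone covers 8,
 * so any failure can be recorded as whole bad blocks.
 */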
2490
2491static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2492{
2493	int slot = r10_bio->read_slot;
2494	struct bio *bio;
2495	struct r10conf *conf = mddev->private;
2496	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2497	char b[BDEVNAME_SIZE];
2498	unsigned long do_sync;
2499	int max_sectors;
2500
2501	/* we got a read error. Maybe the drive is bad.  Maybe just
2502	 * the block and we can fix it.
2503	 * We freeze all other IO, and try reading the block from
2504	 * other devices.  When we find one, we re-write
2505	 * the bad block and check that this fixes the read error.
2506	 * This is all done synchronously while the array is
2507	 * frozen.
2508	 */
2509	bio = r10_bio->devs[slot].bio;
2510	bdevname(bio->bi_bdev, b);
2511	bio_put(bio);
2512	r10_bio->devs[slot].bio = NULL;
2513
2514	if (mddev->ro == 0) {
 
 
2515		freeze_array(conf, 1);
2516		fix_read_error(conf, mddev, r10_bio);
2517		unfreeze_array(conf);
2518	} else
2519		r10_bio->devs[slot].bio = IO_BLOCKED;
2520
2521	rdev_dec_pending(rdev, mddev);
2522
2523read_more:
2524	rdev = read_balance(conf, r10_bio, &max_sectors);
2525	if (rdev == NULL) {
2526		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2527		       " read error for block %llu\n",
2528		       mdname(mddev), b,
2529		       (unsigned long long)r10_bio->sector);
2530		raid_end_bio_io(r10_bio);
2531		return;
2532	}
2533
2534	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2535	slot = r10_bio->read_slot;
2536	printk_ratelimited(
2537		KERN_ERR
2538		"md/raid10:%s: %s: redirecting "
2539		"sector %llu to another mirror\n",
2540		mdname(mddev),
2541		bdevname(rdev->bdev, b),
2542		(unsigned long long)r10_bio->sector);
2543	bio = bio_clone_mddev(r10_bio->master_bio,
2544			      GFP_NOIO, mddev);
2545	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2546	r10_bio->devs[slot].bio = bio;
2547	r10_bio->devs[slot].rdev = rdev;
2548	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2549		+ choose_data_offset(r10_bio, rdev);
2550	bio->bi_bdev = rdev->bdev;
2551	bio->bi_rw = READ | do_sync;
2552	bio->bi_private = r10_bio;
2553	bio->bi_end_io = raid10_end_read_request;
2554	if (max_sectors < r10_bio->sectors) {
2555		/* Drat - have to split this up more */
2556		struct bio *mbio = r10_bio->master_bio;
2557		int sectors_handled =
2558			r10_bio->sector + max_sectors
2559			- mbio->bi_iter.bi_sector;
2560		r10_bio->sectors = max_sectors;
2561		spin_lock_irq(&conf->device_lock);
2562		if (mbio->bi_phys_segments == 0)
2563			mbio->bi_phys_segments = 2;
2564		else
2565			mbio->bi_phys_segments++;
2566		spin_unlock_irq(&conf->device_lock);
2567		generic_make_request(bio);
2568
2569		r10_bio = mempool_alloc(conf->r10bio_pool,
2570					GFP_NOIO);
2571		r10_bio->master_bio = mbio;
2572		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2573		r10_bio->state = 0;
2574		set_bit(R10BIO_ReadError,
2575			&r10_bio->state);
2576		r10_bio->mddev = mddev;
2577		r10_bio->sector = mbio->bi_iter.bi_sector
2578			+ sectors_handled;
2579
2580		goto read_more;
2581	} else
2582		generic_make_request(bio);
2583}
2584
2585static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2586{
2587	/* Some sort of write request has finished and it
2588	 * succeeded in writing where we thought there was a
2589	 * bad block.  So forget the bad block.
2590	 * Or possibly it failed and we need to record
2591	 * a bad block.
2592	 */
2593	int m;
2594	struct md_rdev *rdev;
2595
2596	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2597	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2598		for (m = 0; m < conf->copies; m++) {
2599			int dev = r10_bio->devs[m].devnum;
2600			rdev = conf->mirrors[dev].rdev;
2601			if (r10_bio->devs[m].bio == NULL)
 
2602				continue;
2603			if (!r10_bio->devs[m].bio->bi_error) {
2604				rdev_clear_badblocks(
2605					rdev,
2606					r10_bio->devs[m].addr,
2607					r10_bio->sectors, 0);
2608			} else {
2609				if (!rdev_set_badblocks(
2610					    rdev,
2611					    r10_bio->devs[m].addr,
2612					    r10_bio->sectors, 0))
2613					md_error(conf->mddev, rdev);
2614			}
2615			rdev = conf->mirrors[dev].replacement;
2616			if (r10_bio->devs[m].repl_bio == NULL)
 
2617				continue;
2618
2619			if (!r10_bio->devs[m].repl_bio->bi_error) {
2620				rdev_clear_badblocks(
2621					rdev,
2622					r10_bio->devs[m].addr,
2623					r10_bio->sectors, 0);
2624			} else {
2625				if (!rdev_set_badblocks(
2626					    rdev,
2627					    r10_bio->devs[m].addr,
2628					    r10_bio->sectors, 0))
2629					md_error(conf->mddev, rdev);
2630			}
2631		}
2632		put_buf(r10_bio);
2633	} else {
2634		bool fail = false;
2635		for (m = 0; m < conf->copies; m++) {
2636			int dev = r10_bio->devs[m].devnum;
2637			struct bio *bio = r10_bio->devs[m].bio;
2638			rdev = conf->mirrors[dev].rdev;
2639			if (bio == IO_MADE_GOOD) {
2640				rdev_clear_badblocks(
2641					rdev,
2642					r10_bio->devs[m].addr,
2643					r10_bio->sectors, 0);
2644				rdev_dec_pending(rdev, conf->mddev);
2645			} else if (bio != NULL && bio->bi_error) {
2646				fail = true;
2647				if (!narrow_write_error(r10_bio, m)) {
2648					md_error(conf->mddev, rdev);
2649					set_bit(R10BIO_Degraded,
2650						&r10_bio->state);
2651				}
2652				rdev_dec_pending(rdev, conf->mddev);
2653			}
2654			bio = r10_bio->devs[m].repl_bio;
2655			rdev = conf->mirrors[dev].replacement;
2656			if (rdev && bio == IO_MADE_GOOD) {
2657				rdev_clear_badblocks(
2658					rdev,
2659					r10_bio->devs[m].addr,
2660					r10_bio->sectors, 0);
2661				rdev_dec_pending(rdev, conf->mddev);
2662			}
2663		}
2664		if (fail) {
2665			spin_lock_irq(&conf->device_lock);
2666			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2667			conf->nr_queued++;
2668			spin_unlock_irq(&conf->device_lock);
2669			md_wakeup_thread(conf->mddev->thread);
2670		} else {
2671			if (test_bit(R10BIO_WriteError,
2672				     &r10_bio->state))
2673				close_write(r10_bio);
2674			raid_end_bio_io(r10_bio);
2675		}
2676	}
2677}
2678
2679static void raid10d(struct md_thread *thread)
2680{
2681	struct mddev *mddev = thread->mddev;
2682	struct r10bio *r10_bio;
2683	unsigned long flags;
2684	struct r10conf *conf = mddev->private;
2685	struct list_head *head = &conf->retry_list;
2686	struct blk_plug plug;
2687
2688	md_check_recovery(mddev);
2689
2690	if (!list_empty_careful(&conf->bio_end_io_list) &&
2691	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2692		LIST_HEAD(tmp);
2693		spin_lock_irqsave(&conf->device_lock, flags);
2694		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
2695			while (!list_empty(&conf->bio_end_io_list)) {
2696				list_move(conf->bio_end_io_list.prev, &tmp);
2697				conf->nr_queued--;
2698			}
2699		}
2700		spin_unlock_irqrestore(&conf->device_lock, flags);
2701		while (!list_empty(&tmp)) {
2702			r10_bio = list_first_entry(&tmp, struct r10bio,
2703						   retry_list);
2704			list_del(&r10_bio->retry_list);
2705			if (mddev->degraded)
2706				set_bit(R10BIO_Degraded, &r10_bio->state);
2707
2708			if (test_bit(R10BIO_WriteError,
2709				     &r10_bio->state))
2710				close_write(r10_bio);
2711			raid_end_bio_io(r10_bio);
2712		}
2713	}
2714
2715	blk_start_plug(&plug);
2716	for (;;) {
2717
2718		flush_pending_writes(conf);
2719
2720		spin_lock_irqsave(&conf->device_lock, flags);
2721		if (list_empty(head)) {
2722			spin_unlock_irqrestore(&conf->device_lock, flags);
2723			break;
2724		}
2725		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2726		list_del(head->prev);
2727		conf->nr_queued--;
2728		spin_unlock_irqrestore(&conf->device_lock, flags);
2729
2730		mddev = r10_bio->mddev;
2731		conf = mddev->private;
2732		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2733		    test_bit(R10BIO_WriteError, &r10_bio->state))
2734			handle_write_completed(conf, r10_bio);
2735		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2736			reshape_request_write(mddev, r10_bio);
2737		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2738			sync_request_write(mddev, r10_bio);
2739		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2740			recovery_request_write(mddev, r10_bio);
2741		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2742			handle_read_error(mddev, r10_bio);
2743		else {
2744			/* just a partial read to be scheduled from a
2745			 * separate context
2746			 */
2747			int slot = r10_bio->read_slot;
2748			generic_make_request(r10_bio->devs[slot].bio);
2749		}
2750
2751		cond_resched();
2752		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2753			md_check_recovery(mddev);
2754	}
2755	blk_finish_plug(&plug);
2756}
2757
2758static int init_resync(struct r10conf *conf)
2759{
2760	int buffs;
2761	int i;
2762
2763	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2764	BUG_ON(conf->r10buf_pool);
2765	conf->have_replacement = 0;
2766	for (i = 0; i < conf->geo.raid_disks; i++)
2767		if (conf->mirrors[i].replacement)
2768			conf->have_replacement = 1;
2769	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2770	if (!conf->r10buf_pool)
2771		return -ENOMEM;
 
2772	conf->next_resync = 0;
2773	return 0;
2774}
2775
2776/*
2777 * perform a "sync" on one "block"
2778 *
2779 * We need to make sure that no normal I/O requests - particularly write
2780 * requests - conflict with active sync requests.
2781 *
2782 * This is achieved by tracking pending requests and a 'barrier' concept
2783 * that can be installed to exclude normal IO requests.
2784 *
2785 * Resync and recovery are handled very differently.
2786 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2787 *
2788 * For resync, we iterate over virtual addresses, read all copies,
2789 * and update if there are differences.  If only one copy is live,
2790 * skip it.
2791 * For recovery, we iterate over physical addresses, read a good
2792 * value for each non-in_sync drive, and over-write.
2793 *
2794 * So, for recovery we may have several outstanding complex requests for a
2795 * given address, one for each out-of-sync device.  We model this by allocating
2796 * a number of r10_bio structures, one for each out-of-sync device.
2797 * As we set up these structures, we collect all the bios together into a list
2798 * which we then process collectively to add pages, and then process again
2799 * to pass to generic_make_request.
2800 *
2801 * The r10_bio structures are linked using a borrowed master_bio pointer.
2802 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2803 * has its remaining count decremented to 0, the whole complex operation
2804 * is complete.
2805 *
2806 */
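/*
 * For example (illustrative): recovering two out-of-sync devices at the
 * same virtual address allocates two r10_bios.  The second one's
 * master_bio points at the first, and the first's ->remaining is bumped
 * for that link; end_sync_request() walks the chain as each completes,
 * and md_done_sync() is only called from the r10_bio whose master_bio
 * is NULL.
 */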
2807
2808static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2809			     int *skipped)
2810{
2811	struct r10conf *conf = mddev->private;
2812	struct r10bio *r10_bio;
2813	struct bio *biolist = NULL, *bio;
2814	sector_t max_sector, nr_sectors;
2815	int i;
2816	int max_sync;
2817	sector_t sync_blocks;
2818	sector_t sectors_skipped = 0;
2819	int chunks_skipped = 0;
2820	sector_t chunk_mask = conf->geo.chunk_mask;
 
2821
2822	if (!conf->r10buf_pool)
2823		if (init_resync(conf))
2824			return 0;
2825
2826	/*
2827	 * Allow skipping a full rebuild for incremental assembly
2828	 * of a clean array, like RAID1 does.
2829	 */
2830	if (mddev->bitmap == NULL &&
2831	    mddev->recovery_cp == MaxSector &&
2832	    mddev->reshape_position == MaxSector &&
2833	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2834	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2835	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2836	    conf->fullsync == 0) {
2837		*skipped = 1;
2838		return mddev->dev_sectors - sector_nr;
2839	}
2840
2841 skipped:
2842	max_sector = mddev->dev_sectors;
2843	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2844	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2845		max_sector = mddev->resync_max_sectors;
2846	if (sector_nr >= max_sector) {
 
 
 
2847		/* If we aborted, we need to abort the
2848		 * sync on the 'current' bitmap chunks (there can
2849		 * be several when recovering multiple devices),
2850		 * as we may have started syncing them but not finished.
2851		 * We can find the current address in
2852		 * mddev->curr_resync, but for recovery,
2853		 * we need to convert that to several
2854		 * virtual addresses.
2855		 */
2856		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2857			end_reshape(conf);
2858			close_sync(conf);
2859			return 0;
2860		}
2861
2862		if (mddev->curr_resync < max_sector) { /* aborted */
2863			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2864				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2865						&sync_blocks, 1);
2866			else for (i = 0; i < conf->geo.raid_disks; i++) {
2867				sector_t sect =
2868					raid10_find_virt(conf, mddev->curr_resync, i);
2869				bitmap_end_sync(mddev->bitmap, sect,
2870						&sync_blocks, 1);
2871			}
2872		} else {
2873			/* completed sync */
2874			if ((!mddev->bitmap || conf->fullsync)
2875			    && conf->have_replacement
2876			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2877				/* Completed a full sync so the replacements
2878				 * are now fully recovered.
2879				 */
2880				for (i = 0; i < conf->geo.raid_disks; i++)
2881					if (conf->mirrors[i].replacement)
2882						conf->mirrors[i].replacement
2883							->recovery_offset
2884							= MaxSector;
 
 
 
2885			}
2886			conf->fullsync = 0;
2887		}
2888		bitmap_close_sync(mddev->bitmap);
2889		close_sync(conf);
2890		*skipped = 1;
2891		return sectors_skipped;
2892	}
2893
2894	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2895		return reshape_request(mddev, sector_nr, skipped);
2896
2897	if (chunks_skipped >= conf->geo.raid_disks) {
2898		/* if there has been nothing to do on any drive,
2899		 * then there is nothing to do at all.
2900		 */
2901		*skipped = 1;
2902		return (max_sector - sector_nr) + sectors_skipped;
2903	}
2904
2905	if (max_sector > mddev->resync_max)
2906		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2907
2908	/* make sure whole request will fit in a chunk - if chunks
2909	 * are meaningful
2910	 */
2911	if (conf->geo.near_copies < conf->geo.raid_disks &&
2912	    max_sector > (sector_nr | chunk_mask))
2913		max_sector = (sector_nr | chunk_mask) + 1;
2914
2915	/* Again, very different code for resync and recovery.
2916	 * Both must result in an r10bio with a list of bios that
2917	 * have bi_end_io, bi_sector, bi_bdev set,
2918	 * and bi_private set to the r10bio.
2919	 * For recovery, we may actually create several r10bios
2920	 * with 2 bios in each, that correspond to the bios in the main one.
2921	 * In this case, the subordinate r10bios link back through a
2922	 * borrowed master_bio pointer, and the counter in the master
2923	 * includes a ref from each subordinate.
2924	 */
2925	/* First, we decide what to do and set ->bi_end_io
2926	 * To end_sync_read if we want to read, and
2927	 * end_sync_write if we will want to write.
2928	 */
2929
2930	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
2931	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2932		/* recovery... the complicated one */
2933		int j;
2934		r10_bio = NULL;
2935
2936		for (i = 0 ; i < conf->geo.raid_disks; i++) {
2937			int still_degraded;
2938			struct r10bio *rb2;
2939			sector_t sect;
2940			int must_sync;
2941			int any_working;
 
 
2942			struct raid10_info *mirror = &conf->mirrors[i];
2943
2944			if ((mirror->rdev == NULL ||
2945			     test_bit(In_sync, &mirror->rdev->flags))
2946			    &&
2947			    (mirror->replacement == NULL ||
2948			     test_bit(Faulty,
2949				      &mirror->replacement->flags)))
2950				continue;
 
2951
2952			still_degraded = 0;
2953			/* want to reconstruct this device */
2954			rb2 = r10_bio;
2955			sect = raid10_find_virt(conf, sector_nr, i);
2956			if (sect >= mddev->resync_max_sectors) {
2957				/* last stripe is not complete - don't
2958				 * try to recover this sector.
2959				 */
 
2960				continue;
2961			}
 
 
2962			/* Unless we are doing a full sync or a replacement,
2963			 * we only need to recover the block if it is set in
2964			 * the bitmap
2965			 */
2966			must_sync = bitmap_start_sync(mddev->bitmap, sect,
2967						      &sync_blocks, 1);
2968			if (sync_blocks < max_sync)
2969				max_sync = sync_blocks;
2970			if (!must_sync &&
2971			    mirror->replacement == NULL &&
2972			    !conf->fullsync) {
2973				/* yep, skip the sync_blocks here, but don't assume
2974				 * that there will never be anything to do here
2975				 */
2976				chunks_skipped = -1;
 
2977				continue;
2978			}
 
 
 
 
2979
2980			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2981			r10_bio->state = 0;
2982			raise_barrier(conf, rb2 != NULL);
2983			atomic_set(&r10_bio->remaining, 0);
2984
2985			r10_bio->master_bio = (struct bio*)rb2;
2986			if (rb2)
2987				atomic_inc(&rb2->remaining);
2988			r10_bio->mddev = mddev;
2989			set_bit(R10BIO_IsRecover, &r10_bio->state);
2990			r10_bio->sector = sect;
2991
2992			raid10_find_phys(conf, r10_bio);
2993
2994			/* Need to check if the array will still be
2995			 * degraded
2996			 */
2997			for (j = 0; j < conf->geo.raid_disks; j++)
2998				if (conf->mirrors[j].rdev == NULL ||
2999				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
 
 
3000					still_degraded = 1;
3001					break;
3002				}
 
3003
3004			must_sync = bitmap_start_sync(mddev->bitmap, sect,
3005						      &sync_blocks, still_degraded);
3006
3007			any_working = 0;
3008			for (j=0; j<conf->copies;j++) {
3009				int k;
3010				int d = r10_bio->devs[j].devnum;
3011				sector_t from_addr, to_addr;
3012				struct md_rdev *rdev;
 
3013				sector_t sector, first_bad;
3014				int bad_sectors;
3015				if (!conf->mirrors[d].rdev ||
3016				    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
3017					continue;
3018				/* This is where we read from */
3019				any_working = 1;
3020				rdev = conf->mirrors[d].rdev;
3021				sector = r10_bio->devs[j].addr;
3022
3023				if (is_badblock(rdev, sector, max_sync,
3024						&first_bad, &bad_sectors)) {
3025					if (first_bad > sector)
3026						max_sync = first_bad - sector;
3027					else {
3028						bad_sectors -= (sector
3029								- first_bad);
3030						if (max_sync > bad_sectors)
3031							max_sync = bad_sectors;
3032						continue;
3033					}
3034				}
3035				bio = r10_bio->devs[0].bio;
3036				bio_reset(bio);
3037				bio->bi_next = biolist;
3038				biolist = bio;
3039				bio->bi_private = r10_bio;
3040				bio->bi_end_io = end_sync_read;
3041				bio->bi_rw = READ;
 
 
3042				from_addr = r10_bio->devs[j].addr;
3043				bio->bi_iter.bi_sector = from_addr +
3044					rdev->data_offset;
3045				bio->bi_bdev = rdev->bdev;
3046				atomic_inc(&rdev->nr_pending);
3047				/* and we write to 'i' (if not in_sync) */
3048
3049				for (k=0; k<conf->copies; k++)
3050					if (r10_bio->devs[k].devnum == i)
3051						break;
3052				BUG_ON(k == conf->copies);
3053				to_addr = r10_bio->devs[k].addr;
3054				r10_bio->devs[0].devnum = d;
3055				r10_bio->devs[0].addr = from_addr;
3056				r10_bio->devs[1].devnum = i;
3057				r10_bio->devs[1].addr = to_addr;
3058
3059				rdev = mirror->rdev;
3060				if (!test_bit(In_sync, &rdev->flags)) {
3061					bio = r10_bio->devs[1].bio;
3062					bio_reset(bio);
3063					bio->bi_next = biolist;
3064					biolist = bio;
3065					bio->bi_private = r10_bio;
3066					bio->bi_end_io = end_sync_write;
3067					bio->bi_rw = WRITE;
3068					bio->bi_iter.bi_sector = to_addr
3069						+ rdev->data_offset;
3070					bio->bi_bdev = rdev->bdev;
3071					atomic_inc(&r10_bio->remaining);
3072				} else
3073					r10_bio->devs[1].bio->bi_end_io = NULL;
3074
3075				/* and maybe write to replacement */
3076				bio = r10_bio->devs[1].repl_bio;
3077				if (bio)
3078					bio->bi_end_io = NULL;
3079				rdev = mirror->replacement;
3080				/* Note: if rdev != NULL, then bio
3081				 * cannot be NULL as r10buf_pool_alloc will
3082				 * have allocated it.
3083				 * So the second test here is pointless.
3084				 * But it keeps semantic-checkers happy, and
3085				 * this comment keeps human reviewers
3086				 * happy.
3087				 */
3088				if (rdev == NULL || bio == NULL ||
3089				    test_bit(Faulty, &rdev->flags))
3090					break;
3091				bio_reset(bio);
3092				bio->bi_next = biolist;
3093				biolist = bio;
3094				bio->bi_private = r10_bio;
3095				bio->bi_end_io = end_sync_write;
3096				bio->bi_rw = WRITE;
3097				bio->bi_iter.bi_sector = to_addr +
3098					rdev->data_offset;
3099				bio->bi_bdev = rdev->bdev;
3100				atomic_inc(&r10_bio->remaining);
3101				break;
3102			}
 
3103			if (j == conf->copies) {
3104				/* Cannot recover, so abort the recovery or
3105				 * record a bad block */
3106				if (any_working) {
3107					/* problem is that there are bad blocks
3108					 * on other device(s)
3109					 */
3110					int k;
3111					for (k = 0; k < conf->copies; k++)
3112						if (r10_bio->devs[k].devnum == i)
3113							break;
3114					if (!test_bit(In_sync,
3115						      &mirror->rdev->flags)
3116					    && !rdev_set_badblocks(
3117						    mirror->rdev,
3118						    r10_bio->devs[k].addr,
3119						    max_sync, 0))
3120						any_working = 0;
3121					if (mirror->replacement &&
3122					    !rdev_set_badblocks(
3123						    mirror->replacement,
3124						    r10_bio->devs[k].addr,
3125						    max_sync, 0))
3126						any_working = 0;
3127				}
3128				if (!any_working)  {
3129					if (!test_and_set_bit(MD_RECOVERY_INTR,
3130							      &mddev->recovery))
3131						printk(KERN_INFO "md/raid10:%s: insufficient "
3132						       "working devices for recovery.\n",
3133						       mdname(mddev));
3134					mirror->recovery_disabled
3135						= mddev->recovery_disabled;
3136				}
3137				put_buf(r10_bio);
3138				if (rb2)
3139					atomic_dec(&rb2->remaining);
3140				r10_bio = rb2;
 
 
 
3141				break;
3142			}
3143		}
3144		if (biolist == NULL) {
3145			while (r10_bio) {
3146				struct r10bio *rb2 = r10_bio;
3147				r10_bio = (struct r10bio*) rb2->master_bio;
3148				rb2->master_bio = NULL;
3149				put_buf(rb2);
3150			}
3151			goto giveup;
3152		}
3153	} else {
3154		/* resync. Schedule a read for every block at this virt offset */
3155		int count = 0;
3156
3157		bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
3158
3159		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3160				       &sync_blocks, mddev->degraded) &&
3161		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3162						 &mddev->recovery)) {
3163			/* We can skip this block */
3164			*skipped = 1;
3165			return sync_blocks + sectors_skipped;
3166		}
3167		if (sync_blocks < max_sync)
3168			max_sync = sync_blocks;
3169		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3170		r10_bio->state = 0;
3171
3172		r10_bio->mddev = mddev;
3173		atomic_set(&r10_bio->remaining, 0);
3174		raise_barrier(conf, 0);
3175		conf->next_resync = sector_nr;
3176
3177		r10_bio->master_bio = NULL;
3178		r10_bio->sector = sector_nr;
3179		set_bit(R10BIO_IsSync, &r10_bio->state);
3180		raid10_find_phys(conf, r10_bio);
3181		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3182
3183		for (i = 0; i < conf->copies; i++) {
3184			int d = r10_bio->devs[i].devnum;
3185			sector_t first_bad, sector;
3186			int bad_sectors;
 
3187
3188			if (r10_bio->devs[i].repl_bio)
3189				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3190
3191			bio = r10_bio->devs[i].bio;
3192			bio_reset(bio);
3193			bio->bi_error = -EIO;
3194			if (conf->mirrors[d].rdev == NULL ||
3195			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
 
3196				continue;
 
3197			sector = r10_bio->devs[i].addr;
3198			if (is_badblock(conf->mirrors[d].rdev,
3199					sector, max_sync,
3200					&first_bad, &bad_sectors)) {
3201				if (first_bad > sector)
3202					max_sync = first_bad - sector;
3203				else {
3204					bad_sectors -= (sector - first_bad);
3205					if (max_sync > bad_sectors)
3206						max_sync = bad_sectors;
 
3207					continue;
3208				}
3209			}
3210			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3211			atomic_inc(&r10_bio->remaining);
3212			bio->bi_next = biolist;
3213			biolist = bio;
3214			bio->bi_private = r10_bio;
3215			bio->bi_end_io = end_sync_read;
3216			bio->bi_rw = READ;
3217			bio->bi_iter.bi_sector = sector +
3218				conf->mirrors[d].rdev->data_offset;
3219			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 
3220			count++;
3221
3222			if (conf->mirrors[d].replacement == NULL ||
3223			    test_bit(Faulty,
3224				     &conf->mirrors[d].replacement->flags))
3225				continue;
 
 
3226
3227			/* Need to set up for writing to the replacement */
3228			bio = r10_bio->devs[i].repl_bio;
3229			bio_reset(bio);
3230			bio->bi_error = -EIO;
3231
3232			sector = r10_bio->devs[i].addr;
3233			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3234			bio->bi_next = biolist;
3235			biolist = bio;
3236			bio->bi_private = r10_bio;
3237			bio->bi_end_io = end_sync_write;
3238			bio->bi_rw = WRITE;
3239			bio->bi_iter.bi_sector = sector +
3240				conf->mirrors[d].replacement->data_offset;
3241			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
 
3242			count++;
 
3243		}
3244
3245		if (count < 2) {
3246			for (i=0; i<conf->copies; i++) {
3247				int d = r10_bio->devs[i].devnum;
3248				if (r10_bio->devs[i].bio->bi_end_io)
3249					rdev_dec_pending(conf->mirrors[d].rdev,
3250							 mddev);
3251				if (r10_bio->devs[i].repl_bio &&
3252				    r10_bio->devs[i].repl_bio->bi_end_io)
3253					rdev_dec_pending(
3254						conf->mirrors[d].replacement,
3255						mddev);
3256			}
3257			put_buf(r10_bio);
3258			biolist = NULL;
3259			goto giveup;
3260		}
3261	}
3262
3263	nr_sectors = 0;
3264	if (sector_nr + max_sync < max_sector)
3265		max_sector = sector_nr + max_sync;
3266	do {
3267		struct page *page;
3268		int len = PAGE_SIZE;
3269		if (sector_nr + (len>>9) > max_sector)
3270			len = (max_sector - sector_nr) << 9;
3271		if (len == 0)
3272			break;
3273		for (bio= biolist ; bio ; bio=bio->bi_next) {
3274			struct bio *bio2;
3275			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3276			if (bio_add_page(bio, page, len, 0))
3277				continue;
3278
3279			/* stop here */
3280			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3281			for (bio2 = biolist;
3282			     bio2 && bio2 != bio;
3283			     bio2 = bio2->bi_next) {
3284				/* remove last page from this bio */
3285				bio2->bi_vcnt--;
3286				bio2->bi_iter.bi_size -= len;
3287				bio_clear_flag(bio2, BIO_SEG_VALID);
3288			}
3289			goto bio_full;
3290		}
3291		nr_sectors += len>>9;
3292		sector_nr += len>>9;
3293	} while (biolist->bi_vcnt < RESYNC_PAGES);
3294 bio_full:
3295	r10_bio->sectors = nr_sectors;
3296
3297	while (biolist) {
3298		bio = biolist;
3299		biolist = biolist->bi_next;
3300
3301		bio->bi_next = NULL;
3302		r10_bio = bio->bi_private;
3303		r10_bio->sectors = nr_sectors;
3304
3305		if (bio->bi_end_io == end_sync_read) {
3306			md_sync_acct(bio->bi_bdev, nr_sectors);
3307			bio->bi_error = 0;
3308			generic_make_request(bio);
3309		}
3310	}
3311
3312	if (sectors_skipped)
3313		/* pretend they weren't skipped, it makes
3314		 * no important difference in this case
3315		 */
3316		md_done_sync(mddev, sectors_skipped, 1);
3317
3318	return sectors_skipped + nr_sectors;
3319 giveup:
3320	/* There is nowhere to write: either all non-sync
3321	 * drives have failed or are in resync, or all drives
3322	 * have a bad block, so try the next chunk...
3323	 */
3324	if (sector_nr + max_sync < max_sector)
3325		max_sector = sector_nr + max_sync;
3326
3327	sectors_skipped += (max_sector - sector_nr);
3328	chunks_skipped ++;
3329	sector_nr = max_sector;
3330	goto skipped;
3331}
3332
3333static sector_t
3334raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3335{
3336	sector_t size;
3337	struct r10conf *conf = mddev->private;
3338
3339	if (!raid_disks)
3340		raid_disks = min(conf->geo.raid_disks,
3341				 conf->prev.raid_disks);
3342	if (!sectors)
3343		sectors = conf->dev_sectors;
3344
3345	size = sectors >> conf->geo.chunk_shift;
3346	sector_div(size, conf->geo.far_copies);
3347	size = size * raid_disks;
3348	sector_div(size, conf->geo.near_copies);
3349
3350	return size << conf->geo.chunk_shift;
3351}
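/*
 * Worked example for raid10_size() (illustrative): 4 devices of 1048576
 * sectors, near_copies = 2, far_copies = 1, 64KiB chunks (chunk_shift = 7):
 * 1048576 >> 7 = 8192 chunks per device, * 4 disks / 2 copies = 16384
 * usable chunks, i.e. 16384 << 7 = 2097152 sectors -- 1GiB, as expected
 * for two-copy data on four 512MiB devices.
 */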
3352
3353static void calc_sectors(struct r10conf *conf, sector_t size)
3354{
3355	/* Calculate the number of sectors-per-device that will
3356	 * actually be used, and set conf->dev_sectors and
3357	 * conf->stride
3358	 */
3359
3360	size = size >> conf->geo.chunk_shift;
3361	sector_div(size, conf->geo.far_copies);
3362	size = size * conf->geo.raid_disks;
3363	sector_div(size, conf->geo.near_copies);
3364	/* 'size' is now the number of chunks in the array */
3365	/* calculate "used chunks per device" */
3366	size = size * conf->copies;
3367
3368	/* We need to round up when dividing by raid_disks to
3369	 * get the stride size.
3370	 */
3371	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3372
3373	conf->dev_sectors = size << conf->geo.chunk_shift;
3374
3375	if (conf->geo.far_offset)
3376		conf->geo.stride = 1 << conf->geo.chunk_shift;
3377	else {
3378		sector_div(size, conf->geo.far_copies);
3379		conf->geo.stride = size << conf->geo.chunk_shift;
3380	}
3381}
3382
3383enum geo_type {geo_new, geo_old, geo_start};
3384static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3385{
3386	int nc, fc, fo;
3387	int layout, chunk, disks;
3388	switch (new) {
3389	case geo_old:
3390		layout = mddev->layout;
3391		chunk = mddev->chunk_sectors;
3392		disks = mddev->raid_disks - mddev->delta_disks;
3393		break;
3394	case geo_new:
3395		layout = mddev->new_layout;
3396		chunk = mddev->new_chunk_sectors;
3397		disks = mddev->raid_disks;
3398		break;
3399	default: /* avoid 'may be unused' warnings */
3400	case geo_start: /* new when starting reshape - raid_disks not
3401			 * updated yet. */
3402		layout = mddev->new_layout;
3403		chunk = mddev->new_chunk_sectors;
3404		disks = mddev->raid_disks + mddev->delta_disks;
3405		break;
3406	}
3407	if (layout >> 19)
3408		return -1;
3409	if (chunk < (PAGE_SIZE >> 9) ||
3410	    !is_power_of_2(chunk))
3411		return -2;
3412	nc = layout & 255;
3413	fc = (layout >> 8) & 255;
3414	fo = layout & (1<<16);
3415	geo->raid_disks = disks;
3416	geo->near_copies = nc;
3417	geo->far_copies = fc;
3418	geo->far_offset = fo;
3419	switch (layout >> 17) {
3420	case 0:	/* original layout.  simple but not always optimal */
3421		geo->far_set_size = disks;
3422		break;
3423	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3424		 * actually using this, but leave code here just in case.*/
3425		geo->far_set_size = disks/fc;
3426		WARN(geo->far_set_size < fc,
3427		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3428		break;
3429	case 2: /* "improved" layout fixed to match documentation */
3430		geo->far_set_size = fc * nc;
3431		break;
3432	default: /* Not a valid layout */
3433		return -1;
3434	}
3435	geo->chunk_mask = chunk - 1;
3436	geo->chunk_shift = ffz(~chunk);
3437	return nc*fc;
3438}
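/*
 * Example decoding (illustrative): mdadm's default "near=2" layout value
 * 0x102 gives near_copies = 2 (low byte), far_copies = 1 (second byte),
 * far_offset clear (bit 16), and layout >> 17 == 0 selects the original
 * far_set_size covering all disks; setup_geo() then returns nc * fc = 2.
 */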
3439
3440static struct r10conf *setup_conf(struct mddev *mddev)
3441{
3442	struct r10conf *conf = NULL;
3443	int err = -EINVAL;
3444	struct geom geo;
3445	int copies;
3446
3447	copies = setup_geo(&geo, mddev, geo_new);
3448
3449	if (copies == -2) {
3450		printk(KERN_ERR "md/raid10:%s: chunk size must be "
3451		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3452		       mdname(mddev), PAGE_SIZE);
3453		goto out;
3454	}
3455
3456	if (copies < 2 || copies > mddev->raid_disks) {
3457		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3458		       mdname(mddev), mddev->new_layout);
3459		goto out;
3460	}
3461
3462	err = -ENOMEM;
3463	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3464	if (!conf)
3465		goto out;
3466
3467	/* FIXME calc properly */
3468	conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
3469							    max(0,-mddev->delta_disks)),
3470				GFP_KERNEL);
3471	if (!conf->mirrors)
3472		goto out;
3473
3474	conf->tmppage = alloc_page(GFP_KERNEL);
3475	if (!conf->tmppage)
3476		goto out;
3477
3478	conf->geo = geo;
3479	conf->copies = copies;
3480	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3481					   r10bio_pool_free, conf);
3482	if (!conf->r10bio_pool)
3483		goto out;
3484
3485	calc_sectors(conf, mddev->dev_sectors);
3486	if (mddev->reshape_position == MaxSector) {
3487		conf->prev = conf->geo;
3488		conf->reshape_progress = MaxSector;
3489	} else {
3490		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3491			err = -EINVAL;
3492			goto out;
3493		}
3494		conf->reshape_progress = mddev->reshape_position;
3495		if (conf->prev.far_offset)
3496			conf->prev.stride = 1 << conf->prev.chunk_shift;
3497		else
3498			/* far_copies must be 1 */
3499			conf->prev.stride = conf->dev_sectors;
3500	}
3501	conf->reshape_safe = conf->reshape_progress;
3502	spin_lock_init(&conf->device_lock);
3503	INIT_LIST_HEAD(&conf->retry_list);
3504	INIT_LIST_HEAD(&conf->bio_end_io_list);
3505
3506	spin_lock_init(&conf->resync_lock);
3507	init_waitqueue_head(&conf->wait_barrier);
3508
3509	conf->thread = md_register_thread(raid10d, mddev, "raid10");
3510	if (!conf->thread)
3511		goto out;
3512
3513	conf->mddev = mddev;
3514	return conf;
3515
3516 out:
3517	if (err == -ENOMEM)
3518		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3519		       mdname(mddev));
3520	if (conf) {
3521		mempool_destroy(conf->r10bio_pool);
3522		kfree(conf->mirrors);
3523		safe_put_page(conf->tmppage);
3524		kfree(conf);
3525	}
3526	return ERR_PTR(err);
3527}
3528
3529static int raid10_run(struct mddev *mddev)
3530{
3531	struct r10conf *conf;
3532	int i, disk_idx, chunk_size;
3533	struct raid10_info *disk;
3534	struct md_rdev *rdev;
3535	sector_t size;
3536	sector_t min_offset_diff = 0;
3537	int first = 1;
3538	bool discard_supported = false;
3539
3540	if (mddev->private == NULL) {
3541		conf = setup_conf(mddev);
3542		if (IS_ERR(conf))
3543			return PTR_ERR(conf);
3544		mddev->private = conf;
3545	}
3546	conf = mddev->private;
3547	if (!conf)
3548		goto out;
3549
3550	mddev->thread = conf->thread;
3551	conf->thread = NULL;
3552
3553	chunk_size = mddev->chunk_sectors << 9;
3554	if (mddev->queue) {
3555		blk_queue_max_discard_sectors(mddev->queue,
3556					      mddev->chunk_sectors);
3557		blk_queue_max_write_same_sectors(mddev->queue, 0);
3558		blk_queue_io_min(mddev->queue, chunk_size);
3559		if (conf->geo.raid_disks % conf->geo.near_copies)
3560			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3561		else
3562			blk_queue_io_opt(mddev->queue, chunk_size *
3563					 (conf->geo.raid_disks / conf->geo.near_copies));
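		/*
		 * Illustrative example: 4 disks, near_copies = 2 and 512KiB
		 * chunks report an optimal I/O size of 512KiB * (4 / 2) =
		 * 1MiB, i.e. one full stripe of distinct data.
		 */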
3564	}
3565
3566	rdev_for_each(rdev, mddev) {
3567		long long diff;
3568		struct request_queue *q;
3569
3570		disk_idx = rdev->raid_disk;
3571		if (disk_idx < 0)
3572			continue;
3573		if (disk_idx >= conf->geo.raid_disks &&
3574		    disk_idx >= conf->prev.raid_disks)
3575			continue;
3576		disk = conf->mirrors + disk_idx;
3577
3578		if (test_bit(Replacement, &rdev->flags)) {
3579			if (disk->replacement)
3580				goto out_free_conf;
3581			disk->replacement = rdev;
3582		} else {
3583			if (disk->rdev)
3584				goto out_free_conf;
3585			disk->rdev = rdev;
3586		}
3587		q = bdev_get_queue(rdev->bdev);
3588		diff = (rdev->new_data_offset - rdev->data_offset);
3589		if (!mddev->reshape_backwards)
3590			diff = -diff;
3591		if (diff < 0)
3592			diff = 0;
3593		if (first || diff < min_offset_diff)
3594			min_offset_diff = diff;
3595
3596		if (mddev->gendisk)
3597			disk_stack_limits(mddev->gendisk, rdev->bdev,
3598					  rdev->data_offset << 9);
3599
3600		disk->head_position = 0;
3601
3602		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3603			discard_supported = true;
3604	}
3605
3606	if (mddev->queue) {
3607		if (discard_supported)
3608			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3609						mddev->queue);
3610		else
3611			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3612						  mddev->queue);
3613	}
3614	/* need to check that every block has at least one working mirror */
3615	if (!enough(conf, -1)) {
3616		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3617		       mdname(mddev));
3618		goto out_free_conf;
3619	}
3620
3621	if (conf->reshape_progress != MaxSector) {
3622		/* must ensure that shape change is supported */
3623		if (conf->geo.far_copies != 1 &&
3624		    conf->geo.far_offset == 0)
3625			goto out_free_conf;
3626		if (conf->prev.far_copies != 1 &&
3627		    conf->prev.far_offset == 0)
3628			goto out_free_conf;
3629	}
3630
3631	mddev->degraded = 0;
3632	for (i = 0;
3633	     i < conf->geo.raid_disks
3634		     || i < conf->prev.raid_disks;
3635	     i++) {
3636
3637		disk = conf->mirrors + i;
3638
3639		if (!disk->rdev && disk->replacement) {
3640			/* The replacement is all we have - use it */
3641			disk->rdev = disk->replacement;
3642			disk->replacement = NULL;
3643			clear_bit(Replacement, &disk->rdev->flags);
3644		}
3645
3646		if (!disk->rdev ||
3647		    !test_bit(In_sync, &disk->rdev->flags)) {
3648			disk->head_position = 0;
3649			mddev->degraded++;
3650			if (disk->rdev &&
3651			    disk->rdev->saved_raid_disk < 0)
3652				conf->fullsync = 1;
3653		}
3654		disk->recovery_disabled = mddev->recovery_disabled - 1;
3655	}
3656
3657	if (mddev->recovery_cp != MaxSector)
3658		printk(KERN_NOTICE "md/raid10:%s: not clean"
3659		       " -- starting background reconstruction\n",
3660		       mdname(mddev));
3661	printk(KERN_INFO
3662		"md/raid10:%s: active with %d out of %d devices\n",
3663		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3664		conf->geo.raid_disks);
3665	/*
3666	 * Ok, everything is just fine now
3667	 */
3668	mddev->dev_sectors = conf->dev_sectors;
3669	size = raid10_size(mddev, 0, 0);
3670	md_set_array_sectors(mddev, size);
3671	mddev->resync_max_sectors = size;
3672
3673	if (mddev->queue) {
3674		int stripe = conf->geo.raid_disks *
3675			((mddev->chunk_sectors << 9) / PAGE_SIZE);
3676
3677		/* Calculate max read-ahead size.
3678		 * We need to readahead at least twice a whole stripe....
3679		 * maybe...
3680		 */
3681		stripe /= conf->geo.near_copies;
3682		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3683			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3684	}
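	/*
	 * Illustrative example: 4 disks, 512KiB chunks, near_copies = 2 and
	 * 4KiB pages give stripe = 4 * 128 / 2 = 256, so ra_pages is raised
	 * to at least 512 pages (2MiB) by the check above.
	 */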
3685
3686	if (md_integrity_register(mddev))
3687		goto out_free_conf;
3688
3689	if (conf->reshape_progress != MaxSector) {
3690		unsigned long before_length, after_length;
3691
3692		before_length = ((1 << conf->prev.chunk_shift) *
3693				 conf->prev.far_copies);
3694		after_length = ((1 << conf->geo.chunk_shift) *
3695				conf->geo.far_copies);
3696
3697		if (max(before_length, after_length) > min_offset_diff) {
3698			/* This cannot work */
3699			printk("md/raid10: offset difference not enough to continue reshape\n");
3700			goto out_free_conf;
3701		}
3702		conf->offset_diff = min_offset_diff;
3703
3704		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3705		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3706		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3707		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3708		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3709							"reshape");
3710	}
3711
3712	return 0;
3713
3714out_free_conf:
3715	md_unregister_thread(&mddev->thread);
3716	mempool_destroy(conf->r10bio_pool);
3717	safe_put_page(conf->tmppage);
3718	kfree(conf->mirrors);
3719	kfree(conf);
3720	mddev->private = NULL;
3721out:
3722	return -EIO;
3723}
3724
3725static void raid10_free(struct mddev *mddev, void *priv)
3726{
3727	struct r10conf *conf = priv;
3728
3729	mempool_destroy(conf->r10bio_pool);
3730	safe_put_page(conf->tmppage);
3731	kfree(conf->mirrors);
3732	kfree(conf->mirrors_old);
3733	kfree(conf->mirrors_new);
3734	kfree(conf);
3735}
3736
3737static void raid10_quiesce(struct mddev *mddev, int state)
3738{
3739	struct r10conf *conf = mddev->private;
3740
3741	switch(state) {
3742	case 1:
3743		raise_barrier(conf, 0);
3744		break;
3745	case 0:
3746		lower_barrier(conf);
3747		break;
3748	}
3749}
3750
3751static int raid10_resize(struct mddev *mddev, sector_t sectors)
3752{
3753	/* Resize of 'far' arrays is not supported.
3754	 * For 'near' and 'offset' arrays we can set the
3755	 * number of sectors used to be an appropriate multiple
3756	 * of the chunk size.
3757	 * For 'offset', this is far_copies*chunksize.
3758	 * For 'near' the multiplier is the LCM of
3759	 * near_copies and raid_disks.
3760	 * So if far_copies > 1 && !far_offset, fail.
3761	 * Else find LCM(raid_disks, near_copies)*far_copies and
3762	 * multiply by chunk_size.  Then round to this number.
3763	 * This is mostly done by raid10_size()
3764	 */
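	/*
	 * Illustrative example: a 4-disk near-2 array with 1024-sector
	 * chunks is rounded to a multiple of lcm(4, 2) * 1 * 1024 = 4096
	 * sectors (2MiB).
	 */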
3765	struct r10conf *conf = mddev->private;
3766	sector_t oldsize, size;
3767
3768	if (mddev->reshape_position != MaxSector)
3769		return -EBUSY;
3770
3771	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3772		return -EINVAL;
3773
3774	oldsize = raid10_size(mddev, 0, 0);
3775	size = raid10_size(mddev, sectors, 0);
3776	if (mddev->external_size &&
3777	    mddev->array_sectors > size)
3778		return -EINVAL;
3779	if (mddev->bitmap) {
3780		int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3781		if (ret)
3782			return ret;
3783	}
3784	md_set_array_sectors(mddev, size);
3785	set_capacity(mddev->gendisk, mddev->array_sectors);
3786	revalidate_disk(mddev->gendisk);
3787	if (sectors > mddev->dev_sectors &&
3788	    mddev->recovery_cp > oldsize) {
3789		mddev->recovery_cp = oldsize;
3790		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3791	}
3792	calc_sectors(conf, sectors);
3793	mddev->dev_sectors = conf->dev_sectors;
3794	mddev->resync_max_sectors = size;
3795	return 0;
3796}
3797
3798static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
3799{
3800	struct md_rdev *rdev;
3801	struct r10conf *conf;
3802
3803	if (mddev->degraded > 0) {
3804		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3805		       mdname(mddev));
3806		return ERR_PTR(-EINVAL);
3807	}
3808	sector_div(size, devs);
3809
3810	/* Set new parameters */
3811	mddev->new_level = 10;
3812	/* new layout: far_copies = 1, near_copies = 2 */
3813	mddev->new_layout = (1<<8) + 2;
3814	mddev->new_chunk_sectors = mddev->chunk_sectors;
3815	mddev->delta_disks = mddev->raid_disks;
3816	mddev->raid_disks *= 2;
3817	/* make sure it will not be marked as dirty */
3818	mddev->recovery_cp = MaxSector;
3819	mddev->dev_sectors = size;
3820
3821	conf = setup_conf(mddev);
3822	if (!IS_ERR(conf)) {
3823		rdev_for_each(rdev, mddev)
3824			if (rdev->raid_disk >= 0) {
3825				rdev->new_raid_disk = rdev->raid_disk * 2;
3826				rdev->sectors = size;
3827			}
3828		conf->barrier = 1;
3829	}
3830
3831	return conf;
3832}
3833
3834static void *raid10_takeover(struct mddev *mddev)
3835{
3836	struct r0conf *raid0_conf;
3837
3838	/* raid10 can take over:
3839	 *  raid0 - providing it has only a single zone
3840	 */
3841	if (mddev->level == 0) {
3842		/* for raid0 takeover only one zone is supported */
3843		raid0_conf = mddev->private;
3844		if (raid0_conf->nr_strip_zones > 1) {
3845			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3846			       " with more than one zone.\n",
3847			       mdname(mddev));
3848			return ERR_PTR(-EINVAL);
3849		}
3850		return raid10_takeover_raid0(mddev,
3851			raid0_conf->strip_zone->zone_end,
3852			raid0_conf->strip_zone->nb_dev);
3853	}
3854	return ERR_PTR(-EINVAL);
3855}
3856
3857static int raid10_check_reshape(struct mddev *mddev)
3858{
3859	/* Called when there is a request to change
3860	 * - layout (to ->new_layout)
3861	 * - chunk size (to ->new_chunk_sectors)
3862	 * - raid_disks (by delta_disks)
3863	 * or when trying to restart a reshape that was ongoing.
3864	 *
3865	 * We need to validate the request and possibly allocate
3866	 * space if that might be an issue later.
3867	 *
3868	 * Currently we reject any reshape of a 'far' mode array,
3869	 * allow chunk size to change if new is generally acceptable,
3870	 * allow raid_disks to increase, and allow
3871	 * a switch between 'near' mode and 'offset' mode.
3872	 */
3873	struct r10conf *conf = mddev->private;
3874	struct geom geo;
3875
3876	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3877		return -EINVAL;
3878
3879	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3880		/* mustn't change number of copies */
3881		return -EINVAL;
3882	if (geo.far_copies > 1 && !geo.far_offset)
3883		/* Cannot switch to 'far' mode */
3884		return -EINVAL;
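	/*
	 * Illustrative example: a near-2 array (layout 0x102, 2 copies) may
	 * switch to offset-2 (layout 0x10201: far_offset set, fc = 2,
	 * nc = 1) since both geometries keep nc * fc == 2.
	 */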
3885
3886	if (mddev->array_sectors & geo.chunk_mask)
3887			/* not factor of array size */
3888			return -EINVAL;
3889
3890	if (!enough(conf, -1))
3891		return -EINVAL;
3892
3893	kfree(conf->mirrors_new);
3894	conf->mirrors_new = NULL;
3895	if (mddev->delta_disks > 0) {
3896		/* allocate new 'mirrors' list */
3897		conf->mirrors_new = kzalloc(
3898			sizeof(struct raid10_info)
3899			*(mddev->raid_disks +
3900			  mddev->delta_disks),
3901			GFP_KERNEL);
3902		if (!conf->mirrors_new)
3903			return -ENOMEM;
3904	}
3905	return 0;
3906}
3907
3908/*
3909 * Need to check if array has failed when deciding whether to:
3910 *  - start an array
3911 *  - remove non-faulty devices
3912 *  - add a spare
3913 *  - allow a reshape
3914 * This determination is simple when no reshape is happening.
3915 * However if there is a reshape, we need to carefully check
3916 * both the before and after sections.
3917 * This is because some failed devices may only affect one
3918 * of the two sections, and some non-in_sync devices may
3919 * be in_sync in the section most affected by failed devices.
3920 */
3921static int calc_degraded(struct r10conf *conf)
3922{
3923	int degraded, degraded2;
3924	int i;
3925
3926	rcu_read_lock();
3927	degraded = 0;
3928	/* 'prev' section first */
3929	for (i = 0; i < conf->prev.raid_disks; i++) {
3930		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3931		if (!rdev || test_bit(Faulty, &rdev->flags))
3932			degraded++;
3933		else if (!test_bit(In_sync, &rdev->flags))
3934			/* When we can reduce the number of devices in
3935			 * an array, this might not contribute to
3936			 * 'degraded'.  It does now.
3937			 */
3938			degraded++;
3939	}
3940	rcu_read_unlock();
3941	if (conf->geo.raid_disks == conf->prev.raid_disks)
3942		return degraded;
3943	rcu_read_lock();
3944	degraded2 = 0;
3945	for (i = 0; i < conf->geo.raid_disks; i++) {
3946		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3947		if (!rdev || test_bit(Faulty, &rdev->flags))
3948			degraded2++;
3949		else if (!test_bit(In_sync, &rdev->flags)) {
3950			/* If reshape is increasing the number of devices,
3951			 * this section has already been recovered, so
3952			 * it doesn't contribute to degraded.
3953			 * else it does.
3954			 */
3955			if (conf->geo.raid_disks <= conf->prev.raid_disks)
3956				degraded2++;
3957		}
3958	}
3959	rcu_read_unlock();
3960	if (degraded2 > degraded)
3961		return degraded2;
3962	return degraded;
3963}
3964
3965static int raid10_start_reshape(struct mddev *mddev)
3966{
3967	/* A 'reshape' has been requested. This commits
3968	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE
3969	 * This also checks if there are enough spares and adds them
3970	 * to the array.
3971	 * We currently require enough spares to make the final
3972	 * array non-degraded.  We also require that the difference
3973	 * between old and new data_offset - on each device - is
3974	 * enough that we never risk over-writing.
3975	 */
3976
3977	unsigned long before_length, after_length;
3978	sector_t min_offset_diff = 0;
3979	int first = 1;
3980	struct geom new;
3981	struct r10conf *conf = mddev->private;
3982	struct md_rdev *rdev;
3983	int spares = 0;
3984	int ret;
3985
3986	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3987		return -EBUSY;
3988
3989	if (setup_geo(&new, mddev, geo_start) != conf->copies)
3990		return -EINVAL;
3991
3992	before_length = ((1 << conf->prev.chunk_shift) *
3993			 conf->prev.far_copies);
3994	after_length = ((1 << conf->geo.chunk_shift) *
3995			conf->geo.far_copies);
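	/*
	 * Illustrative example: with 1024-sector chunks and far_copies = 1
	 * in both geometries, before_length = after_length = 1024, so
	 * min_offset_diff, the smallest per-device gap between old and new
	 * data_offset, must be at least 1024 sectors for the reshape to be
	 * allowed (checked below).
	 */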
3996
3997	rdev_for_each(rdev, mddev) {
3998		if (!test_bit(In_sync, &rdev->flags)
3999		    && !test_bit(Faulty, &rdev->flags))
4000			spares++;
4001		if (rdev->raid_disk >= 0) {
4002			long long diff = (rdev->new_data_offset
4003					  - rdev->data_offset);
4004			if (!mddev->reshape_backwards)
4005				diff = -diff;
4006			if (diff < 0)
4007				diff = 0;
4008			if (first || diff < min_offset_diff)
4009				min_offset_diff = diff;
4010		}
4011	}
4012
4013	if (max(before_length, after_length) > min_offset_diff)
4014		return -EINVAL;
4015
4016	if (spares < mddev->delta_disks)
4017		return -EINVAL;
4018
4019	conf->offset_diff = min_offset_diff;
4020	spin_lock_irq(&conf->device_lock);
4021	if (conf->mirrors_new) {
4022		memcpy(conf->mirrors_new, conf->mirrors,
4023		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4024		smp_mb();
4025		kfree(conf->mirrors_old);
4026		conf->mirrors_old = conf->mirrors;
4027		conf->mirrors = conf->mirrors_new;
4028		conf->mirrors_new = NULL;
4029	}
4030	setup_geo(&conf->geo, mddev, geo_start);
4031	smp_mb();
4032	if (mddev->reshape_backwards) {
4033		sector_t size = raid10_size(mddev, 0, 0);
4034		if (size < mddev->array_sectors) {
4035			spin_unlock_irq(&conf->device_lock);
4036			printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
4037			       mdname(mddev));
4038			return -EINVAL;
4039		}
4040		mddev->resync_max_sectors = size;
4041		conf->reshape_progress = size;
4042	} else
4043		conf->reshape_progress = 0;
4044	conf->reshape_safe = conf->reshape_progress;
4045	spin_unlock_irq(&conf->device_lock);
4046
4047	if (mddev->delta_disks && mddev->bitmap) {
4048		ret = bitmap_resize(mddev->bitmap,
4049				    raid10_size(mddev, 0,
4050						conf->geo.raid_disks),
4051				    0, 0);
4052		if (ret)
4053			goto abort;
4054	}
4055	if (mddev->delta_disks > 0) {
4056		rdev_for_each(rdev, mddev)
4057			if (rdev->raid_disk < 0 &&
4058			    !test_bit(Faulty, &rdev->flags)) {
4059				if (raid10_add_disk(mddev, rdev) == 0) {
4060					if (rdev->raid_disk >=
4061					    conf->prev.raid_disks)
4062						set_bit(In_sync, &rdev->flags);
4063					else
4064						rdev->recovery_offset = 0;
4065
4066					if (sysfs_link_rdev(mddev, rdev))
4067						/* Failure here  is OK */;
4068				}
4069			} else if (rdev->raid_disk >= conf->prev.raid_disks
4070				   && !test_bit(Faulty, &rdev->flags)) {
4071				/* This is a spare that was manually added */
4072				set_bit(In_sync, &rdev->flags);
4073			}
4074	}
4075	/* When a reshape changes the number of devices,
4076	 * ->degraded is measured against the larger of the
4077	 * pre and post numbers.
4078	 */
4079	spin_lock_irq(&conf->device_lock);
4080	mddev->degraded = calc_degraded(conf);
4081	spin_unlock_irq(&conf->device_lock);
4082	mddev->raid_disks = conf->geo.raid_disks;
4083	mddev->reshape_position = conf->reshape_progress;
4084	set_bit(MD_CHANGE_DEVS, &mddev->flags);
4085
4086	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4087	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4088	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4089	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4090	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4091
4092	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4093						"reshape");
4094	if (!mddev->sync_thread) {
4095		ret = -EAGAIN;
4096		goto abort;
4097	}
4098	conf->reshape_checkpoint = jiffies;
4099	md_wakeup_thread(mddev->sync_thread);
4100	md_new_event(mddev);
4101	return 0;
4102
4103abort:
4104	mddev->recovery = 0;
4105	spin_lock_irq(&conf->device_lock);
4106	conf->geo = conf->prev;
4107	mddev->raid_disks = conf->geo.raid_disks;
4108	rdev_for_each(rdev, mddev)
4109		rdev->new_data_offset = rdev->data_offset;
4110	smp_wmb();
4111	conf->reshape_progress = MaxSector;
4112	conf->reshape_safe = MaxSector;
4113	mddev->reshape_position = MaxSector;
4114	spin_unlock_irq(&conf->device_lock);
4115	return ret;
4116}
4117
4118/* Calculate the last device-address that could contain
4119 * any block from the chunk that includes the array-address 's'
4120 * and report the next address.
4121 * i.e. the address returned will be chunk-aligned and after
4122 * any data that is in the chunk containing 's'.
4123 */
4124static sector_t last_dev_address(sector_t s, struct geom *geo)
4125{
4126	s = (s | geo->chunk_mask) + 1;
4127	s >>= geo->chunk_shift;
4128	s *= geo->near_copies;
4129	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4130	s *= geo->far_copies;
4131	s <<= geo->chunk_shift;
4132	return s;
4133}
4134
4135/* Calculate the first device-address that could contain
4136 * any block from the chunk that includes the array-address 's'.
4137 * This too will be the start of a chunk
4138 */
4139static sector_t first_dev_address(sector_t s, struct geom *geo)
4140{
4141	s >>= geo->chunk_shift;
4142	s *= geo->near_copies;
4143	sector_div(s, geo->raid_disks);
4144	s *= geo->far_copies;
4145	s <<= geo->chunk_shift;
4146	return s;
4147}
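/*
 * Illustrative example for the two helpers above: with 1024-sector chunks
 * (chunk_shift = 10, chunk_mask = 0x3ff), near_copies = 2, far_copies = 1
 * and raid_disks = 4, array address s = 5000 lies in chunk 4, and
 *   last_dev_address(5000):  5120 >> 10 = 5, * 2 = 10,
 *                            DIV_ROUND_UP(10, 4) = 3, * 1, << 10 = 3072
 *   first_dev_address(5000): 5000 >> 10 = 4, * 2 = 8, / 4 = 2, << 10 = 2048
 * so every copy of that chunk lives in device sectors [2048, 3072).
 */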
4148
4149static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4150				int *skipped)
4151{
4152	/* We simply copy at most one chunk (smallest of old and new)
4153	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4154	 * or we hit a bad block or something.
4155	 * This might mean we pause for normal IO in the middle of
4156	 * a chunk, but that is not a problem as mddev->reshape_position
4157	 * can record any location.
4158	 *
4159	 * If we will want to write to a location that isn't
4160	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4161	 * we need to flush all reshape requests and update the metadata.
4162	 *
4163	 * When reshaping forwards (e.g. to more devices), we interpret
4164	 * 'safe' as the earliest block which might not have been copied
4165	 * down yet.  We divide this by previous stripe size and multiply
4166	 * by previous stripe length to get lowest device offset that we
4167	 * cannot write to yet.
4168	 * We interpret 'sector_nr' as an address that we want to write to.
4169	 * From this we use last_dev_address() to find where we might
4170	 * write to, and first_dev_address() on the 'safe' position.
4171	 * If this 'next' write position is after the 'safe' position,
4172	 * we must update the metadata to increase the 'safe' position.
4173	 *
4174	 * When reshaping backwards, we round in the opposite direction
4175	 * and perform the reverse test:  next write position must not be
4176	 * less than current safe position.
4177	 *
4178	 * In all this the minimum difference in data offsets
4179	 * (conf->offset_diff - always positive) allows a bit of slack,
4180	 * so next can be after 'safe', but not by more than offset_diff
4181	 *
4182	 * We need to prepare all the bios here before we start any IO
4183	 * to ensure the size we choose is acceptable to all devices.
4184	 * That means one for each copy for write-out and an extra one for
4185	 * read-in.
4186	 * We store the read-in bio in ->master_bio and the others in
4187	 * ->devs[x].bio and ->devs[x].repl_bio.
4188	 */
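	/*
	 * Illustrative example of the forward-reshape check below: with
	 * offset_diff = 2048, if this chunk could touch device sectors up
	 * to next = 10240 while the metadata only records safe = 7168,
	 * then next > safe + 2048 and the superblock must be updated before
	 * any copying starts.
	 */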
4189	struct r10conf *conf = mddev->private;
4190	struct r10bio *r10_bio;
4191	sector_t next, safe, last;
4192	int max_sectors;
4193	int nr_sectors;
4194	int s;
4195	struct md_rdev *rdev;
4196	int need_flush = 0;
4197	struct bio *blist;
4198	struct bio *bio, *read_bio;
4199	int sectors_done = 0;
4200
4201	if (sector_nr == 0) {
4202		/* If restarting in the middle, skip the initial sectors */
4203		if (mddev->reshape_backwards &&
4204		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4205			sector_nr = (raid10_size(mddev, 0, 0)
4206				     - conf->reshape_progress);
4207		} else if (!mddev->reshape_backwards &&
4208			   conf->reshape_progress > 0)
4209			sector_nr = conf->reshape_progress;
4210		if (sector_nr) {
4211			mddev->curr_resync_completed = sector_nr;
4212			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4213			*skipped = 1;
4214			return sector_nr;
4215		}
4216	}
4217
4218	/* We don't use sector_nr to track where we are up to
4219	 * as that doesn't work well for ->reshape_backwards.
4220	 * So just use ->reshape_progress.
4221	 */
4222	if (mddev->reshape_backwards) {
4223		/* 'next' is the earliest device address that we might
4224		 * write to for this chunk in the new layout
4225		 */
4226		next = first_dev_address(conf->reshape_progress - 1,
4227					 &conf->geo);
4228
4229		/* 'safe' is the last device address that we might read from
4230		 * in the old layout after a restart
4231		 */
4232		safe = last_dev_address(conf->reshape_safe - 1,
4233					&conf->prev);
4234
4235		if (next + conf->offset_diff < safe)
4236			need_flush = 1;
4237
4238		last = conf->reshape_progress - 1;
4239		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4240					       & conf->prev.chunk_mask);
4241		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4242			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4243	} else {
4244		/* 'next' is after the last device address that we
4245		 * might write to for this chunk in the new layout
4246		 */
4247		next = last_dev_address(conf->reshape_progress, &conf->geo);
4248
4249		/* 'safe' is the earliest device address that we might
4250		 * read from in the old layout after a restart
4251		 */
4252		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4253
4254		/* Need to update metadata if 'next' might be beyond 'safe'
4255		 * as that would possibly corrupt data
4256		 */
4257		if (next > safe + conf->offset_diff)
4258			need_flush = 1;
4259
4260		sector_nr = conf->reshape_progress;
4261		last  = sector_nr | (conf->geo.chunk_mask
4262				     & conf->prev.chunk_mask);
4263
4264		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4265			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4266	}
4267
4268	if (need_flush ||
4269	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4270		/* Need to update reshape_position in metadata */
4271		wait_barrier(conf);
4272		mddev->reshape_position = conf->reshape_progress;
4273		if (mddev->reshape_backwards)
4274			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4275				- conf->reshape_progress;
4276		else
4277			mddev->curr_resync_completed = conf->reshape_progress;
4278		conf->reshape_checkpoint = jiffies;
4279		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4280		md_wakeup_thread(mddev->thread);
4281		wait_event(mddev->sb_wait, mddev->flags == 0 ||
4282			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4283		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4284			allow_barrier(conf);
4285			return sectors_done;
4286		}
4287		conf->reshape_safe = mddev->reshape_position;
4288		allow_barrier(conf);
4289	}
4290
4291read_more:
4292	/* Now schedule reads for blocks from sector_nr to last */
4293	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4294	r10_bio->state = 0;
4295	raise_barrier(conf, sectors_done != 0);
4296	atomic_set(&r10_bio->remaining, 0);
4297	r10_bio->mddev = mddev;
4298	r10_bio->sector = sector_nr;
4299	set_bit(R10BIO_IsReshape, &r10_bio->state);
4300	r10_bio->sectors = last - sector_nr + 1;
4301	rdev = read_balance(conf, r10_bio, &max_sectors);
4302	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4303
4304	if (!rdev) {
4305		/* Cannot read from here, so need to record bad blocks
4306		 * on all the target devices.
4307		 */
4308		// FIXME
4309		mempool_free(r10_bio, conf->r10buf_pool);
4310		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4311		return sectors_done;
4312	}
4313
4314	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4315
4316	read_bio->bi_bdev = rdev->bdev;
4317	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4318			       + rdev->data_offset);
4319	read_bio->bi_private = r10_bio;
4320	read_bio->bi_end_io = end_sync_read;
4321	read_bio->bi_rw = READ;
4322	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4323	read_bio->bi_error = 0;
4324	read_bio->bi_vcnt = 0;
4325	read_bio->bi_iter.bi_size = 0;
4326	r10_bio->master_bio = read_bio;
4327	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4328
4329	/* Now find the locations in the new layout */
4330	__raid10_find_phys(&conf->geo, r10_bio);
4331
4332	blist = read_bio;
4333	read_bio->bi_next = NULL;
4334
4335	for (s = 0; s < conf->copies*2; s++) {
4336		struct bio *b;
4337		int d = r10_bio->devs[s/2].devnum;
4338		struct md_rdev *rdev2;
4339		if (s&1) {
4340			rdev2 = conf->mirrors[d].replacement;
4341			b = r10_bio->devs[s/2].repl_bio;
4342		} else {
4343			rdev2 = conf->mirrors[d].rdev;
4344			b = r10_bio->devs[s/2].bio;
4345		}
4346		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4347			continue;
4348
4349		bio_reset(b);
4350		b->bi_bdev = rdev2->bdev;
4351		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4352			rdev2->new_data_offset;
4353		b->bi_private = r10_bio;
4354		b->bi_end_io = end_reshape_write;
4355		b->bi_rw = WRITE;
4356		b->bi_next = blist;
4357		blist = b;
4358	}
4359
4360	/* Now add as many pages as possible to all of these bios. */
4361
4362	nr_sectors = 0;
4363	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4364		struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4365		int len = (max_sectors - s) << 9;
4366		if (len > PAGE_SIZE)
4367			len = PAGE_SIZE;
4368		for (bio = blist; bio ; bio = bio->bi_next) {
4369			struct bio *bio2;
4370			if (bio_add_page(bio, page, len, 0))
4371				continue;
4372
4373			/* Didn't fit, must stop */
4374			for (bio2 = blist;
4375			     bio2 && bio2 != bio;
4376			     bio2 = bio2->bi_next) {
4377				/* Remove last page from this bio */
4378				bio2->bi_vcnt--;
4379				bio2->bi_iter.bi_size -= len;
4380				bio_clear_flag(bio2, BIO_SEG_VALID);
4381			}
4382			goto bio_full;
4383		}
4384		sector_nr += len >> 9;
4385		nr_sectors += len >> 9;
4386	}
4387bio_full:
4388	r10_bio->sectors = nr_sectors;
4389
4390	/* Now submit the read */
4391	md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4392	atomic_inc(&r10_bio->remaining);
4393	read_bio->bi_next = NULL;
4394	generic_make_request(read_bio);
4395	sector_nr += nr_sectors;
4396	sectors_done += nr_sectors;
4397	if (sector_nr <= last)
4398		goto read_more;
4399
4400	/* Now that we have done the whole section we can
4401	 * update reshape_progress
4402	 */
4403	if (mddev->reshape_backwards)
4404		conf->reshape_progress -= sectors_done;
4405	else
4406		conf->reshape_progress += sectors_done;
4407
4408	return sectors_done;
4409}
4410
4411static void end_reshape_request(struct r10bio *r10_bio);
4412static int handle_reshape_read_error(struct mddev *mddev,
4413				     struct r10bio *r10_bio);
4414static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4415{
4416	/* Reshape read completed.  Hopefully we have a block
4417	 * to write out.
4418	 * If we got a read error then we do sync 1-page reads from
4419	 * elsewhere until we find the data - or give up.
4420	 */
4421	struct r10conf *conf = mddev->private;
4422	int s;
4423
4424	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4425		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4426			/* Reshape has been aborted */
4427			md_done_sync(mddev, r10_bio->sectors, 0);
4428			return;
4429		}
4430
4431	/* We definitely have the data in the pages, schedule the
4432	 * writes.
4433	 */
4434	atomic_set(&r10_bio->remaining, 1);
4435	for (s = 0; s < conf->copies*2; s++) {
4436		struct bio *b;
4437		int d = r10_bio->devs[s/2].devnum;
4438		struct md_rdev *rdev;
4439		if (s&1) {
4440			rdev = conf->mirrors[d].replacement;
4441			b = r10_bio->devs[s/2].repl_bio;
4442		} else {
4443			rdev = conf->mirrors[d].rdev;
4444			b = r10_bio->devs[s/2].bio;
4445		}
4446		if (!rdev || test_bit(Faulty, &rdev->flags))
4447			continue;
4448		atomic_inc(&rdev->nr_pending);
4449		md_sync_acct(b->bi_bdev, r10_bio->sectors);
4450		atomic_inc(&r10_bio->remaining);
4451		b->bi_next = NULL;
4452		generic_make_request(b);
4453	}
4454	end_reshape_request(r10_bio);
4455}
4456
4457static void end_reshape(struct r10conf *conf)
4458{
4459	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4460		return;
4461
4462	spin_lock_irq(&conf->device_lock);
4463	conf->prev = conf->geo;
4464	md_finish_reshape(conf->mddev);
4465	smp_wmb();
4466	conf->reshape_progress = MaxSector;
4467	conf->reshape_safe = MaxSector;
4468	spin_unlock_irq(&conf->device_lock);
4469
4470	/* read-ahead size must cover two whole stripes, which is
4471	 * 2 * (data disks) * chunk size, with data disks = raid_disks / near_copies
4472	 */
4473	if (conf->mddev->queue) {
4474		int stripe = conf->geo.raid_disks *
4475			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4476		stripe /= conf->geo.near_copies;
4477		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4478			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4479	}
4480	conf->fullsync = 0;
4481}
4482
4483static int handle_reshape_read_error(struct mddev *mddev,
4484				     struct r10bio *r10_bio)
4485{
4486	/* Use sync reads to get the blocks from somewhere else */
4487	int sectors = r10_bio->sectors;
4488	struct r10conf *conf = mddev->private;
4489	struct {
4490		struct r10bio r10_bio;
4491		struct r10dev devs[conf->copies];
4492	} on_stack;
4493	struct r10bio *r10b = &on_stack.r10_bio;
4494	int slot = 0;
4495	int idx = 0;
4496	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4497
4498	r10b->sector = r10_bio->sector;
4499	__raid10_find_phys(&conf->prev, r10b);
4500
4501	while (sectors) {
4502		int s = sectors;
4503		int success = 0;
4504		int first_slot = slot;
4505
4506		if (s > (PAGE_SIZE >> 9))
4507			s = PAGE_SIZE >> 9;
4508
4509		while (!success) {
4510			int d = r10b->devs[slot].devnum;
4511			struct md_rdev *rdev = conf->mirrors[d].rdev;
4512			sector_t addr;
4513			if (rdev == NULL ||
4514			    test_bit(Faulty, &rdev->flags) ||
4515			    !test_bit(In_sync, &rdev->flags))
4516				goto failed;
4517
4518			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4519			success = sync_page_io(rdev,
4520					       addr,
4521					       s << 9,
4522					       bvec[idx].bv_page,
4523					       READ, false);
4524			if (success)
4525				break;
4526		failed:
4527			slot++;
4528			if (slot >= conf->copies)
4529				slot = 0;
4530			if (slot == first_slot)
4531				break;
4532		}
4533		if (!success) {
4534			/* couldn't read this block, must give up */
4535			set_bit(MD_RECOVERY_INTR,
4536				&mddev->recovery);
4537			return -EIO;
4538		}
4539		sectors -= s;
4540		idx++;
4541	}
4542	return 0;
4543}
4544
4545static void end_reshape_write(struct bio *bio)
4546{
4547	struct r10bio *r10_bio = bio->bi_private;
4548	struct mddev *mddev = r10_bio->mddev;
4549	struct r10conf *conf = mddev->private;
4550	int d;
4551	int slot;
4552	int repl;
4553	struct md_rdev *rdev = NULL;
4554
4555	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4556	if (repl)
4557		rdev = conf->mirrors[d].replacement;
4558	if (!rdev) {
4559		smp_mb();
4560		rdev = conf->mirrors[d].rdev;
4561	}
4562
4563	if (bio->bi_error) {
4564		/* FIXME should record badblock */
4565		md_error(mddev, rdev);
4566	}
4567
4568	rdev_dec_pending(rdev, mddev);
4569	end_reshape_request(r10_bio);
4570}
4571
4572static void end_reshape_request(struct r10bio *r10_bio)
4573{
4574	if (!atomic_dec_and_test(&r10_bio->remaining))
4575		return;
4576	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4577	bio_put(r10_bio->master_bio);
4578	put_buf(r10_bio);
4579}
4580
4581static void raid10_finish_reshape(struct mddev *mddev)
4582{
4583	struct r10conf *conf = mddev->private;
4584
4585	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4586		return;
4587
4588	if (mddev->delta_disks > 0) {
4589		sector_t size = raid10_size(mddev, 0, 0);
4590		md_set_array_sectors(mddev, size);
4591		if (mddev->recovery_cp > mddev->resync_max_sectors) {
4592			mddev->recovery_cp = mddev->resync_max_sectors;
4593			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4594		}
4595		mddev->resync_max_sectors = size;
4596		set_capacity(mddev->gendisk, mddev->array_sectors);
4597		revalidate_disk(mddev->gendisk);
4598	} else {
4599		int d;
4600		for (d = conf->geo.raid_disks ;
4601		     d < conf->geo.raid_disks - mddev->delta_disks;
4602		     d++) {
4603			struct md_rdev *rdev = conf->mirrors[d].rdev;
4604			if (rdev)
4605				clear_bit(In_sync, &rdev->flags);
4606			rdev = conf->mirrors[d].replacement;
4607			if (rdev)
4608				clear_bit(In_sync, &rdev->flags);
4609		}
4610	}
4611	mddev->layout = mddev->new_layout;
4612	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4613	mddev->reshape_position = MaxSector;
4614	mddev->delta_disks = 0;
4615	mddev->reshape_backwards = 0;
4616}
4617
4618static struct md_personality raid10_personality =
4619{
4620	.name		= "raid10",
4621	.level		= 10,
4622	.owner		= THIS_MODULE,
4623	.make_request	= raid10_make_request,
4624	.run		= raid10_run,
4625	.free		= raid10_free,
4626	.status		= raid10_status,
4627	.error_handler	= raid10_error,
4628	.hot_add_disk	= raid10_add_disk,
4629	.hot_remove_disk= raid10_remove_disk,
4630	.spare_active	= raid10_spare_active,
4631	.sync_request	= raid10_sync_request,
4632	.quiesce	= raid10_quiesce,
4633	.size		= raid10_size,
4634	.resize		= raid10_resize,
4635	.takeover	= raid10_takeover,
4636	.check_reshape	= raid10_check_reshape,
4637	.start_reshape	= raid10_start_reshape,
4638	.finish_reshape	= raid10_finish_reshape,
4639	.congested	= raid10_congested,
4640};
4641
4642static int __init raid_init(void)
4643{
4644	return register_md_personality(&raid10_personality);
4645}
4646
4647static void raid_exit(void)
4648{
4649	unregister_md_personality(&raid10_personality);
4650}
4651
4652module_init(raid_init);
4653module_exit(raid_exit);
4654MODULE_LICENSE("GPL");
4655MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4656MODULE_ALIAS("md-personality-9"); /* RAID10 */
4657MODULE_ALIAS("md-raid10");
4658MODULE_ALIAS("md-level-10");
4659
4660module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);