   1/*
   2 * raid10.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 2000-2004 Neil Brown
   5 *
   6 * RAID-10 support for md.
   7 *
   8 * Based on code in raid1.c.  See raid1.c for further copyright information.
   9 *
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21#include <linux/slab.h>
  22#include <linux/delay.h>
  23#include <linux/blkdev.h>
  24#include <linux/seq_file.h>
  25#include <linux/ratelimit.h>
  26#include "md.h"
  27#include "raid10.h"
  28#include "raid0.h"
  29#include "bitmap.h"
  30
  31/*
  32 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  33 * The layout of data is defined by
  34 *    chunk_size
  35 *    raid_disks
  36 *    near_copies (stored in low byte of layout)
  37 *    far_copies (stored in second byte of layout)
  38 *    far_offset (stored in bit 16 of layout )
  39 *
  40 * The data to be stored is divided into chunks using chunksize.
  41 * Each device is divided into far_copies sections.
  42 * In each section, chunks are laid out in a style similar to raid0, but
  43 * near_copies copies of each chunk are stored (each on a different drive).
  44 * The starting device for each section is offset near_copies from the starting
  45 * device of the previous section.
  46 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
  47 * drive.
  48 * near_copies and far_copies must be at least one, and their product is at most
  49 * raid_disks.
  50 *
  51 * If far_offset is true, then the far_copies are handled a bit differently.
  52 * The copies are still in different stripes, but instead of being very far apart
  53 * on disk, they are in adjacent stripes.
  54 */
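/*
 * A rough illustration of these layouts (hypothetical geometry, chunks
 * named A, B, C, ...):
 *
 * near_copies=2, far_copies=1, raid_disks=4 ("n2"):
 *	disk0 disk1 disk2 disk3
 *	  A     A     B     B
 *	  C     C     D     D
 *
 * near_copies=1, far_copies=2, raid_disks=2 ("f2"): each disk is split
 * into two sections, and the second section starts one (=near_copies)
 * device further along:
 *	disk0 disk1
 *	  A     B	<- first section
 *	  C     D
 *	  ...
 *	  B     A	<- second section
 *	  D     C
 *
 * Same geometry with far_offset set: the far copy sits in the very next
 * stripe rather than in a distant section:
 *	disk0 disk1
 *	  A     B
 *	  B     A
 *	  C     D
 *	  D     C
 */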
  55
  56/*
  57 * Number of guaranteed r10bios in case of extreme VM load:
  58 */
  59#define	NR_RAID10_BIOS 256
  60
  61static void allow_barrier(conf_t *conf);
  62static void lower_barrier(conf_t *conf);
  63
  64static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
  65{
  66	conf_t *conf = data;
  67	int size = offsetof(struct r10bio_s, devs[conf->copies]);
  68
  69	/* allocate a r10bio with room for raid_disks entries in the bios array */
 
  70	return kzalloc(size, gfp_flags);
  71}
  72
  73static void r10bio_pool_free(void *r10_bio, void *data)
  74{
  75	kfree(r10_bio);
  76}
  77
  78/* Maximum size of each resync request */
  79#define RESYNC_BLOCK_SIZE (64*1024)
  80#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  81/* amount of memory to reserve for resync requests */
  82#define RESYNC_WINDOW (1024*1024)
  83/* maximum number of concurrent requests, memory permitting */
  84#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
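/*
 * Worked numbers for the constants above (assuming 4KiB pages): each
 * resync request covers RESYNC_BLOCK_SIZE = 64KiB, i.e. RESYNC_PAGES =
 * 16 pages per bio, and up to RESYNC_DEPTH = 32MiB/64KiB = 512 such
 * requests may be in flight at once, memory permitting.
 */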
  85
  86/*
  87 * When performing a resync, we need to read and compare, so
  88 * we need as many pages as there are copies.
  89 * When performing a recovery, we need 2 bios, one for read,
  90 * one for write (we recover only one drive per r10buf)
  91 *
  92 */
  93static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
  94{
  95	conf_t *conf = data;
  96	struct page *page;
  97	r10bio_t *r10_bio;
  98	struct bio *bio;
  99	int i, j;
 100	int nalloc;
 101
 102	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 103	if (!r10_bio)
 104		return NULL;
 105
 106	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 
 107		nalloc = conf->copies; /* resync */
 108	else
 109		nalloc = 2; /* recovery */
 110
 111	/*
 112	 * Allocate bios.
 113	 */
 114	for (j = nalloc ; j-- ; ) {
 115		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 116		if (!bio)
 117			goto out_free_bio;
 118		r10_bio->devs[j].bio = bio;
 119	}
 120	/*
 121	 * Allocate RESYNC_PAGES data pages and attach them
 122	 * where needed.
 123	 */
 124	for (j = 0 ; j < nalloc; j++) {
 
 125		bio = r10_bio->devs[j].bio;
 126		for (i = 0; i < RESYNC_PAGES; i++) {
 127			if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
 128						&conf->mddev->recovery)) {
 129				/* we can share bv_page's during recovery */
 
 130				struct bio *rbio = r10_bio->devs[0].bio;
 131				page = rbio->bi_io_vec[i].bv_page;
 132				get_page(page);
 133			} else
 134				page = alloc_page(gfp_flags);
 135			if (unlikely(!page))
 136				goto out_free_pages;
 137
 138			bio->bi_io_vec[i].bv_page = page;
 
 
 139		}
 140	}
 141
 142	return r10_bio;
 143
 144out_free_pages:
 145	for ( ; i > 0 ; i--)
 146		safe_put_page(bio->bi_io_vec[i-1].bv_page);
 147	while (j--)
 148		for (i = 0; i < RESYNC_PAGES ; i++)
 149			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 150	j = -1;
 151out_free_bio:
 152	while ( ++j < nalloc )
 153		bio_put(r10_bio->devs[j].bio);
 154	r10bio_pool_free(r10_bio, conf);
 155	return NULL;
 156}
 157
 158static void r10buf_pool_free(void *__r10_bio, void *data)
 159{
 160	int i;
 161	conf_t *conf = data;
 162	r10bio_t *r10bio = __r10_bio;
 163	int j;
 164
 165	for (j=0; j < conf->copies; j++) {
 166		struct bio *bio = r10bio->devs[j].bio;
 167		if (bio) {
 168			for (i = 0; i < RESYNC_PAGES; i++) {
 169				safe_put_page(bio->bi_io_vec[i].bv_page);
 170				bio->bi_io_vec[i].bv_page = NULL;
 171			}
 172			bio_put(bio);
 173		}
 174	}
 175	r10bio_pool_free(r10bio, conf);
 176}
 177
 178static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
 179{
 180	int i;
 181
 182	for (i = 0; i < conf->copies; i++) {
 183		struct bio **bio = & r10_bio->devs[i].bio;
 184		if (!BIO_SPECIAL(*bio))
 185			bio_put(*bio);
 186		*bio = NULL;
 187	}
 188}
 189
 190static void free_r10bio(r10bio_t *r10_bio)
 191{
 192	conf_t *conf = r10_bio->mddev->private;
 193
 194	put_all_bios(conf, r10_bio);
 195	mempool_free(r10_bio, conf->r10bio_pool);
 196}
 197
 198static void put_buf(r10bio_t *r10_bio)
 199{
 200	conf_t *conf = r10_bio->mddev->private;
 201
 202	mempool_free(r10_bio, conf->r10buf_pool);
 203
 204	lower_barrier(conf);
 205}
 206
 207static void reschedule_retry(r10bio_t *r10_bio)
 208{
 209	unsigned long flags;
 210	mddev_t *mddev = r10_bio->mddev;
 211	conf_t *conf = mddev->private;
 212
 213	spin_lock_irqsave(&conf->device_lock, flags);
 214	list_add(&r10_bio->retry_list, &conf->retry_list);
 215	conf->nr_queued ++;
 216	spin_unlock_irqrestore(&conf->device_lock, flags);
 217
 218	/* wake up frozen array... */
 219	wake_up(&conf->wait_barrier);
 220
 221	md_wakeup_thread(mddev->thread);
 222}
 223
 224/*
 225 * raid_end_bio_io() is called when we have finished servicing a mirrored
 226 * operation and are ready to return a success/failure code to the buffer
 227 * cache layer.
 228 */
 229static void raid_end_bio_io(r10bio_t *r10_bio)
 230{
 231	struct bio *bio = r10_bio->master_bio;
 232	int done;
 233	conf_t *conf = r10_bio->mddev->private;
 234
 235	if (bio->bi_phys_segments) {
 236		unsigned long flags;
 237		spin_lock_irqsave(&conf->device_lock, flags);
 238		bio->bi_phys_segments--;
 239		done = (bio->bi_phys_segments == 0);
 240		spin_unlock_irqrestore(&conf->device_lock, flags);
 241	} else
 242		done = 1;
 243	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 244		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 245	if (done) {
 246		bio_endio(bio, 0);
 247		/*
 248		 * Wake up any possible resync thread that waits for the device
 249		 * to go idle.
 250		 */
 251		allow_barrier(conf);
 252	}
 253	free_r10bio(r10_bio);
 254}
 255
 256/*
 257 * Update disk head position estimator based on IRQ completion info.
 258 */
 259static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 260{
 261	conf_t *conf = r10_bio->mddev->private;
 262
 263	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 264		r10_bio->devs[slot].addr + (r10_bio->sectors);
 265}
 266
 267/*
 268 * Find the disk number which triggered the given bio
 269 */
 270static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
 271			 struct bio *bio, int *slotp)
 272{
 273	int slot;
 
 274
 275	for (slot = 0; slot < conf->copies; slot++)
 276		if (r10_bio->devs[slot].bio == bio)
 277			break;
 278
 279	BUG_ON(slot == conf->copies);
 280	update_head_pos(slot, r10_bio);
 281
 282	if (slotp)
 283		*slotp = slot;
 
 
 284	return r10_bio->devs[slot].devnum;
 285}
 286
 287static void raid10_end_read_request(struct bio *bio, int error)
 288{
 289	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 290	r10bio_t *r10_bio = bio->bi_private;
 291	int slot, dev;
 292	conf_t *conf = r10_bio->mddev->private;
 293
 294
 295	slot = r10_bio->read_slot;
 296	dev = r10_bio->devs[slot].devnum;
 
 297	/*
 298	 * this branch is our 'one mirror IO has finished' event handler:
 299	 */
 300	update_head_pos(slot, r10_bio);
 301
 302	if (uptodate) {
 303		/*
 304		 * Set R10BIO_Uptodate in our master bio, so that
 305		 * we will return a good error code to the higher
 306		 * levels even if IO on some other mirrored buffer fails.
 307		 *
 308		 * The 'master' represents the composite IO operation to
 309		 * user-side. So if something waits for IO, then it will
 310		 * wait for the 'master' bio.
 311		 */
 312		set_bit(R10BIO_Uptodate, &r10_bio->state);
 313		raid_end_bio_io(r10_bio);
 314		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 315	} else {
 316		/*
 317		 * oops, read error - keep the refcount on the rdev
 318		 */
 319		char b[BDEVNAME_SIZE];
 320		printk_ratelimited(KERN_ERR
 321				   "md/raid10:%s: %s: rescheduling sector %llu\n",
 322				   mdname(conf->mddev),
 323				   bdevname(conf->mirrors[dev].rdev->bdev, b),
 324				   (unsigned long long)r10_bio->sector);
 325		set_bit(R10BIO_ReadError, &r10_bio->state);
 326		reschedule_retry(r10_bio);
 327	}
 328}
 329
 330static void close_write(r10bio_t *r10_bio)
 331{
 332	/* clear the bitmap if all writes complete successfully */
 333	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 334			r10_bio->sectors,
 335			!test_bit(R10BIO_Degraded, &r10_bio->state),
 336			0);
 337	md_write_end(r10_bio->mddev);
 338}
 339
 340static void one_write_done(r10bio_t *r10_bio)
 341{
 342	if (atomic_dec_and_test(&r10_bio->remaining)) {
 343		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 344			reschedule_retry(r10_bio);
 345		else {
 346			close_write(r10_bio);
 347			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 348				reschedule_retry(r10_bio);
 349			else
 350				raid_end_bio_io(r10_bio);
 351		}
 352	}
 353}
 354
 355static void raid10_end_write_request(struct bio *bio, int error)
 356{
 357	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 358	r10bio_t *r10_bio = bio->bi_private;
 359	int dev;
 360	int dec_rdev = 1;
 361	conf_t *conf = r10_bio->mddev->private;
 362	int slot;
 363
 364	dev = find_bio_disk(conf, r10_bio, bio, &slot);
 365
 366	/*
 367	 * this branch is our 'one mirror IO has finished' event handler:
 368	 */
 369	if (!uptodate) {
 370		set_bit(WriteErrorSeen,	&conf->mirrors[dev].rdev->flags);
 371		set_bit(R10BIO_WriteError, &r10_bio->state);
 372		dec_rdev = 0;
 373	} else {
 374		/*
 375		 * Set R10BIO_Uptodate in our master bio, so that
 376		 * we will return a good error code to the higher
 377		 * levels even if IO on some other mirrored buffer fails.
 378		 *
 379		 * The 'master' represents the composite IO operation to
 380		 * user-side. So if something waits for IO, then it will
 381		 * wait for the 'master' bio.
 382		 */
 383		sector_t first_bad;
 384		int bad_sectors;
 385
 386		set_bit(R10BIO_Uptodate, &r10_bio->state);
 387
 388		/* Maybe we can clear some bad blocks. */
 389		if (is_badblock(conf->mirrors[dev].rdev,
 390				r10_bio->devs[slot].addr,
 391				r10_bio->sectors,
 392				&first_bad, &bad_sectors)) {
 393			bio_put(bio);
 394			r10_bio->devs[slot].bio = IO_MADE_GOOD;
 395			dec_rdev = 0;
 396			set_bit(R10BIO_MadeGood, &r10_bio->state);
 397		}
 398	}
 399
 400	/*
 401	 *
 402	 * Let's see if all mirrored write operations have finished
 403	 * already.
 404	 */
 405	one_write_done(r10_bio);
 406	if (dec_rdev)
 407		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 
 
 408}
 409
 410
 411/*
 412 * RAID10 layout manager
 413 * As well as the chunksize and raid_disks count, there are two
 414 * parameters: near_copies and far_copies.
 415 * near_copies * far_copies must be <= raid_disks.
 416 * Normally one of these will be 1.
 417 * If both are 1, we get raid0.
 418 * If near_copies == raid_disks, we get raid1.
 419 *
 420 * Chunks are laid out in raid0 style with near_copies copies of the
 421 * first chunk, followed by near_copies copies of the next chunk and
 422 * so on.
 423 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 424 * as described above, we start again with a device offset of near_copies.
 425 * So we effectively have another copy of the whole array further down all
 426 * the drives, but with blocks on different drives.
 427 * With this layout, a block is never stored twice on the same device.
 428 *
 429 * raid10_find_phys finds the sector offset of a given virtual sector
 430 * on each device that it is on.
 431 *
 432 * raid10_find_virt does the reverse mapping, from a device and a
 433 * sector offset to a virtual address
 434 */
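/*
 * A worked example of raid10_find_phys() below (hypothetical geometry:
 * raid_disks=4, near_copies=2, far_copies=1, 64KiB chunks, so
 * chunk_shift=7 and chunk_mask=127).  For virtual sector 1000:
 *	chunk  = 1000 >> 7 = 7,   offset in chunk = 1000 & 127 = 104
 *	chunk *= near_copies      -> 14
 *	dev    = 14 % raid_disks  = 2,   stripe = 14 / raid_disks = 3
 *	sector = 104 + (3 << 7)   = 488
 * so the two copies live at device sector 488 on disks 2 and 3.
 */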
 435
 436static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
 437{
 438	int n,f;
 439	sector_t sector;
 440	sector_t chunk;
 441	sector_t stripe;
 442	int dev;
 443
 444	int slot = 0;
 445
 446	/* now calculate first sector/dev */
 447	chunk = r10bio->sector >> conf->chunk_shift;
 448	sector = r10bio->sector & conf->chunk_mask;
 449
 450	chunk *= conf->near_copies;
 451	stripe = chunk;
 452	dev = sector_div(stripe, conf->raid_disks);
 453	if (conf->far_offset)
 454		stripe *= conf->far_copies;
 455
 456	sector += stripe << conf->chunk_shift;
 457
 458	/* and calculate all the others */
 459	for (n=0; n < conf->near_copies; n++) {
 460		int d = dev;
 
 461		sector_t s = sector;
 462		r10bio->devs[slot].addr = sector;
 463		r10bio->devs[slot].devnum = d;
 
 464		slot++;
 465
 466		for (f = 1; f < conf->far_copies; f++) {
 467			d += conf->near_copies;
 468			if (d >= conf->raid_disks)
 469				d -= conf->raid_disks;
 470			s += conf->stride;
 471			r10bio->devs[slot].devnum = d;
 472			r10bio->devs[slot].addr = s;
 473			slot++;
 474		}
 475		dev++;
 476		if (dev >= conf->raid_disks) {
 477			dev = 0;
 478			sector += (conf->chunk_mask + 1);
 479		}
 480	}
 481	BUG_ON(slot != conf->copies);
 482}
 483
 484static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
 485{
 486	sector_t offset, chunk, vchunk;
 487
 488	offset = sector & conf->chunk_mask;
 489	if (conf->far_offset) {
 490		int fc;
 491		chunk = sector >> conf->chunk_shift;
 492		fc = sector_div(chunk, conf->far_copies);
 493		dev -= fc * conf->near_copies;
 494		if (dev < 0)
 495			dev += conf->raid_disks;
 496	} else {
 497		while (sector >= conf->stride) {
 498			sector -= conf->stride;
 499			if (dev < conf->near_copies)
 500				dev += conf->raid_disks - conf->near_copies;
 501			else
 502				dev -= conf->near_copies;
 503		}
 504		chunk = sector >> conf->chunk_shift;
 505	}
 506	vchunk = chunk * conf->raid_disks + dev;
 507	sector_div(vchunk, conf->near_copies);
 508	return (vchunk << conf->chunk_shift) + offset;
 509}
 510
 511/**
 512 *	raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
 513 *	@q: request queue
 514 *	@bvm: properties of new bio
 515 *	@biovec: the request that could be merged to it.
 516 *
 517 *	Return amount of bytes we can accept at this offset
 518 *      If near_copies == raid_disks, there are no striping issues,
 519 *      but in that case, the function isn't called at all.
 520 */
 521static int raid10_mergeable_bvec(struct request_queue *q,
 522				 struct bvec_merge_data *bvm,
 523				 struct bio_vec *biovec)
 524{
 525	mddev_t *mddev = q->queuedata;
 526	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 527	int max;
 528	unsigned int chunk_sectors = mddev->chunk_sectors;
 529	unsigned int bio_sectors = bvm->bi_size >> 9;
 530
 531	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 532	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
 533	if (max <= biovec->bv_len && bio_sectors == 0)
 534		return biovec->bv_len;
 535	else
 536		return max;
 537}
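/*
 * A worked example for the helper above (hypothetical values): with
 * 64KiB chunks, chunk_sectors is 128.  An empty bio starting 120
 * sectors into a chunk may grow by (128 - 120) << 9 = 4096 bytes before
 * crossing the chunk boundary.  If even that is smaller than the first
 * bio_vec, the full bv_len is accepted anyway and make_request() later
 * splits the resulting one-page bio with bio_split().
 */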
 538
 539/*
 540 * This routine returns the disk from which the requested read should
 541 * be done. There is a per-array 'next expected sequential IO' sector
 542 * number - if this matches on the next IO then we use the last disk.
 543 * There is also a per-disk 'last known head position' sector that is
 544 * maintained from IRQ contexts; both the normal and the resync IO
 545 * completion handlers update this position correctly. If there is no
 546 * perfect sequential match then we pick the disk whose head is closest.
 547 *
 548 * If there are 2 mirrors in the same 2 devices, performance degrades
 549 * because the head position is tracked per mirror, not per device.
 550 *
 551 * The rdev for the device selected will have nr_pending incremented.
 552 */
 553
 554/*
 555 * FIXME: possibly should rethink readbalancing and do it differently
 556 * depending on near_copies / far_copies geometry.
 557 */
 558static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
 
 
 559{
 560	const sector_t this_sector = r10_bio->sector;
 561	int disk, slot;
 562	int sectors = r10_bio->sectors;
 563	int best_good_sectors;
 564	sector_t new_distance, best_dist;
 565	mdk_rdev_t *rdev;
 566	int do_balance;
 567	int best_slot;
 
 568
 569	raid10_find_phys(conf, r10_bio);
 570	rcu_read_lock();
 571retry:
 572	sectors = r10_bio->sectors;
 573	best_slot = -1;
 
 574	best_dist = MaxSector;
 575	best_good_sectors = 0;
 576	do_balance = 1;
 
 577	/*
 578	 * Check if we can balance. We can balance on the whole
 579	 * device if no resync is going on (recovery is ok), or below
 580	 * the resync window. We take the first readable disk when
 581	 * above the resync window.
 582	 */
 583	if (conf->mddev->recovery_cp < MaxSector
 584	    && (this_sector + sectors >= conf->next_resync))
 585		do_balance = 0;
 586
 587	for (slot = 0; slot < conf->copies ; slot++) {
 588		sector_t first_bad;
 589		int bad_sectors;
 590		sector_t dev_sector;
 591
 592		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 593			continue;
 594		disk = r10_bio->devs[slot].devnum;
 595		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 596		if (rdev == NULL)
 597			continue;
 598		if (!test_bit(In_sync, &rdev->flags))
 
 599			continue;
 600
 601		dev_sector = r10_bio->devs[slot].addr;
 602		if (is_badblock(rdev, dev_sector, sectors,
 603				&first_bad, &bad_sectors)) {
 604			if (best_dist < MaxSector)
 605				/* Already have a better slot */
 606				continue;
 607			if (first_bad <= dev_sector) {
 608				/* Cannot read here.  If this is the
 609				 * 'primary' device, then we must not read
 610				 * beyond 'bad_sectors' from another device.
 611				 */
 612				bad_sectors -= (dev_sector - first_bad);
 613				if (!do_balance && sectors > bad_sectors)
 614					sectors = bad_sectors;
 615				if (best_good_sectors > sectors)
 616					best_good_sectors = sectors;
 617			} else {
 618				sector_t good_sectors =
 619					first_bad - dev_sector;
 620				if (good_sectors > best_good_sectors) {
 621					best_good_sectors = good_sectors;
 622					best_slot = slot;
 
 623				}
 624				if (!do_balance)
 625					/* Must read from here */
 626					break;
 627			}
 628			continue;
 629		} else
 630			best_good_sectors = sectors;
 631
 632		if (!do_balance)
 633			break;
 634
 635		/* This optimisation is debatable, and completely destroys
 636		 * sequential read speed for 'far copies' arrays.  So only
 637		 * keep it for 'near' arrays, and review those later.
 638		 */
 639		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
 640			break;
 641
 642		/* for far > 1 always use the lowest address */
 643		if (conf->far_copies > 1)
 644			new_distance = r10_bio->devs[slot].addr;
 645		else
 646			new_distance = abs(r10_bio->devs[slot].addr -
 647					   conf->mirrors[disk].head_position);
 648		if (new_distance < best_dist) {
 649			best_dist = new_distance;
 650			best_slot = slot;
 
 651		}
 652	}
 653	if (slot == conf->copies)
 654		slot = best_slot;
 
 
 655
 656	if (slot >= 0) {
 657		disk = r10_bio->devs[slot].devnum;
 658		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 659		if (!rdev)
 660			goto retry;
 661		atomic_inc(&rdev->nr_pending);
 662		if (test_bit(Faulty, &rdev->flags)) {
 663			/* Cannot risk returning a device that failed
 664			 * before we inc'ed nr_pending
 665			 */
 666			rdev_dec_pending(rdev, conf->mddev);
 667			goto retry;
 668		}
 669		r10_bio->read_slot = slot;
 670	} else
 671		disk = -1;
 672	rcu_read_unlock();
 673	*max_sectors = best_good_sectors;
 674
 675	return disk;
 676}
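/*
 * A rough summary of the selection policy above: on a 'near' array a
 * copy whose rdev has no pending IO is taken immediately; otherwise the
 * copy whose device address is closest to that disk's last known head
 * position wins.  On a 'far' array the copy with the lowest device
 * address (typically the first far section) is always preferred, since
 * distance-based balancing would wreck sequential reads there.
 */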
 677
 678static int raid10_congested(void *data, int bits)
 679{
 680	mddev_t *mddev = data;
 681	conf_t *conf = mddev->private;
 682	int i, ret = 0;
 683
 684	if (mddev_congested(mddev, bits))
 
 685		return 1;
 
 686	rcu_read_lock();
 687	for (i = 0; i < conf->raid_disks && ret == 0; i++) {
 688		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 689		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 690			struct request_queue *q = bdev_get_queue(rdev->bdev);
 691
 692			ret |= bdi_congested(&q->backing_dev_info, bits);
 693		}
 694	}
 695	rcu_read_unlock();
 696	return ret;
 697}
 698
 699static void flush_pending_writes(conf_t *conf)
 700{
 701	/* Any writes that have been queued but are awaiting
 702	 * bitmap updates get flushed here.
 703	 */
 704	spin_lock_irq(&conf->device_lock);
 705
 706	if (conf->pending_bio_list.head) {
 707		struct bio *bio;
 708		bio = bio_list_get(&conf->pending_bio_list);
 
 709		spin_unlock_irq(&conf->device_lock);
 710		/* flush any pending bitmap writes to disk
 711		 * before proceeding w/ I/O */
 712		bitmap_unplug(conf->mddev->bitmap);
 
 713
 714		while (bio) { /* submit pending writes */
 715			struct bio *next = bio->bi_next;
 
 716			bio->bi_next = NULL;
 717			generic_make_request(bio);
 718			bio = next;
 719		}
 720	} else
 721		spin_unlock_irq(&conf->device_lock);
 722}
 723
 724/* Barriers....
 725 * Sometimes we need to suspend IO while we do something else,
 726 * either some resync/recovery, or reconfigure the array.
 727 * To do this we raise a 'barrier'.
 728 * The 'barrier' is a counter that can be raised multiple times
 729 * to count how many activities are happening which preclude
 730 * normal IO.
 731 * We can only raise the barrier if there is no pending IO.
 732 * i.e. if nr_pending == 0.
 733 * We choose only to raise the barrier if no-one is waiting for the
 734 * barrier to go down.  This means that as soon as an IO request
 735 * is ready, no other operations which require a barrier will start
 736 * until the IO request has had a chance.
 737 *
 738 * So: regular IO calls 'wait_barrier'.  When that returns there
 739 *    is no background IO happening.  It must arrange to call
 740 *    allow_barrier when it has finished its IO.
 741 * background IO must call raise_barrier.  Once that returns
 742 *    there is no normal IO happening.  It must arrange to call
 743 *    lower_barrier when the particular background IO completes.
 744 */
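/*
 * A usage sketch of the barrier protocol described above:
 *
 *	regular IO:			resync/recovery:
 *		wait_barrier(conf);		raise_barrier(conf, 0);
 *		...submit bios...		...issue sync/recovery IO...
 *		(on completion)			(when the r10buf is released)
 *		allow_barrier(conf);		lower_barrier(conf);
 */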
 745
 746static void raise_barrier(conf_t *conf, int force)
 747{
 748	BUG_ON(force && !conf->barrier);
 749	spin_lock_irq(&conf->resync_lock);
 750
 751	/* Wait until no block IO is waiting (unless 'force') */
 752	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 753			    conf->resync_lock, );
 754
 755	/* block any new IO from starting */
 756	conf->barrier++;
 757
 758	/* Now wait for all pending IO to complete */
 759	wait_event_lock_irq(conf->wait_barrier,
 760			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
 761			    conf->resync_lock, );
 762
 763	spin_unlock_irq(&conf->resync_lock);
 764}
 765
 766static void lower_barrier(conf_t *conf)
 767{
 768	unsigned long flags;
 769	spin_lock_irqsave(&conf->resync_lock, flags);
 770	conf->barrier--;
 771	spin_unlock_irqrestore(&conf->resync_lock, flags);
 772	wake_up(&conf->wait_barrier);
 773}
 774
 775static void wait_barrier(conf_t *conf)
 776{
 777	spin_lock_irq(&conf->resync_lock);
 778	if (conf->barrier) {
 779		conf->nr_waiting++;
 780		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 781				    conf->resync_lock,
 782				    );
 783		conf->nr_waiting--;
 
 
 784	}
 785	conf->nr_pending++;
 786	spin_unlock_irq(&conf->resync_lock);
 787}
 788
 789static void allow_barrier(conf_t *conf)
 790{
 791	unsigned long flags;
 792	spin_lock_irqsave(&conf->resync_lock, flags);
 793	conf->nr_pending--;
 794	spin_unlock_irqrestore(&conf->resync_lock, flags);
 795	wake_up(&conf->wait_barrier);
 796}
 797
 798static void freeze_array(conf_t *conf)
 799{
 800	/* stop syncio and normal IO and wait for everything to
 801	 * go quiet.
 802	 * We increment barrier and nr_waiting, and then
 803	 * wait until nr_pending matches nr_queued+1
 804	 * This is called in the context of one normal IO request
 805	 * that has failed. Thus any sync request that might be pending
 806	 * will be blocked by nr_pending, and we need to wait for
 807	 * pending IO requests to complete or be queued for re-try.
 808	 * Thus the number queued (nr_queued) plus this request (1)
 809	 * must match the number of pending IOs (nr_pending) before
 810	 * we continue.
 811	 */
 812	spin_lock_irq(&conf->resync_lock);
 
 813	conf->barrier++;
 814	conf->nr_waiting++;
 815	wait_event_lock_irq(conf->wait_barrier,
 816			    conf->nr_pending == conf->nr_queued+1,
 817			    conf->resync_lock,
 818			    flush_pending_writes(conf));
 819
 
 820	spin_unlock_irq(&conf->resync_lock);
 821}
 822
 823static void unfreeze_array(conf_t *conf)
 824{
 825	/* reverse the effect of the freeze */
 826	spin_lock_irq(&conf->resync_lock);
 827	conf->barrier--;
 828	conf->nr_waiting--;
 829	wake_up(&conf->wait_barrier);
 830	spin_unlock_irq(&conf->resync_lock);
 831}
 832
 833static int make_request(mddev_t *mddev, struct bio * bio)
 
 834{
 835	conf_t *conf = mddev->private;
 836	mirror_info_t *mirror;
 837	r10bio_t *r10_bio;
 838	struct bio *read_bio;
 839	int i;
 840	int chunk_sects = conf->chunk_mask + 1;
 841	const int rw = bio_data_dir(bio);
 842	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 843	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 844	unsigned long flags;
 845	mdk_rdev_t *blocked_rdev;
 846	int plugged;
 847	int sectors_handled;
 848	int max_sectors;
 849
 850	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 851		md_flush_request(mddev, bio);
 852		return 0;
 853	}
 854
 855	/* If this request crosses a chunk boundary, we need to
 856	 * split it.  This will only happen for 1 PAGE (or less) requests.
 857	 */
 858	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
 859		      > chunk_sects &&
 860		    conf->near_copies < conf->raid_disks)) {
 861		struct bio_pair *bp;
 862		/* Sanity check -- queue functions should prevent this happening */
 863		if (bio->bi_vcnt != 1 ||
 864		    bio->bi_idx != 0)
 865			goto bad_map;
 866		/* This is a one page bio that upper layers
 867		 * refuse to split for us, so we need to split it.
 868		 */
 869		bp = bio_split(bio,
 870			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
 871
 872		/* Each of these 'make_request' calls will call 'wait_barrier'.
 873		 * If the first succeeds but the second blocks due to the resync
 874		 * thread raising the barrier, we will deadlock because the
 875		 * IO to the underlying device will be queued in generic_make_request
 876		 * and will never complete, so will never reduce nr_pending.
 877		 * So increment nr_waiting here so no new raise_barriers will
 878		 * succeed, and so the second wait_barrier cannot block.
 879		 */
 880		spin_lock_irq(&conf->resync_lock);
 881		conf->nr_waiting++;
 882		spin_unlock_irq(&conf->resync_lock);
 883
 884		if (make_request(mddev, &bp->bio1))
 885			generic_make_request(&bp->bio1);
 886		if (make_request(mddev, &bp->bio2))
 887			generic_make_request(&bp->bio2);
 888
 889		spin_lock_irq(&conf->resync_lock);
 890		conf->nr_waiting--;
 891		wake_up(&conf->wait_barrier);
 892		spin_unlock_irq(&conf->resync_lock);
 893
 894		bio_pair_release(bp);
 895		return 0;
 896	bad_map:
 897		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 898		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
 899		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 900
 901		bio_io_error(bio);
 902		return 0;
 903	}
 
 
 904
 905	md_write_start(mddev, bio);
 906
 907	/*
 908	 * Register the new request and wait if the reconstruction
 909	 * thread has put up a bar for new requests.
 910	 * Continue immediately if no resync is active currently.
 911	 */
 912	wait_barrier(conf);
 913
 914	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 915
 916	r10_bio->master_bio = bio;
 917	r10_bio->sectors = bio->bi_size >> 9;
 918
 919	r10_bio->mddev = mddev;
 920	r10_bio->sector = bio->bi_sector;
 921	r10_bio->state = 0;
 922
 923	/* We might need to issue multiple reads to different
 924	 * devices if there are bad blocks around, so we keep
 925	 * track of the number of reads in bio->bi_phys_segments.
 926	 * If this is 0, there is only one r10_bio and no locking
 927	 * will be needed when the request completes.  If it is
 928	 * non-zero, then it is the number of not-completed requests.
 929	 */
 930	bio->bi_phys_segments = 0;
 931	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 932
 933	if (rw == READ) {
 934		/*
 935		 * read balancing logic:
 936		 */
 937		int disk;
 938		int slot;
 939
 940read_again:
 941		disk = read_balance(conf, r10_bio, &max_sectors);
 942		slot = r10_bio->read_slot;
 943		if (disk < 0) {
 944			raid_end_bio_io(r10_bio);
 945			return 0;
 946		}
 947		mirror = conf->mirrors + disk;
 948
 949		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
 950		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
 951			    max_sectors);
 952
 953		r10_bio->devs[slot].bio = read_bio;
 954
 955		read_bio->bi_sector = r10_bio->devs[slot].addr +
 956			mirror->rdev->data_offset;
 957		read_bio->bi_bdev = mirror->rdev->bdev;
 958		read_bio->bi_end_io = raid10_end_read_request;
 959		read_bio->bi_rw = READ | do_sync;
 960		read_bio->bi_private = r10_bio;
 961
 962		if (max_sectors < r10_bio->sectors) {
 963			/* Could not read all from this device, so we will
 964			 * need another r10_bio.
 965			 */
 966			sectors_handled = (r10_bio->sector + max_sectors
 967					   - bio->bi_sector);
 968			r10_bio->sectors = max_sectors;
 969			spin_lock_irq(&conf->device_lock);
 970			if (bio->bi_phys_segments == 0)
 971				bio->bi_phys_segments = 2;
 972			else
 973				bio->bi_phys_segments++;
 974			spin_unlock_irq(&conf->device_lock);
 975			/* Cannot call generic_make_request directly
 976			 * as that will be queued in __generic_make_request
 977			 * and subsequent mempool_alloc might block
 978			 * waiting for it.  so hand bio over to raid10d.
 979			 */
 980			reschedule_retry(r10_bio);
 981
 982			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 983
 984			r10_bio->master_bio = bio;
 985			r10_bio->sectors = ((bio->bi_size >> 9)
 986					    - sectors_handled);
 987			r10_bio->state = 0;
 988			r10_bio->mddev = mddev;
 989			r10_bio->sector = bio->bi_sector + sectors_handled;
 990			goto read_again;
 991		} else
 992			generic_make_request(read_bio);
 993		return 0;
 994	}
 995
 996	/*
 997	 * WRITE:
 
 
 998	 */
 999	/* first select target devices under rcu_lock and
1000	 * inc refcount on their rdev.  Record them by setting
1001	 * bios[x] to bio
1002	 * If there are known/acknowledged bad blocks on any device
1003	 * on which we have seen a write error, we want to avoid
1004	 * writing to those blocks.  This potentially requires several
1005	 * writes to write around the bad blocks.  Each set of writes
1006	 * gets its own r10_bio with a set of bios attached.  The number
1007 * of r10_bios is recorded in bio->bi_phys_segments just as with
1008	 * the read case.
1009	 */
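	/*
	 * A rough illustration (hypothetical numbers): for a 256-sector
	 * write where one copy has an acknowledged bad block starting 128
	 * sectors in, max_sectors is trimmed to 128 below, so this r10_bio
	 * writes sectors 0-127 to every copy, bio->bi_phys_segments is
	 * bumped, and a second r10_bio is allocated for the remaining 128
	 * sectors (which skip the known-bad range on the affected device).
	 */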
1010	plugged = mddev_check_plugged(mddev);
1011
 
1012	raid10_find_phys(conf, r10_bio);
1013retry_write:
1014	blocked_rdev = NULL;
1015	rcu_read_lock();
1016	max_sectors = r10_bio->sectors;
1017
1018	for (i = 0;  i < conf->copies; i++) {
1019		int d = r10_bio->devs[i].devnum;
1020		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
1021		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1022			atomic_inc(&rdev->nr_pending);
1023			blocked_rdev = rdev;
1024			break;
1025		}
1026		r10_bio->devs[i].bio = NULL;
1027		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 
 
1028			set_bit(R10BIO_Degraded, &r10_bio->state);
1029			continue;
1030		}
1031		if (test_bit(WriteErrorSeen, &rdev->flags)) {
1032			sector_t first_bad;
1033			sector_t dev_sector = r10_bio->devs[i].addr;
1034			int bad_sectors;
1035			int is_bad;
1036
1037			is_bad = is_badblock(rdev, dev_sector,
1038					     max_sectors,
1039					     &first_bad, &bad_sectors);
1040			if (is_bad < 0) {
1041				/* Mustn't write here until the bad block
1042				 * is acknowledged
1043				 */
1044				atomic_inc(&rdev->nr_pending);
1045				set_bit(BlockedBadBlocks, &rdev->flags);
1046				blocked_rdev = rdev;
1047				break;
1048			}
1049			if (is_bad && first_bad <= dev_sector) {
1050				/* Cannot write here at all */
1051				bad_sectors -= (dev_sector - first_bad);
1052				if (bad_sectors < max_sectors)
1053					/* Mustn't write more than bad_sectors
1054					 * to other devices yet
1055					 */
1056					max_sectors = bad_sectors;
1057				/* We don't set R10BIO_Degraded as that
1058				 * only applies if the disk is missing,
1059				 * so it might be re-added, and we want to
1060				 * know to recover this chunk.
1061				 * In this case the device is here, and the
1062				 * fact that this chunk is not in-sync is
1063				 * recorded in the bad block log.
1064				 */
1065				continue;
1066			}
1067			if (is_bad) {
1068				int good_sectors = first_bad - dev_sector;
1069				if (good_sectors < max_sectors)
1070					max_sectors = good_sectors;
1071			}
1072		}
1073		r10_bio->devs[i].bio = bio;
1074		atomic_inc(&rdev->nr_pending);
1075	}
1076	rcu_read_unlock();
1077
1078	if (unlikely(blocked_rdev)) {
1079		/* Have to wait for this device to get unblocked, then retry */
1080		int j;
1081		int d;
1082
1083		for (j = 0; j < i; j++)
1084			if (r10_bio->devs[j].bio) {
1085				d = r10_bio->devs[j].devnum;
1086				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1087			}
1088		allow_barrier(conf);
 
1089		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1090		wait_barrier(conf);
1091		goto retry_write;
1092	}
1093
1094	if (max_sectors < r10_bio->sectors) {
1095		/* We are splitting this into multiple parts, so
1096		 * we need to prepare for allocating another r10_bio.
1097		 */
1098		r10_bio->sectors = max_sectors;
1099		spin_lock_irq(&conf->device_lock);
1100		if (bio->bi_phys_segments == 0)
1101			bio->bi_phys_segments = 2;
1102		else
1103			bio->bi_phys_segments++;
1104		spin_unlock_irq(&conf->device_lock);
1105	}
1106	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
 
1107
1108	atomic_set(&r10_bio->remaining, 1);
1109	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1110
1111	for (i = 0; i < conf->copies; i++) {
1112		struct bio *mbio;
1113		int d = r10_bio->devs[i].devnum;
1114		if (!r10_bio->devs[i].bio)
1115			continue;
1116
1117		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1118		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1119			    max_sectors);
1120		r10_bio->devs[i].bio = mbio;
1121
1122		mbio->bi_sector	= (r10_bio->devs[i].addr+
1123				   conf->mirrors[d].rdev->data_offset);
1124		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1125		mbio->bi_end_io	= raid10_end_write_request;
1126		mbio->bi_rw = WRITE | do_sync | do_fua;
1127		mbio->bi_private = r10_bio;
1128
1129		atomic_inc(&r10_bio->remaining);
1130		spin_lock_irqsave(&conf->device_lock, flags);
1131		bio_list_add(&conf->pending_bio_list, mbio);
1132		spin_unlock_irqrestore(&conf->device_lock, flags);
1133	}
1134
1135	/* Don't remove the bias on 'remaining' (one_write_done) until
1136	 * after checking if we need to go around again.
1137	 */
1138
1139	if (sectors_handled < (bio->bi_size >> 9)) {
1140		one_write_done(r10_bio);
1141		/* We need another r10_bio.  It has already been counted
1142		 * in bio->bi_phys_segments.
1143		 */
1144		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1145
1146		r10_bio->master_bio = bio;
1147		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1148
1149		r10_bio->mddev = mddev;
1150		r10_bio->sector = bio->bi_sector + sectors_handled;
1151		r10_bio->state = 0;
1152		goto retry_write;
1153	}
1154	one_write_done(r10_bio);
1155
1156	/* In case raid10d snuck in to freeze_array */
1157	wake_up(&conf->wait_barrier);
1158
1159	if (do_sync || !mddev->bitmap || !plugged)
1160		md_wakeup_thread(mddev->thread);
1161	return 0;
1162}
1163
1164static void status(struct seq_file *seq, mddev_t *mddev)
1165{
1166	conf_t *conf = mddev->private;
1167	int i;
1168
1169	if (conf->near_copies < conf->raid_disks)
1170		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1171	if (conf->near_copies > 1)
1172		seq_printf(seq, " %d near-copies", conf->near_copies);
1173	if (conf->far_copies > 1) {
1174		if (conf->far_offset)
1175			seq_printf(seq, " %d offset-copies", conf->far_copies);
1176		else
1177			seq_printf(seq, " %d far-copies", conf->far_copies);
 
 
1178	}
1179	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1180					conf->raid_disks - mddev->degraded);
1181	for (i = 0; i < conf->raid_disks; i++)
1182		seq_printf(seq, "%s",
1183			      conf->mirrors[i].rdev &&
1184			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
 
 
1185	seq_printf(seq, "]");
1186}
1187
1188/* check if there are enough drives for
1189 * every block to appear on at least one.
1190 * Don't consider the device numbered 'ignore'
1191 * as we might be about to remove it.
1192 */
1193static int enough(conf_t *conf, int ignore)
1194{
1195	int first = 0;
1196
 
1197	do {
1198		int n = conf->copies;
1199		int cnt = 0;
 
1200		while (n--) {
1201			if (conf->mirrors[first].rdev &&
1202			    first != ignore)
 
 
1203				cnt++;
1204			first = (first+1) % conf->raid_disks;
1205		}
1206		if (cnt == 0)
1207			return 0;
 
1208	} while (first != 0);
1209	return 1;
1210}
1211
1212static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1213{
1214	char b[BDEVNAME_SIZE];
1215	conf_t *conf = mddev->private;
 
1216
1217	/*
1218	 * If it is not operational, then we have already marked it as dead
1219 * else if it is the last working disk, ignore the error, let the
1220	 * next level up know.
1221	 * else mark the drive as failed
1222	 */
 
1223	if (test_bit(In_sync, &rdev->flags)
1224	    && !enough(conf, rdev->raid_disk))
1225		/*
1226		 * Don't fail the drive, just return an IO error.
1227		 */
1228		return;
1229	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1230		unsigned long flags;
1231		spin_lock_irqsave(&conf->device_lock, flags);
1232		mddev->degraded++;
1233		spin_unlock_irqrestore(&conf->device_lock, flags);
1234		/*
1235		 * if recovery is running, make sure it aborts.
1236		 */
1237		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1238	}
1239	set_bit(Blocked, &rdev->flags);
1240	set_bit(Faulty, &rdev->flags);
1241	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1242	printk(KERN_ALERT
1243	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
1244	       "md/raid10:%s: Operation continuing on %d devices.\n",
1245	       mdname(mddev), bdevname(rdev->bdev, b),
1246	       mdname(mddev), conf->raid_disks - mddev->degraded);
 
1247}
1248
1249static void print_conf(conf_t *conf)
1250{
1251	int i;
1252	mirror_info_t *tmp;
1253
1254	printk(KERN_DEBUG "RAID10 conf printout:\n");
1255	if (!conf) {
1256		printk(KERN_DEBUG "(!conf)\n");
1257		return;
1258	}
1259	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1260		conf->raid_disks);
1261
1262	for (i = 0; i < conf->raid_disks; i++) {
 
 
1263		char b[BDEVNAME_SIZE];
1264		tmp = conf->mirrors + i;
1265		if (tmp->rdev)
1266			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1267				i, !test_bit(In_sync, &tmp->rdev->flags),
1268			        !test_bit(Faulty, &tmp->rdev->flags),
1269				bdevname(tmp->rdev->bdev,b));
1270	}
1271}
1272
1273static void close_sync(conf_t *conf)
1274{
1275	wait_barrier(conf);
1276	allow_barrier(conf);
1277
1278	mempool_destroy(conf->r10buf_pool);
1279	conf->r10buf_pool = NULL;
1280}
1281
1282static int raid10_spare_active(mddev_t *mddev)
1283{
1284	int i;
1285	conf_t *conf = mddev->private;
1286	mirror_info_t *tmp;
1287	int count = 0;
1288	unsigned long flags;
1289
1290	/*
1291	 * Find all non-in_sync disks within the RAID10 configuration
1292	 * and mark them in_sync
1293	 */
1294	for (i = 0; i < conf->raid_disks; i++) {
1295		tmp = conf->mirrors + i;
1296		if (tmp->rdev
1297		    && !test_bit(Faulty, &tmp->rdev->flags)
1298		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1299			count++;
1300			sysfs_notify_dirent(tmp->rdev->sysfs_state);
1301		}
1302	}
1303	spin_lock_irqsave(&conf->device_lock, flags);
1304	mddev->degraded -= count;
1305	spin_unlock_irqrestore(&conf->device_lock, flags);
1306
1307	print_conf(conf);
1308	return count;
1309}
1310
1311
1312static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1313{
1314	conf_t *conf = mddev->private;
1315	int err = -EEXIST;
1316	int mirror;
1317	int first = 0;
1318	int last = conf->raid_disks - 1;
1319
1320	if (mddev->recovery_cp < MaxSector)
1321		/* only hot-add to in-sync arrays, as recovery is
1322		 * very different from resync
1323		 */
1324		return -EBUSY;
1325	if (!enough(conf, -1))
1326		return -EINVAL;
1327
1328	if (rdev->raid_disk >= 0)
1329		first = last = rdev->raid_disk;
1330
1331	if (rdev->saved_raid_disk >= first &&
1332	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1333		mirror = rdev->saved_raid_disk;
1334	else
1335		mirror = first;
1336	for ( ; mirror <= last ; mirror++) {
1337		mirror_info_t *p = &conf->mirrors[mirror];
1338		if (p->recovery_disabled == mddev->recovery_disabled)
1339			continue;
1340		if (p->rdev)
1341			continue;
1342
1343		disk_stack_limits(mddev->gendisk, rdev->bdev,
1344				  rdev->data_offset << 9);
1345		/* as we don't honour merge_bvec_fn, we must
1346		 * never risk violating it, so limit
1347		 * ->max_segments to one lying with a single
1348		 * page, as a one page request is never in
1349		 * violation.
1350		 */
1351		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
1352			blk_queue_max_segments(mddev->queue, 1);
1353			blk_queue_segment_boundary(mddev->queue,
1354						   PAGE_CACHE_SIZE - 1);
1355		}
1356
1357		p->head_position = 0;
 
1358		rdev->raid_disk = mirror;
1359		err = 0;
1360		if (rdev->saved_raid_disk != mirror)
1361			conf->fullsync = 1;
1362		rcu_assign_pointer(p->rdev, rdev);
1363		break;
1364	}
 
 
1365
1366	md_integrity_add_rdev(rdev, mddev);
1367	print_conf(conf);
1368	return err;
1369}
1370
1371static int raid10_remove_disk(mddev_t *mddev, int number)
1372{
1373	conf_t *conf = mddev->private;
1374	int err = 0;
1375	mdk_rdev_t *rdev;
1376	mirror_info_t *p = conf->mirrors+ number;
 
1377
1378	print_conf(conf);
1379	rdev = p->rdev;
1380	if (rdev) {
1381		if (test_bit(In_sync, &rdev->flags) ||
1382		    atomic_read(&rdev->nr_pending)) {
1383			err = -EBUSY;
1384			goto abort;
1385		}
1386		/* Only remove faulty devices if recovery
1387		 * is not possible.
1388		 */
1389		if (!test_bit(Faulty, &rdev->flags) &&
1390		    mddev->recovery_disabled != p->recovery_disabled &&
1391		    enough(conf, -1)) {
1392			err = -EBUSY;
1393			goto abort;
1394		}
1395		p->rdev = NULL;
1396		synchronize_rcu();
1397		if (atomic_read(&rdev->nr_pending)) {
1398			/* lost the race, try later */
1399			err = -EBUSY;
1400			p->rdev = rdev;
1401			goto abort;
1402		}
1403		err = md_integrity_register(mddev);
1404	}
1405abort:
1406
1407	print_conf(conf);
1408	return err;
1409}
1410
1411
1412static void end_sync_read(struct bio *bio, int error)
1413{
1414	r10bio_t *r10_bio = bio->bi_private;
1415	conf_t *conf = r10_bio->mddev->private;
1416	int d;
1417
1418	d = find_bio_disk(conf, r10_bio, bio, NULL);
1419
1420	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1421		set_bit(R10BIO_Uptodate, &r10_bio->state);
1422	else
1423		/* The write handler will notice the lack of
1424		 * R10BIO_Uptodate and record any errors etc
1425		 */
1426		atomic_add(r10_bio->sectors,
1427			   &conf->mirrors[d].rdev->corrected_errors);
1428
1429	/* for reconstruct, we always reschedule after a read.
1430	 * for resync, only after all reads
1431	 */
1432	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1433	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1434	    atomic_dec_and_test(&r10_bio->remaining)) {
1435		/* we have read all the blocks,
1436		 * do the comparison in process context in raid10d
1437		 */
1438		reschedule_retry(r10_bio);
1439	}
1440}
1441
1442static void end_sync_request(r10bio_t *r10_bio)
1443{
1444	mddev_t *mddev = r10_bio->mddev;
1445
1446	while (atomic_dec_and_test(&r10_bio->remaining)) {
1447		if (r10_bio->master_bio == NULL) {
1448			/* the primary of several recovery bios */
1449			sector_t s = r10_bio->sectors;
1450			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1451			    test_bit(R10BIO_WriteError, &r10_bio->state))
1452				reschedule_retry(r10_bio);
1453			else
1454				put_buf(r10_bio);
1455			md_done_sync(mddev, s, 1);
1456			break;
1457		} else {
1458			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1459			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1460			    test_bit(R10BIO_WriteError, &r10_bio->state))
1461				reschedule_retry(r10_bio);
1462			else
1463				put_buf(r10_bio);
1464			r10_bio = r10_bio2;
1465		}
1466	}
1467}
1468
1469static void end_sync_write(struct bio *bio, int error)
1470{
1471	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1472	r10bio_t *r10_bio = bio->bi_private;
1473	mddev_t *mddev = r10_bio->mddev;
1474	conf_t *conf = mddev->private;
1475	int d;
1476	sector_t first_bad;
1477	int bad_sectors;
1478	int slot;
 
 
1479
1480	d = find_bio_disk(conf, r10_bio, bio, &slot);
1481
1482	if (!uptodate) {
1483		set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
1484		set_bit(R10BIO_WriteError, &r10_bio->state);
1485	} else if (is_badblock(conf->mirrors[d].rdev,
1486			     r10_bio->devs[slot].addr,
1487			     r10_bio->sectors,
1488			     &first_bad, &bad_sectors))
1489		set_bit(R10BIO_MadeGood, &r10_bio->state);
1490
1491	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1492
1493	end_sync_request(r10_bio);
1494}
1495
1496/*
1497 * Note: sync and recovery are handled very differently for raid10.
1498 * This code is for resync.
1499 * For resync, we read through virtual addresses and read all blocks.
1500 * If there is any error, we schedule a write.  The lowest numbered
1501 * drive is authoritative.
1502 * However requests come for physical address, so we need to map.
1503 * For every physical address there are raid_disks/copies virtual addresses,
1504 * which is always at least one, but is not necessarily an integer.
1505 * This means that a physical address can span multiple chunks, so we may
1506 * have to submit multiple io requests for a single sync request.
1507 */
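/*
 * Example of the mapping issue above (hypothetical geometry): with
 * raid_disks=3 and copies=2, one chunk-sized slice of physical device
 * addresses across the array holds raid_disks/copies = 1.5 chunks of
 * virtual address space, so a single physical range can straddle a
 * virtual chunk boundary and its resync needs more than one request.
 */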
1508/*
1509 * We check if all blocks are in-sync and only write to blocks that
1510 * aren't in sync
1511 */
1512static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1513{
1514	conf_t *conf = mddev->private;
1515	int i, first;
1516	struct bio *tbio, *fbio;
 
1517
1518	atomic_set(&r10_bio->remaining, 1);
1519
1520	/* find the first device with a block */
1521	for (i=0; i<conf->copies; i++)
1522		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1523			break;
1524
1525	if (i == conf->copies)
1526		goto done;
1527
1528	first = i;
1529	fbio = r10_bio->devs[i].bio;
 
 
1530
 
1531	/* now find blocks with errors */
1532	for (i=0 ; i < conf->copies ; i++) {
1533		int  j, d;
1534		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1535
1536		tbio = r10_bio->devs[i].bio;
1537
1538		if (tbio->bi_end_io != end_sync_read)
1539			continue;
1540		if (i == first)
1541			continue;
1542		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
 
 
1543			/* We know that the bi_io_vec layout is the same for
1544			 * both 'first' and 'i', so we just compare them.
1545			 * All vec entries are PAGE_SIZE;
1546			 */
1547			for (j = 0; j < vcnt; j++)
1548				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1549					   page_address(tbio->bi_io_vec[j].bv_page),
1550					   PAGE_SIZE))
1551					break;
 
 
1552			if (j == vcnt)
1553				continue;
1554			mddev->resync_mismatches += r10_bio->sectors;
1555			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1556				/* Don't fix anything. */
1557				continue;
1558		}
1559		/* Ok, we need to write this bio, either to correct an
1560		 * inconsistency or to correct an unreadable block.
1561		 * First we need to fixup bv_offset, bv_len and
1562		 * bi_vecs, as the read request might have corrupted these
1563		 */
 
 
1564		tbio->bi_vcnt = vcnt;
1565		tbio->bi_size = r10_bio->sectors << 9;
1566		tbio->bi_idx = 0;
1567		tbio->bi_phys_segments = 0;
1568		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1569		tbio->bi_flags |= 1 << BIO_UPTODATE;
1570		tbio->bi_next = NULL;
1571		tbio->bi_rw = WRITE;
1572		tbio->bi_private = r10_bio;
1573		tbio->bi_sector = r10_bio->devs[i].addr;
1574
1575		for (j=0; j < vcnt ; j++) {
1576			tbio->bi_io_vec[j].bv_offset = 0;
1577			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1578
1579			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1580			       page_address(fbio->bi_io_vec[j].bv_page),
1581			       PAGE_SIZE);
1582		}
1583		tbio->bi_end_io = end_sync_write;
1584
1585		d = r10_bio->devs[i].devnum;
1586		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1587		atomic_inc(&r10_bio->remaining);
1588		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1589
1590		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 
 
1591		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1592		generic_make_request(tbio);
1593	}
1594
1595done:
1596	if (atomic_dec_and_test(&r10_bio->remaining)) {
1597		md_done_sync(mddev, r10_bio->sectors, 1);
1598		put_buf(r10_bio);
1599	}
1600}
1601
1602/*
1603 * Now for the recovery code.
1604 * Recovery happens across physical sectors.
1605 * We recover all non-in_sync drives by finding the virtual address of
1606 * each, and then choose a working drive that also has that virt address.
1607 * There is a separate r10_bio for each non-in_sync drive.
1608 * Only the first two slots are in use. The first for reading,
1609 * The second for writing.
1610 *
1611 */
1612static void fix_recovery_read_error(r10bio_t *r10_bio)
1613{
1614	/* We got a read error during recovery.
1615	 * We repeat the read in smaller page-sized sections.
1616	 * If a read succeeds, write it to the new device or record
1617	 * a bad block if we cannot.
1618	 * If a read fails, record a bad block on both old and
1619	 * new devices.
1620	 */
1621	mddev_t *mddev = r10_bio->mddev;
1622	conf_t *conf = mddev->private;
1623	struct bio *bio = r10_bio->devs[0].bio;
1624	sector_t sect = 0;
1625	int sectors = r10_bio->sectors;
1626	int idx = 0;
1627	int dr = r10_bio->devs[0].devnum;
1628	int dw = r10_bio->devs[1].devnum;
1629
1630	while (sectors) {
1631		int s = sectors;
1632		mdk_rdev_t *rdev;
1633		sector_t addr;
1634		int ok;
1635
1636		if (s > (PAGE_SIZE>>9))
1637			s = PAGE_SIZE >> 9;
1638
1639		rdev = conf->mirrors[dr].rdev;
1640		addr = r10_bio->devs[0].addr + sect,
1641		ok = sync_page_io(rdev,
1642				  addr,
1643				  s << 9,
1644				  bio->bi_io_vec[idx].bv_page,
1645				  READ, false);
1646		if (ok) {
1647			rdev = conf->mirrors[dw].rdev;
1648			addr = r10_bio->devs[1].addr + sect;
1649			ok = sync_page_io(rdev,
1650					  addr,
1651					  s << 9,
1652					  bio->bi_io_vec[idx].bv_page,
1653					  WRITE, false);
1654			if (!ok)
1655				set_bit(WriteErrorSeen, &rdev->flags);
1656		}
1657		if (!ok) {
1658			/* We don't worry if we cannot set a bad block -
1659			 * it really is bad so there is no loss in not
1660			 * recording it yet
1661			 */
1662			rdev_set_badblocks(rdev, addr, s, 0);
1663
1664			if (rdev != conf->mirrors[dw].rdev) {
1665				/* need bad block on destination too */
1666				mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
1667				addr = r10_bio->devs[1].addr + sect;
1668				ok = rdev_set_badblocks(rdev2, addr, s, 0);
1669				if (!ok) {
1670					/* just abort the recovery */
1671					printk(KERN_NOTICE
1672					       "md/raid10:%s: recovery aborted"
1673					       " due to read error\n",
1674					       mdname(mddev));
1675
1676					conf->mirrors[dw].recovery_disabled
1677						= mddev->recovery_disabled;
1678					set_bit(MD_RECOVERY_INTR,
1679						&mddev->recovery);
1680					break;
1681				}
1682			}
1683		}
1684
1685		sectors -= s;
1686		sect += s;
1687		idx++;
1688	}
1689}
1690
1691static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1692{
1693	conf_t *conf = mddev->private;
1694	int d;
1695	struct bio *wbio;
1696
1697	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
1698		fix_recovery_read_error(r10_bio);
1699		end_sync_request(r10_bio);
1700		return;
1701	}
1702
1703	/*
1704	 * share the pages with the first bio
1705	 * and submit the write request
1706	 */
1707	wbio = r10_bio->devs[1].bio;
1708	d = r10_bio->devs[1].devnum;
1709
1710	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1711	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1712	generic_make_request(wbio);
1713}
1714
1715
1716/*
1717 * Used by fix_read_error() to decay the per rdev read_errors.
1718 * We halve the read error count for every hour that has elapsed
1719 * since the last recorded read error.
1720 *
1721 */
1722static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
1723{
1724	struct timespec cur_time_mon;
1725	unsigned long hours_since_last;
1726	unsigned int read_errors = atomic_read(&rdev->read_errors);
1727
1728	ktime_get_ts(&cur_time_mon);
1729
1730	if (rdev->last_read_error.tv_sec == 0 &&
1731	    rdev->last_read_error.tv_nsec == 0) {
1732		/* first time we've seen a read error */
1733		rdev->last_read_error = cur_time_mon;
1734		return;
1735	}
1736
1737	hours_since_last = (cur_time_mon.tv_sec -
1738			    rdev->last_read_error.tv_sec) / 3600;
1739
1740	rdev->last_read_error = cur_time_mon;
1741
1742	/*
1743	 * if hours_since_last is > the number of bits in read_errors
1744	 * just set read errors to 0. We do this to avoid
1745	 * overflowing the shift of read_errors by hours_since_last.
1746	 */
1747	if (hours_since_last >= 8 * sizeof(read_errors))
1748		atomic_set(&rdev->read_errors, 0);
1749	else
1750		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
1751}
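/*
 * A worked example of the decay above: a device with read_errors = 40
 * whose next read error arrives two hours later is decayed to
 * 40 >> 2 = 10 before the new error is counted.
 */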
1752
1753static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
1754			    int sectors, struct page *page, int rw)
1755{
1756	sector_t first_bad;
1757	int bad_sectors;
1758
1759	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
1760	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
1761		return -1;
1762	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1763		/* success */
1764		return 1;
1765	if (rw == WRITE)
1766		set_bit(WriteErrorSeen, &rdev->flags);
 
 
 
 
1767	/* need to record an error - either for the block or the device */
1768	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1769		md_error(rdev->mddev, rdev);
1770	return 0;
1771}
1772
1773/*
1774 * This is a kernel thread which:
1775 *
1776 *	1.	Retries failed read operations on working mirrors.
1777 *	2.	Updates the raid superblock when problems are encountered.
1778 *	3.	Performs writes following reads for array synchronising.
1779 */
1780
1781static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1782{
1783	int sect = 0; /* Offset from r10_bio->sector */
1784	int sectors = r10_bio->sectors;
1785	mdk_rdev_t*rdev;
1786	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
1787	int d = r10_bio->devs[r10_bio->read_slot].devnum;
1788
1789	/* still own a reference to this rdev, so it cannot
1790	 * have been cleared recently.
1791	 */
1792	rdev = conf->mirrors[d].rdev;
1793
1794	if (test_bit(Faulty, &rdev->flags))
1795		/* drive has already been failed, just ignore any
1796		   more fix_read_error() attempts */
1797		return;
1798
1799	check_decay_read_errors(mddev, rdev);
1800	atomic_inc(&rdev->read_errors);
1801	if (atomic_read(&rdev->read_errors) > max_read_errors) {
1802		char b[BDEVNAME_SIZE];
1803		bdevname(rdev->bdev, b);
1804
1805		printk(KERN_NOTICE
1806		       "md/raid10:%s: %s: Raid device exceeded "
1807		       "read_error threshold [cur %d:max %d]\n",
1808		       mdname(mddev), b,
1809		       atomic_read(&rdev->read_errors), max_read_errors);
1810		printk(KERN_NOTICE
1811		       "md/raid10:%s: %s: Failing raid device\n",
1812		       mdname(mddev), b);
1813		md_error(mddev, conf->mirrors[d].rdev);
1814		return;
1815	}
1816
1817	while (sectors) {
1818		int s = sectors;
1819		int sl = r10_bio->read_slot;
1820		int success = 0;
1821		int start;
1822
1823		if (s > (PAGE_SIZE>>9))
1824			s = PAGE_SIZE >> 9;
1825
1826		rcu_read_lock();
1827		do {
1828			sector_t first_bad;
1829			int bad_sectors;
1830
1831			d = r10_bio->devs[sl].devnum;
1832			rdev = rcu_dereference(conf->mirrors[d].rdev);
1833			if (rdev &&
1834			    test_bit(In_sync, &rdev->flags) &&
 
1835			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
1836					&first_bad, &bad_sectors) == 0) {
1837				atomic_inc(&rdev->nr_pending);
1838				rcu_read_unlock();
1839				success = sync_page_io(rdev,
1840						       r10_bio->devs[sl].addr +
1841						       sect,
1842						       s<<9,
1843						       conf->tmppage, READ, false);
 
1844				rdev_dec_pending(rdev, mddev);
1845				rcu_read_lock();
1846				if (success)
1847					break;
1848			}
1849			sl++;
1850			if (sl == conf->copies)
1851				sl = 0;
1852		} while (!success && sl != r10_bio->read_slot);
1853		rcu_read_unlock();
1854
1855		if (!success) {
1856			/* Cannot read from anywhere, just mark the block
1857			 * as bad on the first device to discourage future
1858			 * reads.
1859			 */
1860			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1861			rdev = conf->mirrors[dn].rdev;
1862
1863			if (!rdev_set_badblocks(
1864				    rdev,
1865				    r10_bio->devs[r10_bio->read_slot].addr
1866				    + sect,
1867				    s, 0))
1868				md_error(mddev, rdev);
 
 
 
1869			break;
1870		}
1871
1872		start = sl;
1873		/* write it back and re-read */
1874		rcu_read_lock();
1875		while (sl != r10_bio->read_slot) {
1876			char b[BDEVNAME_SIZE];
1877
1878			if (sl==0)
1879				sl = conf->copies;
1880			sl--;
1881			d = r10_bio->devs[sl].devnum;
1882			rdev = rcu_dereference(conf->mirrors[d].rdev);
1883			if (!rdev ||
 
1884			    !test_bit(In_sync, &rdev->flags))
1885				continue;
1886
1887			atomic_inc(&rdev->nr_pending);
1888			rcu_read_unlock();
1889			if (r10_sync_page_io(rdev,
1890					     r10_bio->devs[sl].addr +
1891					     sect,
1892					     s<<9, conf->tmppage, WRITE)
1893			    == 0) {
1894				/* Well, this device is dead */
1895				printk(KERN_NOTICE
1896				       "md/raid10:%s: read correction "
1897				       "write failed"
1898				       " (%d sectors at %llu on %s)\n",
1899				       mdname(mddev), s,
1900				       (unsigned long long)(
1901					       sect + rdev->data_offset),
1902				       bdevname(rdev->bdev, b));
1903				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1904				       "drive\n",
1905				       mdname(mddev),
1906				       bdevname(rdev->bdev, b));
1907			}
1908			rdev_dec_pending(rdev, mddev);
1909			rcu_read_lock();
1910		}
1911		sl = start;
1912		while (sl != r10_bio->read_slot) {
1913			char b[BDEVNAME_SIZE];
1914
1915			if (sl==0)
1916				sl = conf->copies;
1917			sl--;
1918			d = r10_bio->devs[sl].devnum;
1919			rdev = rcu_dereference(conf->mirrors[d].rdev);
1920			if (!rdev ||
 
1921			    !test_bit(In_sync, &rdev->flags))
1922				continue;
1923
1924			atomic_inc(&rdev->nr_pending);
1925			rcu_read_unlock();
1926			switch (r10_sync_page_io(rdev,
1927					     r10_bio->devs[sl].addr +
1928					     sect,
1929					     s<<9, conf->tmppage,
1930						 READ)) {
1931			case 0:
1932				/* Well, this device is dead */
1933				printk(KERN_NOTICE
1934				       "md/raid10:%s: unable to read back "
1935				       "corrected sectors"
1936				       " (%d sectors at %llu on %s)\n",
1937				       mdname(mddev), s,
1938				       (unsigned long long)(
1939					       sect + rdev->data_offset),
 
1940				       bdevname(rdev->bdev, b));
1941				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
1942				       "drive\n",
1943				       mdname(mddev),
1944				       bdevname(rdev->bdev, b));
1945				break;
1946			case 1:
1947				printk(KERN_INFO
1948				       "md/raid10:%s: read error corrected"
1949				       " (%d sectors at %llu on %s)\n",
1950				       mdname(mddev), s,
1951				       (unsigned long long)(
1952					       sect + rdev->data_offset),
 
1953				       bdevname(rdev->bdev, b));
1954				atomic_add(s, &rdev->corrected_errors);
1955			}
1956
1957			rdev_dec_pending(rdev, mddev);
1958			rcu_read_lock();
1959		}
1960		rcu_read_unlock();
1961
1962		sectors -= s;
1963		sect += s;
1964	}
1965}
1966
1967static void bi_complete(struct bio *bio, int error)
1968{
1969	complete((struct completion *)bio->bi_private);
1970}
1971
1972static int submit_bio_wait(int rw, struct bio *bio)
1973{
1974	struct completion event;
1975	rw |= REQ_SYNC;
1976
1977	init_completion(&event);
1978	bio->bi_private = &event;
1979	bio->bi_end_io = bi_complete;
1980	submit_bio(rw, bio);
1981	wait_for_completion(&event);
1982
1983	return test_bit(BIO_UPTODATE, &bio->bi_flags);
1984}
1985
1986static int narrow_write_error(r10bio_t *r10_bio, int i)
1987{
1988	struct bio *bio = r10_bio->master_bio;
1989	mddev_t *mddev = r10_bio->mddev;
1990	conf_t *conf = mddev->private;
1991	mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
1992	/* bio has the data to be written to slot 'i' where
1993	 * we just recently had a write error.
1994	 * We repeatedly clone the bio and trim down to one block,
1995	 * then try the write.  Where the write fails we record
1996	 * a bad block.
1997	 * It is conceivable that the bio doesn't exactly align with
1998	 * blocks.  We must handle this.
1999	 *
2000	 * We currently own a reference to the rdev.
2001	 */
2002
2003	int block_sectors;
2004	sector_t sector;
2005	int sectors;
2006	int sect_to_write = r10_bio->sectors;
2007	int ok = 1;
2008
2009	if (rdev->badblocks.shift < 0)
2010		return 0;
2011
2012	block_sectors = 1 << rdev->badblocks.shift;
 
2013	sector = r10_bio->sector;
2014	sectors = ((r10_bio->sector + block_sectors)
2015		   & ~(sector_t)(block_sectors - 1))
2016		- sector;
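	/*
	 * Example with hypothetical values: for an 8-sector bad-block
	 * granularity and r10_bio->sector == 21, the first pass below covers
	 * 3 sectors (21..23) up to the next block boundary; later passes
	 * cover block_sectors at a time.
	 */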
2017
2018	while (sect_to_write) {
2019		struct bio *wbio;
 
2020		if (sectors > sect_to_write)
2021			sectors = sect_to_write;
2022		/* Write at 'sector' for 'sectors' */
2023		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2024		md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2025		wbio->bi_sector = (r10_bio->devs[i].addr+
2026				   rdev->data_offset+
2027				   (sector - r10_bio->sector));
2028		wbio->bi_bdev = rdev->bdev;
2029		if (submit_bio_wait(WRITE, wbio) == 0)
 
 
2030			/* Failure! */
2031			ok = rdev_set_badblocks(rdev, sector,
2032						sectors, 0)
2033				&& ok;
2034
2035		bio_put(wbio);
2036		sect_to_write -= sectors;
2037		sector += sectors;
2038		sectors = block_sectors;
2039	}
2040	return ok;
2041}
2042
2043static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
2044{
2045	int slot = r10_bio->read_slot;
2046	int mirror = r10_bio->devs[slot].devnum;
2047	struct bio *bio;
2048	conf_t *conf = mddev->private;
2049	mdk_rdev_t *rdev;
2050	char b[BDEVNAME_SIZE];
2051	unsigned long do_sync;
2052	int max_sectors;
 
 
2053
2054	/* we got a read error. Maybe the drive is bad.  Maybe just
2055	 * the block and we can fix it.
2056	 * We freeze all other IO, and try reading the block from
2057	 * other devices.  When we find one, we re-write
2058	 * it and check whether that fixes the read error.
2059	 * This is all done synchronously while the array is
2060	 * frozen.
2061	 */
2062	if (mddev->ro == 0) {
2063		freeze_array(conf);
2064		fix_read_error(conf, mddev, r10_bio);
2065		unfreeze_array(conf);
2066	}
2067	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
 
 
2068
2069	bio = r10_bio->devs[slot].bio;
2070	bdevname(bio->bi_bdev, b);
2071	r10_bio->devs[slot].bio =
2072		mddev->ro ? IO_BLOCKED : NULL;
2073read_more:
2074	mirror = read_balance(conf, r10_bio, &max_sectors);
2075	if (mirror == -1) {
2076		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2077		       " read error for block %llu\n",
2078		       mdname(mddev), b,
2079		       (unsigned long long)r10_bio->sector);
2080		raid_end_bio_io(r10_bio);
2081		bio_put(bio);
2082		return;
2083	}
2084
2085	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2086	if (bio)
2087		bio_put(bio);
2088	slot = r10_bio->read_slot;
2089	rdev = conf->mirrors[mirror].rdev;
2090	printk_ratelimited(
2091		KERN_ERR
2092		"md/raid10:%s: %s: redirecting "
2093		"sector %llu to another mirror\n",
2094		mdname(mddev),
2095		bdevname(rdev->bdev, b),
2096		(unsigned long long)r10_bio->sector);
2097	bio = bio_clone_mddev(r10_bio->master_bio,
2098			      GFP_NOIO, mddev);
2099	md_trim_bio(bio,
2100		    r10_bio->sector - bio->bi_sector,
2101		    max_sectors);
2102	r10_bio->devs[slot].bio = bio;
2103	bio->bi_sector = r10_bio->devs[slot].addr
2104		+ rdev->data_offset;
 
2105	bio->bi_bdev = rdev->bdev;
2106	bio->bi_rw = READ | do_sync;
 
 
 
2107	bio->bi_private = r10_bio;
2108	bio->bi_end_io = raid10_end_read_request;
 
 
 
 
2109	if (max_sectors < r10_bio->sectors) {
2110		/* Drat - have to split this up more */
2111		struct bio *mbio = r10_bio->master_bio;
2112		int sectors_handled =
2113			r10_bio->sector + max_sectors
2114			- mbio->bi_sector;
2115		r10_bio->sectors = max_sectors;
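		/*
		 * The master bio's bi_phys_segments is reused as a count of
		 * the r10_bios covering it; raid_end_bio_io() only completes
		 * the master once that count drops back to zero.
		 */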
2116		spin_lock_irq(&conf->device_lock);
2117		if (mbio->bi_phys_segments == 0)
2118			mbio->bi_phys_segments = 2;
2119		else
2120			mbio->bi_phys_segments++;
2121		spin_unlock_irq(&conf->device_lock);
2122		generic_make_request(bio);
2123		bio = NULL;
2124
2125		r10_bio = mempool_alloc(conf->r10bio_pool,
2126					GFP_NOIO);
2127		r10_bio->master_bio = mbio;
2128		r10_bio->sectors = (mbio->bi_size >> 9)
2129			- sectors_handled;
2130		r10_bio->state = 0;
2131		set_bit(R10BIO_ReadError,
2132			&r10_bio->state);
2133		r10_bio->mddev = mddev;
2134		r10_bio->sector = mbio->bi_sector
2135			+ sectors_handled;
2136
2137		goto read_more;
2138	} else
2139		generic_make_request(bio);
2140}
2141
2142static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
2143{
2144	/* Some sort of write request has finished and it
2145	 * succeeded in writing where we thought there was a
2146	 * bad block.  So forget the bad block.
2147	 * Or possibly if failed and we need to record
2148	 * Or possibly it failed and we need to record
2149	 */
2150	int m;
2151	mdk_rdev_t *rdev;
2152
2153	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2154	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2155		for (m = 0; m < conf->copies; m++) {
2156			int dev = r10_bio->devs[m].devnum;
2157			rdev = conf->mirrors[dev].rdev;
2158			if (r10_bio->devs[m].bio == NULL)
2159				continue;
2160			if (test_bit(BIO_UPTODATE,
2161				     &r10_bio->devs[m].bio->bi_flags)) {
2162				rdev_clear_badblocks(
2163					rdev,
2164					r10_bio->devs[m].addr,
2165					r10_bio->sectors);
2166			} else {
2167				if (!rdev_set_badblocks(
2168					    rdev,
2169					    r10_bio->devs[m].addr,
2170					    r10_bio->sectors, 0))
2171					md_error(conf->mddev, rdev);
2172			}
2173		}
2174		put_buf(r10_bio);
2175	} else {
 
2176		for (m = 0; m < conf->copies; m++) {
2177			int dev = r10_bio->devs[m].devnum;
2178			struct bio *bio = r10_bio->devs[m].bio;
2179			rdev = conf->mirrors[dev].rdev;
2180			if (bio == IO_MADE_GOOD) {
2181				rdev_clear_badblocks(
2182					rdev,
2183					r10_bio->devs[m].addr,
2184					r10_bio->sectors);
2185				rdev_dec_pending(rdev, conf->mddev);
2186			} else if (bio != NULL &&
2187				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2188				if (!narrow_write_error(r10_bio, m)) {
2189					md_error(conf->mddev, rdev);
2190					set_bit(R10BIO_Degraded,
2191						&r10_bio->state);
2192				}
2193				rdev_dec_pending(rdev, conf->mddev);
2194			}
2195		}
2196		if (test_bit(R10BIO_WriteError,
2197			     &r10_bio->state))
2198			close_write(r10_bio);
2199		raid_end_bio_io(r10_bio);
2200	}
2201}
2202
2203static void raid10d(mddev_t *mddev)
2204{
2205	r10bio_t *r10_bio;
 
2206	unsigned long flags;
2207	conf_t *conf = mddev->private;
2208	struct list_head *head = &conf->retry_list;
2209	struct blk_plug plug;
2210
2211	md_check_recovery(mddev);
2212
2213	blk_start_plug(&plug);
2214	for (;;) {
2215
2216		flush_pending_writes(conf);
2217
2218		spin_lock_irqsave(&conf->device_lock, flags);
2219		if (list_empty(head)) {
2220			spin_unlock_irqrestore(&conf->device_lock, flags);
2221			break;
2222		}
2223		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
2224		list_del(head->prev);
2225		conf->nr_queued--;
2226		spin_unlock_irqrestore(&conf->device_lock, flags);
2227
2228		mddev = r10_bio->mddev;
2229		conf = mddev->private;
2230		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2231		    test_bit(R10BIO_WriteError, &r10_bio->state))
2232			handle_write_completed(conf, r10_bio);
 
 
2233		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2234			sync_request_write(mddev, r10_bio);
2235		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2236			recovery_request_write(mddev, r10_bio);
2237		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2238			handle_read_error(mddev, r10_bio);
2239		else {
2240			/* just a partial read to be scheduled from a
2241			 * separate context
2242			 */
2243			int slot = r10_bio->read_slot;
2244			generic_make_request(r10_bio->devs[slot].bio);
2245		}
2246
2247		cond_resched();
2248		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2249			md_check_recovery(mddev);
2250	}
2251	blk_finish_plug(&plug);
2252}
2253
2254
2255static int init_resync(conf_t *conf)
2256{
2257	int buffs;
 
2258
2259	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
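	/* 1 MiB resync window / 64 KiB resync blocks = 16 preallocated buffers */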
2260	BUG_ON(conf->r10buf_pool);
 
 
 
 
2261	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2262	if (!conf->r10buf_pool)
2263		return -ENOMEM;
2264	conf->next_resync = 0;
2265	return 0;
2266}
2267
2268/*
2269 * perform a "sync" on one "block"
2270 *
2271 * We need to make sure that no normal I/O request - particularly write
2272 * requests - conflict with active sync requests.
2273 *
2274 * This is achieved by tracking pending requests and a 'barrier' concept
2275 * that can be installed to exclude normal IO requests.
2276 *
2277 * Resync and recovery are handled very differently.
2278 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2279 *
2280 * For resync, we iterate over virtual addresses, read all copies,
2281 * and update if there are differences.  If only one copy is live,
2282 * skip it.
2283 * For recovery, we iterate over physical addresses, read a good
2284 * value for each non-in_sync drive, and over-write.
2285 *
2286 * So, for recovery we may have several outstanding complex requests for a
2287 * given address, one for each out-of-sync device.  We model this by allocating
2288 * a number of r10_bio structures, one for each out-of-sync device.
2289 * As we setup these structures, we collect all bio's together into a list
2290 * which we then process collectively to add pages, and then process again
2291 * to pass to generic_make_request.
2292 *
2293 * The r10_bio structures are linked using a borrowed master_bio pointer.
2294 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2295 * has its remaining count decremented to 0, the whole complex operation
2296 * is complete.
2297 *
2298 */
2299
2300static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
2301			     int *skipped, int go_faster)
2302{
2303	conf_t *conf = mddev->private;
2304	r10bio_t *r10_bio;
2305	struct bio *biolist = NULL, *bio;
2306	sector_t max_sector, nr_sectors;
2307	int i;
2308	int max_sync;
2309	sector_t sync_blocks;
2310	sector_t sectors_skipped = 0;
2311	int chunks_skipped = 0;
 
2312
2313	if (!conf->r10buf_pool)
2314		if (init_resync(conf))
2315			return 0;
2316
2317 skipped:
2318	max_sector = mddev->dev_sectors;
2319	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
 
2320		max_sector = mddev->resync_max_sectors;
2321	if (sector_nr >= max_sector) {
2322		/* If we aborted, we need to abort the
2323		 * sync on the 'current' bitmap chunks (there can
2324		 * be several when recovering multiple devices),
2325		 * as we may have started syncing them but not finished.
2326		 * We can find the current address in
2327		 * mddev->curr_resync, but for recovery,
2328		 * we need to convert that to several
2329		 * virtual addresses.
2330		 */
 
 
 
 
 
 
2331		if (mddev->curr_resync < max_sector) { /* aborted */
2332			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2333				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2334						&sync_blocks, 1);
2335			else for (i=0; i<conf->raid_disks; i++) {
2336				sector_t sect =
2337					raid10_find_virt(conf, mddev->curr_resync, i);
2338				bitmap_end_sync(mddev->bitmap, sect,
2339						&sync_blocks, 1);
2340			}
2341		} else /* completed sync */
2342			conf->fullsync = 0;
2343
2344		bitmap_close_sync(mddev->bitmap);
2345		close_sync(conf);
2346		*skipped = 1;
2347		return sectors_skipped;
2348	}
2349	if (chunks_skipped >= conf->raid_disks) {
 
 
 
 
2350		/* if there has been nothing to do on any drive,
2351		 * then there is nothing to do at all..
2352		 */
2353		*skipped = 1;
2354		return (max_sector - sector_nr) + sectors_skipped;
2355	}
2356
2357	if (max_sector > mddev->resync_max)
2358		max_sector = mddev->resync_max; /* Don't do IO beyond here */
2359
2360	/* make sure whole request will fit in a chunk - if chunks
2361	 * are meaningful
2362	 */
2363	if (conf->near_copies < conf->raid_disks &&
2364	    max_sector > (sector_nr | conf->chunk_mask))
2365		max_sector = (sector_nr | conf->chunk_mask) + 1;
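	/*
	 * e.g. with 128-sector chunks (chunk_mask == 127) and sector_nr == 200,
	 * max_sector is capped at 256, the end of the current chunk.
	 */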
 
2366	/*
2367	 * If there is non-resync activity waiting for us then
2368	 * put in a delay to throttle resync.
2369	 */
2370	if (!go_faster && conf->nr_waiting)
2371		msleep_interruptible(1000);
2372
2373	/* Again, very different code for resync and recovery.
2374	 * Both must result in an r10bio with a list of bios that
2375	 * have bi_end_io, bi_sector, bi_bdev set,
2376	 * and bi_private set to the r10bio.
2377	 * For recovery, we may actually create several r10bios
2378	 * with 2 bios in each, that correspond to the bios in the main one.
2379	 * In this case, the subordinate r10bios link back through a
2380	 * borrowed master_bio pointer, and the counter in the master
2381	 * includes a ref from each subordinate.
2382	 */
2383	/* First, we decide what to do and set ->bi_end_io
2384	 * To end_sync_read if we want to read, and
2385	 * end_sync_write if we will want to write.
2386	 */
2387
2388	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
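	/* e.g. with 4 KiB pages: 16 resync pages << 3 = 128 sectors (64 KiB) */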
2389	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2390		/* recovery... the complicated one */
2391		int j;
2392		r10_bio = NULL;
2393
2394		for (i=0 ; i<conf->raid_disks; i++) {
2395			int still_degraded;
2396			r10bio_t *rb2;
2397			sector_t sect;
2398			int must_sync;
2399			int any_working;
 
 
2400
2401			if (conf->mirrors[i].rdev == NULL ||
2402			    test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 
 
 
 
 
 
 
 
 
2403				continue;
 
2404
2405			still_degraded = 0;
2406			/* want to reconstruct this device */
2407			rb2 = r10_bio;
2408			sect = raid10_find_virt(conf, sector_nr, i);
2409			/* Unless we are doing a full sync, we only need
2410			 * to recover the block if it is set in the bitmap
2411			 */
2412			must_sync = bitmap_start_sync(mddev->bitmap, sect,
2413						      &sync_blocks, 1);
2414			if (sync_blocks < max_sync)
2415				max_sync = sync_blocks;
2416			if (!must_sync &&
 
2417			    !conf->fullsync) {
2418				/* yep, skip the sync_blocks here, but don't assume
2419				 * that there will never be anything to do here
2420				 */
2421				chunks_skipped = -1;
 
2422				continue;
2423			}
 
 
 
 
2424
2425			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 
2426			raise_barrier(conf, rb2 != NULL);
2427			atomic_set(&r10_bio->remaining, 0);
2428
2429			r10_bio->master_bio = (struct bio*)rb2;
2430			if (rb2)
2431				atomic_inc(&rb2->remaining);
2432			r10_bio->mddev = mddev;
2433			set_bit(R10BIO_IsRecover, &r10_bio->state);
2434			r10_bio->sector = sect;
2435
2436			raid10_find_phys(conf, r10_bio);
2437
2438			/* Need to check if the array will still be
2439			 * degraded
2440			 */
2441			for (j=0; j<conf->raid_disks; j++)
2442				if (conf->mirrors[j].rdev == NULL ||
2443				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
 
 
2444					still_degraded = 1;
2445					break;
2446				}
 
2447
2448			must_sync = bitmap_start_sync(mddev->bitmap, sect,
2449						      &sync_blocks, still_degraded);
2450
2451			any_working = 0;
2452			for (j=0; j<conf->copies;j++) {
2453				int k;
2454				int d = r10_bio->devs[j].devnum;
2455				sector_t from_addr, to_addr;
2456				mdk_rdev_t *rdev;
 
2457				sector_t sector, first_bad;
2458				int bad_sectors;
2459				if (!conf->mirrors[d].rdev ||
2460				    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
2461					continue;
2462				/* This is where we read from */
2463				any_working = 1;
2464				rdev = conf->mirrors[d].rdev;
2465				sector = r10_bio->devs[j].addr;
2466
2467				if (is_badblock(rdev, sector, max_sync,
2468						&first_bad, &bad_sectors)) {
2469					if (first_bad > sector)
2470						max_sync = first_bad - sector;
2471					else {
2472						bad_sectors -= (sector
2473								- first_bad);
2474						if (max_sync > bad_sectors)
2475							max_sync = bad_sectors;
2476						continue;
2477					}
2478				}
2479				bio = r10_bio->devs[0].bio;
 
2480				bio->bi_next = biolist;
2481				biolist = bio;
2482				bio->bi_private = r10_bio;
2483				bio->bi_end_io = end_sync_read;
2484				bio->bi_rw = READ;
 
 
2485				from_addr = r10_bio->devs[j].addr;
2486				bio->bi_sector = from_addr +
2487					conf->mirrors[d].rdev->data_offset;
2488				bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2489				atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2490				atomic_inc(&r10_bio->remaining);
2491				/* and we write to 'i' */
2492
2493				for (k=0; k<conf->copies; k++)
2494					if (r10_bio->devs[k].devnum == i)
2495						break;
2496				BUG_ON(k == conf->copies);
2497				bio = r10_bio->devs[1].bio;
2498				bio->bi_next = biolist;
2499				biolist = bio;
2500				bio->bi_private = r10_bio;
2501				bio->bi_end_io = end_sync_write;
2502				bio->bi_rw = WRITE;
2503				to_addr = r10_bio->devs[k].addr;
2504				bio->bi_sector = to_addr +
2505					conf->mirrors[i].rdev->data_offset;
2506				bio->bi_bdev = conf->mirrors[i].rdev->bdev;
2507
2508				r10_bio->devs[0].devnum = d;
2509				r10_bio->devs[0].addr = from_addr;
2510				r10_bio->devs[1].devnum = i;
2511				r10_bio->devs[1].addr = to_addr;
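				/*
				 * devs[0] now describes the read source and
				 * devs[1] the drive being rebuilt; this is what
				 * fix_recovery_read_error() and
				 * recovery_request_write() rely on.
				 */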
2512
2513				break;
2514			}
 
2515			if (j == conf->copies) {
2516				/* Cannot recover, so abort the recovery or
2517				 * record a bad block */
2518				put_buf(r10_bio);
2519				if (rb2)
2520					atomic_dec(&rb2->remaining);
2521				r10_bio = rb2;
2522				if (any_working) {
2523					/* problem is that there are bad blocks
2524					 * on other device(s)
2525					 */
2526					int k;
2527					for (k = 0; k < conf->copies; k++)
2528						if (r10_bio->devs[k].devnum == i)
2529							break;
2530					if (!rdev_set_badblocks(
2531						    conf->mirrors[i].rdev,
 
 
 
 
 
 
 
 
2532						    r10_bio->devs[k].addr,
2533						    max_sync, 0))
2534						any_working = 0;
2535				}
2536				if (!any_working)  {
2537					if (!test_and_set_bit(MD_RECOVERY_INTR,
2538							      &mddev->recovery))
2539						printk(KERN_INFO "md/raid10:%s: insufficient "
2540						       "working devices for recovery.\n",
2541						       mdname(mddev));
2542					conf->mirrors[i].recovery_disabled
2543						= mddev->recovery_disabled;
2544				}
 
 
 
 
 
 
 
2545				break;
2546			}
2547		}
2548		if (biolist == NULL) {
2549			while (r10_bio) {
2550				r10bio_t *rb2 = r10_bio;
2551				r10_bio = (r10bio_t*) rb2->master_bio;
2552				rb2->master_bio = NULL;
2553				put_buf(rb2);
2554			}
2555			goto giveup;
2556		}
2557	} else {
2558		/* resync. Schedule a read for every block at this virt offset */
2559		int count = 0;
2560
2561		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
2562
2563		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
2564				       &sync_blocks, mddev->degraded) &&
2565		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
2566						 &mddev->recovery)) {
2567			/* We can skip this block */
2568			*skipped = 1;
2569			return sync_blocks + sectors_skipped;
2570		}
2571		if (sync_blocks < max_sync)
2572			max_sync = sync_blocks;
2573		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
 
2574
2575		r10_bio->mddev = mddev;
2576		atomic_set(&r10_bio->remaining, 0);
2577		raise_barrier(conf, 0);
2578		conf->next_resync = sector_nr;
2579
2580		r10_bio->master_bio = NULL;
2581		r10_bio->sector = sector_nr;
2582		set_bit(R10BIO_IsSync, &r10_bio->state);
2583		raid10_find_phys(conf, r10_bio);
2584		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
2585
2586		for (i=0; i<conf->copies; i++) {
2587			int d = r10_bio->devs[i].devnum;
2588			sector_t first_bad, sector;
2589			int bad_sectors;
 
 
 
 
2590
2591			bio = r10_bio->devs[i].bio;
2592			bio->bi_end_io = NULL;
2593			clear_bit(BIO_UPTODATE, &bio->bi_flags);
2594			if (conf->mirrors[d].rdev == NULL ||
2595			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
 
 
2596				continue;
 
2597			sector = r10_bio->devs[i].addr;
2598			if (is_badblock(conf->mirrors[d].rdev,
2599					sector, max_sync,
2600					&first_bad, &bad_sectors)) {
2601				if (first_bad > sector)
2602					max_sync = first_bad - sector;
2603				else {
2604					bad_sectors -= (sector - first_bad);
2605					if (max_sync > bad_sectors)
2606						max_sync = bad_sectors;
 
2607					continue;
2608				}
2609			}
2610			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2611			atomic_inc(&r10_bio->remaining);
2612			bio->bi_next = biolist;
2613			biolist = bio;
2614			bio->bi_private = r10_bio;
2615			bio->bi_end_io = end_sync_read;
2616			bio->bi_rw = READ;
2617			bio->bi_sector = sector +
2618				conf->mirrors[d].rdev->data_offset;
2619			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
2620			count++;
2621		}
2622
2623		if (count < 2) {
2624			for (i=0; i<conf->copies; i++) {
2625				int d = r10_bio->devs[i].devnum;
2626				if (r10_bio->devs[i].bio->bi_end_io)
2627					rdev_dec_pending(conf->mirrors[d].rdev,
2628							 mddev);
 
 
 
 
 
2629			}
2630			put_buf(r10_bio);
2631			biolist = NULL;
2632			goto giveup;
2633		}
2634	}
2635
2636	for (bio = biolist; bio ; bio=bio->bi_next) {
2637
2638		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
2639		if (bio->bi_end_io)
2640			bio->bi_flags |= 1 << BIO_UPTODATE;
2641		bio->bi_vcnt = 0;
2642		bio->bi_idx = 0;
2643		bio->bi_phys_segments = 0;
2644		bio->bi_size = 0;
2645	}
2646
2647	nr_sectors = 0;
2648	if (sector_nr + max_sync < max_sector)
2649		max_sector = sector_nr + max_sync;
2650	do {
2651		struct page *page;
2652		int len = PAGE_SIZE;
2653		if (sector_nr + (len>>9) > max_sector)
2654			len = (max_sector - sector_nr) << 9;
2655		if (len == 0)
2656			break;
2657		for (bio= biolist ; bio ; bio=bio->bi_next) {
2658			struct bio *bio2;
2659			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
2660			if (bio_add_page(bio, page, len, 0))
2661				continue;
2662
2663			/* stop here */
2664			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
2665			for (bio2 = biolist;
2666			     bio2 && bio2 != bio;
2667			     bio2 = bio2->bi_next) {
2668				/* remove last page from this bio */
2669				bio2->bi_vcnt--;
2670				bio2->bi_size -= len;
2671				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
2672			}
2673			goto bio_full;
2674		}
2675		nr_sectors += len>>9;
2676		sector_nr += len>>9;
2677	} while (biolist->bi_vcnt < RESYNC_PAGES);
2678 bio_full:
2679	r10_bio->sectors = nr_sectors;
2680
2681	while (biolist) {
2682		bio = biolist;
2683		biolist = biolist->bi_next;
2684
2685		bio->bi_next = NULL;
2686		r10_bio = bio->bi_private;
2687		r10_bio->sectors = nr_sectors;
2688
2689		if (bio->bi_end_io == end_sync_read) {
2690			md_sync_acct(bio->bi_bdev, nr_sectors);
 
2691			generic_make_request(bio);
2692		}
2693	}
2694
2695	if (sectors_skipped)
2696		/* pretend they weren't skipped, it makes
2697		 * no important difference in this case
2698		 */
2699		md_done_sync(mddev, sectors_skipped, 1);
2700
2701	return sectors_skipped + nr_sectors;
2702 giveup:
2703	/* There is nowhere to write, so all non-sync
2704	 * drives must be failed or in resync, all drives
2705	 * have a bad block, so try the next chunk...
2706	 */
2707	if (sector_nr + max_sync < max_sector)
2708		max_sector = sector_nr + max_sync;
2709
2710	sectors_skipped += (max_sector - sector_nr);
2711	chunks_skipped ++;
2712	sector_nr = max_sector;
2713	goto skipped;
2714}
2715
2716static sector_t
2717raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
2718{
2719	sector_t size;
2720	conf_t *conf = mddev->private;
2721
2722	if (!raid_disks)
2723		raid_disks = conf->raid_disks;
 
2724	if (!sectors)
2725		sectors = conf->dev_sectors;
2726
2727	size = sectors >> conf->chunk_shift;
2728	sector_div(size, conf->far_copies);
2729	size = size * raid_disks;
2730	sector_div(size, conf->near_copies);
2731
2732	return size << conf->chunk_shift;
2733}
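/*
 * Worked example (hypothetical numbers): with 4 disks, near_copies == 2,
 * far_copies == 1 and 1000 chunks per device, the exported array size is
 * 1000 / 1 * 4 / 2 = 2000 chunks.
 */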
2734
2735
2736static conf_t *setup_conf(mddev_t *mddev)
 
2737{
2738	conf_t *conf = NULL;
2739	int nc, fc, fo;
2740	sector_t stride, size;
2741	int err = -EINVAL;
2742
2743	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
2744	    !is_power_of_2(mddev->new_chunk_sectors)) {
2745		printk(KERN_ERR "md/raid10:%s: chunk size must be "
2746		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
2747		       mdname(mddev), PAGE_SIZE);
2748		goto out;
2749	}
2750
2751	nc = mddev->new_layout & 255;
2752	fc = (mddev->new_layout >> 8) & 255;
2753	fo = mddev->new_layout & (1<<16);
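	/*
	 * e.g. layout 0x102 decodes to near_copies = 2, far_copies = 1,
	 * far_offset = 0 -- the common "n2" arrangement, and the one that
	 * raid10_takeover_raid0() below selects.
	 */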
2754
2755	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2756	    (mddev->new_layout >> 17)) {
2757		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
2758		       mdname(mddev), mddev->new_layout);
2759		goto out;
2760	}
2761
2762	err = -ENOMEM;
2763	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2764	if (!conf)
2765		goto out;
2766
2767	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
 
 
2768				GFP_KERNEL);
2769	if (!conf->mirrors)
2770		goto out;
2771
2772	conf->tmppage = alloc_page(GFP_KERNEL);
2773	if (!conf->tmppage)
2774		goto out;
2775
2776
2777	conf->raid_disks = mddev->raid_disks;
2778	conf->near_copies = nc;
2779	conf->far_copies = fc;
2780	conf->copies = nc*fc;
2781	conf->far_offset = fo;
2782	conf->chunk_mask = mddev->new_chunk_sectors - 1;
2783	conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
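	/*
	 * new_chunk_sectors is a power of two (checked above), so
	 * ffz(~x) == log2(x): e.g. 1024-sector (512 KiB) chunks give
	 * chunk_shift = 10 and chunk_mask = 1023.
	 */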
2784
2785	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2786					   r10bio_pool_free, conf);
2787	if (!conf->r10bio_pool)
2788		goto out;
2789
2790	size = mddev->dev_sectors >> conf->chunk_shift;
2791	sector_div(size, fc);
2792	size = size * conf->raid_disks;
2793	sector_div(size, nc);
2794	/* 'size' is now the number of chunks in the array */
2795	/* calculate "used chunks per device" in 'stride' */
2796	stride = size * conf->copies;
2797
2798	/* We need to round up when dividing by raid_disks to
2799	 * get the stride size.
2800	 */
2801	stride += conf->raid_disks - 1;
2802	sector_div(stride, conf->raid_disks);
2803
2804	conf->dev_sectors = stride << conf->chunk_shift;
2805
2806	if (fo)
2807		stride = 1;
2808	else
2809		sector_div(stride, fc);
2810	conf->stride = stride << conf->chunk_shift;
2811
2812
2813	spin_lock_init(&conf->device_lock);
2814	INIT_LIST_HEAD(&conf->retry_list);
 
2815
2816	spin_lock_init(&conf->resync_lock);
2817	init_waitqueue_head(&conf->wait_barrier);
 
2818
2819	conf->thread = md_register_thread(raid10d, mddev, NULL);
2820	if (!conf->thread)
2821		goto out;
2822
2823	conf->mddev = mddev;
2824	return conf;
2825
2826 out:
2827	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
2828	       mdname(mddev));
2829	if (conf) {
2830		if (conf->r10bio_pool)
2831			mempool_destroy(conf->r10bio_pool);
2832		kfree(conf->mirrors);
2833		safe_put_page(conf->tmppage);
2834		kfree(conf);
2835	}
2836	return ERR_PTR(err);
2837}
2838
2839static int run(mddev_t *mddev)
2840{
2841	conf_t *conf;
2842	int i, disk_idx, chunk_size;
2843	mirror_info_t *disk;
2844	mdk_rdev_t *rdev;
2845	sector_t size;
2846
2847	/*
2848	 * copy the already verified devices into our private RAID10
2849	 * bookkeeping area. [whatever we allocate in run(),
2850	 * should be freed in stop()]
2851	 */
2852
2853	if (mddev->private == NULL) {
2854		conf = setup_conf(mddev);
2855		if (IS_ERR(conf))
2856			return PTR_ERR(conf);
2857		mddev->private = conf;
2858	}
2859	conf = mddev->private;
2860	if (!conf)
2861		goto out;
2862
2863	mddev->thread = conf->thread;
2864	conf->thread = NULL;
2865
2866	chunk_size = mddev->chunk_sectors << 9;
2867	blk_queue_io_min(mddev->queue, chunk_size);
2868	if (conf->raid_disks % conf->near_copies)
2869		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
2870	else
2871		blk_queue_io_opt(mddev->queue, chunk_size *
2872				 (conf->raid_disks / conf->near_copies));
 
 
 
 
 
2873
2874	list_for_each_entry(rdev, &mddev->disks, same_set) {
 
 
2875
2876		disk_idx = rdev->raid_disk;
2877		if (disk_idx >= conf->raid_disks
2878		    || disk_idx < 0)
 
 
2879			continue;
2880		disk = conf->mirrors + disk_idx;
2881
2882		disk->rdev = rdev;
2883		disk_stack_limits(mddev->gendisk, rdev->bdev,
2884				  rdev->data_offset << 9);
2885		/* as we don't honour merge_bvec_fn, we must never risk
2886		 * violating it, so limit max_segments to 1 lying
2887		 * within a single page.
2888		 */
2889		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
2890			blk_queue_max_segments(mddev->queue, 1);
2891			blk_queue_segment_boundary(mddev->queue,
2892						   PAGE_CACHE_SIZE - 1);
2893		}
2894
2895		disk->head_position = 0;
2896	}
2897	/* need to check that every block has at least one working mirror */
2898	if (!enough(conf, -1)) {
2899		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
2900		       mdname(mddev));
2901		goto out_free_conf;
2902	}
2903
2904	mddev->degraded = 0;
2905	for (i = 0; i < conf->raid_disks; i++) {
 
 
 
2906
2907		disk = conf->mirrors + i;
2908
 
 
 
 
 
 
 
2909		if (!disk->rdev ||
2910		    !test_bit(In_sync, &disk->rdev->flags)) {
2911			disk->head_position = 0;
2912			mddev->degraded++;
2913			if (disk->rdev)
 
2914				conf->fullsync = 1;
2915		}
 
2916	}
2917
2918	if (mddev->recovery_cp != MaxSector)
2919		printk(KERN_NOTICE "md/raid10:%s: not clean"
2920		       " -- starting background reconstruction\n",
2921		       mdname(mddev));
2922	printk(KERN_INFO
2923		"md/raid10:%s: active with %d out of %d devices\n",
2924		mdname(mddev), conf->raid_disks - mddev->degraded,
2925		conf->raid_disks);
2926	/*
2927	 * Ok, everything is just fine now
2928	 */
2929	mddev->dev_sectors = conf->dev_sectors;
2930	size = raid10_size(mddev, 0, 0);
2931	md_set_array_sectors(mddev, size);
2932	mddev->resync_max_sectors = size;
 
2933
2934	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2935	mddev->queue->backing_dev_info.congested_data = mddev;
2936
2937	/* Calculate max read-ahead size.
2938	 * We need to readahead at least twice a whole stripe....
2939	 * maybe...
2940	 */
2941	{
2942		int stripe = conf->raid_disks *
2943			((mddev->chunk_sectors << 9) / PAGE_SIZE);
2944		stripe /= conf->near_copies;
2945		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2946			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2947	}
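	/*
	 * e.g. 4 disks, 512 KiB chunks, near_copies == 2 and 4 KiB pages:
	 * stripe = 4 * 128 / 2 = 256 pages, so read-ahead is raised to at
	 * least 512 pages (2 MiB).
	 */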
2948
2949	if (conf->near_copies < conf->raid_disks)
2950		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 
 
 
 
 
 
2951
2952	if (md_integrity_register(mddev))
2953		goto out_free_conf;
2954
2955	return 0;
2956
2957out_free_conf:
2958	md_unregister_thread(&mddev->thread);
2959	if (conf->r10bio_pool)
2960		mempool_destroy(conf->r10bio_pool);
2961	safe_put_page(conf->tmppage);
2962	kfree(conf->mirrors);
2963	kfree(conf);
2964	mddev->private = NULL;
2965out:
2966	return -EIO;
2967}
2968
2969static int stop(mddev_t *mddev)
2970{
2971	conf_t *conf = mddev->private;
2972
2973	raise_barrier(conf, 0);
2974	lower_barrier(conf);
2975
2976	md_unregister_thread(&mddev->thread);
2977	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2978	if (conf->r10bio_pool)
2979		mempool_destroy(conf->r10bio_pool);
2980	kfree(conf->mirrors);
 
 
2981	kfree(conf);
2982	mddev->private = NULL;
2983	return 0;
2984}
2985
2986static void raid10_quiesce(mddev_t *mddev, int state)
2987{
2988	conf_t *conf = mddev->private;
2989
2990	switch(state) {
2991	case 1:
2992		raise_barrier(conf, 0);
2993		break;
2994	case 0:
2995		lower_barrier(conf);
2996		break;
2997	}
2998}
2999
3000static void *raid10_takeover_raid0(mddev_t *mddev)
3001{
3002	mdk_rdev_t *rdev;
3003	conf_t *conf;
3004
3005	if (mddev->degraded > 0) {
3006		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3007		       mdname(mddev));
3008		return ERR_PTR(-EINVAL);
3009	}
 
3010
3011	/* Set new parameters */
3012	mddev->new_level = 10;
3013	/* new layout: far_copies = 1, near_copies = 2 */
3014	mddev->new_layout = (1<<8) + 2;
3015	mddev->new_chunk_sectors = mddev->chunk_sectors;
3016	mddev->delta_disks = mddev->raid_disks;
3017	mddev->raid_disks *= 2;
3018	/* make sure it will be not marked as dirty */
3019	mddev->recovery_cp = MaxSector;
 
3020
3021	conf = setup_conf(mddev);
3022	if (!IS_ERR(conf)) {
3023		list_for_each_entry(rdev, &mddev->disks, same_set)
3024			if (rdev->raid_disk >= 0)
3025				rdev->new_raid_disk = rdev->raid_disk * 2;
 
 
3026		conf->barrier = 1;
3027	}
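	/*
	 * e.g. a two-drive raid0 becomes a four-slot near-2 raid10: the old
	 * members land in slots 0 and 2, while slots 1 and 3 start out empty
	 * and are rebuilt once spares are added.
	 */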
3028
3029	return conf;
3030}
3031
3032static void *raid10_takeover(mddev_t *mddev)
3033{
3034	struct raid0_private_data *raid0_priv;
3035
3036	/* raid10 can take over:
3037	 *  raid0 - provided it has only one zone
3038	 */
3039	if (mddev->level == 0) {
3040		/* for raid0 takeover only one zone is supported */
3041		raid0_priv = mddev->private;
3042		if (raid0_priv->nr_strip_zones > 1) {
3043			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3044			       " with more than one zone.\n",
3045			       mdname(mddev));
3046			return ERR_PTR(-EINVAL);
3047		}
3048		return raid10_takeover_raid0(mddev);
 
 
3049	}
3050	return ERR_PTR(-EINVAL);
3051}
3052
3053static struct mdk_personality raid10_personality =
3054{
3055	.name		= "raid10",
3056	.level		= 10,
3057	.owner		= THIS_MODULE,
3058	.make_request	= make_request,
3059	.run		= run,
3060	.stop		= stop,
3061	.status		= status,
3062	.error_handler	= error,
3063	.hot_add_disk	= raid10_add_disk,
3064	.hot_remove_disk= raid10_remove_disk,
3065	.spare_active	= raid10_spare_active,
3066	.sync_request	= sync_request,
3067	.quiesce	= raid10_quiesce,
3068	.size		= raid10_size,
 
3069	.takeover	= raid10_takeover,
 
 
 
 
3070};
3071
3072static int __init raid_init(void)
3073{
3074	return register_md_personality(&raid10_personality);
3075}
3076
3077static void raid_exit(void)
3078{
3079	unregister_md_personality(&raid10_personality);
3080}
3081
3082module_init(raid_init);
3083module_exit(raid_exit);
3084MODULE_LICENSE("GPL");
3085MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
3086MODULE_ALIAS("md-personality-9"); /* RAID10 */
3087MODULE_ALIAS("md-raid10");
3088MODULE_ALIAS("md-level-10");
v4.10.11
   1/*
   2 * raid10.c : Multiple Devices driver for Linux
   3 *
   4 * Copyright (C) 2000-2004 Neil Brown
   5 *
   6 * RAID-10 support for md.
   7 *
   8 * Base on code in raid1.c.  See raid1.c for further copyright information.
   9 *
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21#include <linux/slab.h>
  22#include <linux/delay.h>
  23#include <linux/blkdev.h>
  24#include <linux/module.h>
  25#include <linux/seq_file.h>
  26#include <linux/ratelimit.h>
  27#include <linux/kthread.h>
  28#include <trace/events/block.h>
  29#include "md.h"
  30#include "raid10.h"
  31#include "raid0.h"
  32#include "bitmap.h"
  33
  34/*
  35 * RAID10 provides a combination of RAID0 and RAID1 functionality.
  36 * The layout of data is defined by
  37 *    chunk_size
  38 *    raid_disks
  39 *    near_copies (stored in low byte of layout)
  40 *    far_copies (stored in second byte of layout)
  41 *    far_offset (stored in bit 16 of layout )
  42 *    use_far_sets (stored in bit 17 of layout )
  43 *    use_far_sets_bugfixed (stored in bit 18 of layout )
  44 *
  45 * The data to be stored is divided into chunks using chunksize.  Each device
  46 * is divided into far_copies sections.   In each section, chunks are laid out
  47 * in a style similar to raid0, but near_copies copies of each chunk is stored
  48 * (each on a different drive).  The starting device for each section is offset
  49 * near_copies from the starting device of the previous section.  Thus there
  50 * are (near_copies * far_copies) of each chunk, and each is on a different
  51 * drive.  near_copies and far_copies must be at least one, and their product
  52 * is at most raid_disks.
 
 
  53 *
  54 * If far_offset is true, then the far_copies are handled a bit differently.
  55 * The copies are still in different stripes, but instead of being very far
  56 * apart on disk, there are adjacent stripes.
  57 *
  58 * The far and offset algorithms are handled slightly differently if
  59 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
  60 * sets that are (near_copies * far_copies) in size.  The far copied stripes
  61 * are still shifted by 'near_copies' devices, but this shifting stays confined
  62 * to the set rather than the entire array.  This is done to improve the number
  63 * of device combinations that can fail without causing the array to fail.
  64 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
  65 * on a device):
  66 *    A B C D    A B C D E
  67 *      ...         ...
  68 *    D A B C    E A B C D
  69 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
  70 *    [A B] [C D]    [A B] [C D E]
  71 *    |...| |...|    |...| | ... |
  72 *    [B A] [D C]    [B A] [E C D]
  73 */
  74
  75/*
  76 * Number of guaranteed r10bios in case of extreme VM load:
  77 */
  78#define	NR_RAID10_BIOS 256
  79
  80/* when we get a read error on a read-only array, we redirect to another
  81 * device without failing the first device, or trying to over-write to
  82 * correct the read error.  To keep track of bad blocks on a per-bio
  83 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  84 */
  85#define IO_BLOCKED ((struct bio *)1)
  86/* When we successfully write to a known bad-block, we need to remove the
  87 * bad-block marking which must be done from process context.  So we record
  88 * the success by setting devs[n].bio to IO_MADE_GOOD
  89 */
  90#define IO_MADE_GOOD ((struct bio *)2)
  91
  92#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
  93
  94/* When there are this many requests queued to be written by
  95 * the raid10 thread, we become 'congested' to provide back-pressure
  96 * for writeback.
  97 */
  98static int max_queued_requests = 1024;
  99
 100static void allow_barrier(struct r10conf *conf);
 101static void lower_barrier(struct r10conf *conf);
 102static int _enough(struct r10conf *conf, int previous, int ignore);
 103static int enough(struct r10conf *conf, int ignore);
 104static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 105				int *skipped);
 106static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
 107static void end_reshape_write(struct bio *bio);
 108static void end_reshape(struct r10conf *conf);
 109
 110#define raid10_log(md, fmt, args...)				\
 111	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
 112
 113static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 114{
 115	struct r10conf *conf = data;
 116	int size = offsetof(struct r10bio, devs[conf->copies]);
 117
 118	/* allocate a r10bio with room for raid_disks entries in the
 119	 * bios array */
 120	return kzalloc(size, gfp_flags);
 121}
 122
 123static void r10bio_pool_free(void *r10_bio, void *data)
 124{
 125	kfree(r10_bio);
 126}
 127
 128/* Maximum size of each resync request */
 129#define RESYNC_BLOCK_SIZE (64*1024)
 130#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 131/* amount of memory to reserve for resync requests */
 132#define RESYNC_WINDOW (1024*1024)
 133/* maximum number of concurrent requests, memory permitting */
 134#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
 135
 136/*
 137 * When performing a resync, we need to read and compare, so
 138 * we need as many pages are there are copies.
 139 * When performing a recovery, we need 2 bios, one for read,
 140 * one for write (we recover only one drive per r10buf)
 141 *
 142 */
 143static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 144{
 145	struct r10conf *conf = data;
 146	struct page *page;
 147	struct r10bio *r10_bio;
 148	struct bio *bio;
 149	int i, j;
 150	int nalloc;
 151
 152	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 153	if (!r10_bio)
 154		return NULL;
 155
 156	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
 157	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
 158		nalloc = conf->copies; /* resync */
 159	else
 160		nalloc = 2; /* recovery */
 161
 162	/*
 163	 * Allocate bios.
 164	 */
 165	for (j = nalloc ; j-- ; ) {
 166		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 167		if (!bio)
 168			goto out_free_bio;
 169		r10_bio->devs[j].bio = bio;
 170		if (!conf->have_replacement)
 171			continue;
 172		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
 173		if (!bio)
 174			goto out_free_bio;
 175		r10_bio->devs[j].repl_bio = bio;
 176	}
 177	/*
 178	 * Allocate RESYNC_PAGES data pages and attach them
 179	 * where needed.
 180	 */
 181	for (j = 0 ; j < nalloc; j++) {
 182		struct bio *rbio = r10_bio->devs[j].repl_bio;
 183		bio = r10_bio->devs[j].bio;
 184		for (i = 0; i < RESYNC_PAGES; i++) {
 185			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
 186					       &conf->mddev->recovery)) {
 187				/* we can share bv_page's during recovery
 188				 * and reshape */
 189				struct bio *rbio = r10_bio->devs[0].bio;
 190				page = rbio->bi_io_vec[i].bv_page;
 191				get_page(page);
 192			} else
 193				page = alloc_page(gfp_flags);
 194			if (unlikely(!page))
 195				goto out_free_pages;
 196
 197			bio->bi_io_vec[i].bv_page = page;
 198			if (rbio)
 199				rbio->bi_io_vec[i].bv_page = page;
 200		}
 201	}
 202
 203	return r10_bio;
 204
 205out_free_pages:
 206	for ( ; i > 0 ; i--)
 207		safe_put_page(bio->bi_io_vec[i-1].bv_page);
 208	while (j--)
 209		for (i = 0; i < RESYNC_PAGES ; i++)
 210			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
 211	j = 0;
 212out_free_bio:
 213	for ( ; j < nalloc; j++) {
 214		if (r10_bio->devs[j].bio)
 215			bio_put(r10_bio->devs[j].bio);
 216		if (r10_bio->devs[j].repl_bio)
 217			bio_put(r10_bio->devs[j].repl_bio);
 218	}
 219	r10bio_pool_free(r10_bio, conf);
 220	return NULL;
 221}
 222
 223static void r10buf_pool_free(void *__r10_bio, void *data)
 224{
 225	int i;
 226	struct r10conf *conf = data;
 227	struct r10bio *r10bio = __r10_bio;
 228	int j;
 229
 230	for (j=0; j < conf->copies; j++) {
 231		struct bio *bio = r10bio->devs[j].bio;
 232		if (bio) {
 233			for (i = 0; i < RESYNC_PAGES; i++) {
 234				safe_put_page(bio->bi_io_vec[i].bv_page);
 235				bio->bi_io_vec[i].bv_page = NULL;
 236			}
 237			bio_put(bio);
 238		}
 239		bio = r10bio->devs[j].repl_bio;
 240		if (bio)
 241			bio_put(bio);
 242	}
 243	r10bio_pool_free(r10bio, conf);
 244}
 245
 246static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 247{
 248	int i;
 249
 250	for (i = 0; i < conf->copies; i++) {
 251		struct bio **bio = & r10_bio->devs[i].bio;
 252		if (!BIO_SPECIAL(*bio))
 253			bio_put(*bio);
 254		*bio = NULL;
 255		bio = &r10_bio->devs[i].repl_bio;
 256		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
 257			bio_put(*bio);
 258		*bio = NULL;
 259	}
 260}
 261
 262static void free_r10bio(struct r10bio *r10_bio)
 263{
 264	struct r10conf *conf = r10_bio->mddev->private;
 265
 266	put_all_bios(conf, r10_bio);
 267	mempool_free(r10_bio, conf->r10bio_pool);
 268}
 269
 270static void put_buf(struct r10bio *r10_bio)
 271{
 272	struct r10conf *conf = r10_bio->mddev->private;
 273
 274	mempool_free(r10_bio, conf->r10buf_pool);
 275
 276	lower_barrier(conf);
 277}
 278
 279static void reschedule_retry(struct r10bio *r10_bio)
 280{
 281	unsigned long flags;
 282	struct mddev *mddev = r10_bio->mddev;
 283	struct r10conf *conf = mddev->private;
 284
 285	spin_lock_irqsave(&conf->device_lock, flags);
 286	list_add(&r10_bio->retry_list, &conf->retry_list);
 287	conf->nr_queued ++;
 288	spin_unlock_irqrestore(&conf->device_lock, flags);
 289
 290	/* wake up frozen array... */
 291	wake_up(&conf->wait_barrier);
 292
 293	md_wakeup_thread(mddev->thread);
 294}
 295
 296/*
 297 * raid_end_bio_io() is called when we have finished servicing a mirrored
 298 * operation and are ready to return a success/failure code to the buffer
 299 * cache layer.
 300 */
 301static void raid_end_bio_io(struct r10bio *r10_bio)
 302{
 303	struct bio *bio = r10_bio->master_bio;
 304	int done;
 305	struct r10conf *conf = r10_bio->mddev->private;
 306
 307	if (bio->bi_phys_segments) {
 308		unsigned long flags;
 309		spin_lock_irqsave(&conf->device_lock, flags);
 310		bio->bi_phys_segments--;
 311		done = (bio->bi_phys_segments == 0);
 312		spin_unlock_irqrestore(&conf->device_lock, flags);
 313	} else
 314		done = 1;
 315	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
 316		bio->bi_error = -EIO;
 317	if (done) {
 318		bio_endio(bio);
 319		/*
 320		 * Wake up any possible resync thread that waits for the device
 321		 * to go idle.
 322		 */
 323		allow_barrier(conf);
 324	}
 325	free_r10bio(r10_bio);
 326}
 327
 328/*
 329 * Update disk head position estimator based on IRQ completion info.
 330 */
 331static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 332{
 333	struct r10conf *conf = r10_bio->mddev->private;
 334
 335	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
 336		r10_bio->devs[slot].addr + (r10_bio->sectors);
 337}
 338
 339/*
 340 * Find the disk number which triggered given bio
 341 */
 342static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 343			 struct bio *bio, int *slotp, int *replp)
 344{
 345	int slot;
 346	int repl = 0;
 347
 348	for (slot = 0; slot < conf->copies; slot++) {
 349		if (r10_bio->devs[slot].bio == bio)
 350			break;
 351		if (r10_bio->devs[slot].repl_bio == bio) {
 352			repl = 1;
 353			break;
 354		}
 355	}
 356
 357	BUG_ON(slot == conf->copies);
 358	update_head_pos(slot, r10_bio);
 359
 360	if (slotp)
 361		*slotp = slot;
 362	if (replp)
 363		*replp = repl;
 364	return r10_bio->devs[slot].devnum;
 365}
 366
 367static void raid10_end_read_request(struct bio *bio)
 368{
 369	int uptodate = !bio->bi_error;
 370	struct r10bio *r10_bio = bio->bi_private;
 371	int slot, dev;
 372	struct md_rdev *rdev;
 373	struct r10conf *conf = r10_bio->mddev->private;
 374
 375	slot = r10_bio->read_slot;
 376	dev = r10_bio->devs[slot].devnum;
 377	rdev = r10_bio->devs[slot].rdev;
 378	/*
 379	 * this branch is our 'one mirror IO has finished' event handler:
 380	 */
 381	update_head_pos(slot, r10_bio);
 382
 383	if (uptodate) {
 384		/*
 385		 * Set R10BIO_Uptodate in our master bio, so that
 386		 * we will return a good error code to the higher
 387		 * levels even if IO on some other mirrored buffer fails.
 388		 *
 389		 * The 'master' represents the composite IO operation to
 390		 * user-side. So if something waits for IO, then it will
 391		 * wait for the 'master' bio.
 392		 */
 393		set_bit(R10BIO_Uptodate, &r10_bio->state);
 394	} else {
 395		/* If all other devices that store this block have
 396		 * failed, we want to return the error upwards rather
 397		 * than fail the last device.  Here we redefine
 398		 * "uptodate" to mean "Don't want to retry"
 399		 */
 400		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
 401			     rdev->raid_disk))
 402			uptodate = 1;
 403	}
 404	if (uptodate) {
 405		raid_end_bio_io(r10_bio);
 406		rdev_dec_pending(rdev, conf->mddev);
 407	} else {
 408		/*
 409		 * oops, read error - keep the refcount on the rdev
 410		 */
 411		char b[BDEVNAME_SIZE];
 412		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
 
 413				   mdname(conf->mddev),
 414				   bdevname(rdev->bdev, b),
 415				   (unsigned long long)r10_bio->sector);
 416		set_bit(R10BIO_ReadError, &r10_bio->state);
 417		reschedule_retry(r10_bio);
 418	}
 419}
 420
 421static void close_write(struct r10bio *r10_bio)
 422{
 423	/* clear the bitmap if all writes complete successfully */
 424	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
 425			r10_bio->sectors,
 426			!test_bit(R10BIO_Degraded, &r10_bio->state),
 427			0);
 428	md_write_end(r10_bio->mddev);
 429}
 430
 431static void one_write_done(struct r10bio *r10_bio)
 432{
 433	if (atomic_dec_and_test(&r10_bio->remaining)) {
 434		if (test_bit(R10BIO_WriteError, &r10_bio->state))
 435			reschedule_retry(r10_bio);
 436		else {
 437			close_write(r10_bio);
 438			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
 439				reschedule_retry(r10_bio);
 440			else
 441				raid_end_bio_io(r10_bio);
 442		}
 443	}
 444}
 445
 446static void raid10_end_write_request(struct bio *bio)
 447{
  448	struct r10bio *r10_bio = bio->bi_private;
 449	int dev;
 450	int dec_rdev = 1;
 451	struct r10conf *conf = r10_bio->mddev->private;
 452	int slot, repl;
 453	struct md_rdev *rdev = NULL;
 454	struct bio *to_put = NULL;
 455	bool discard_error;
 456
 457	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
 458
 459	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 460
 461	if (repl)
 462		rdev = conf->mirrors[dev].replacement;
 463	if (!rdev) {
 464		smp_rmb();
 465		repl = 0;
 466		rdev = conf->mirrors[dev].rdev;
 467	}
 468	/*
 469	 * this branch is our 'one mirror IO has finished' event handler:
 470	 */
 471	if (bio->bi_error && !discard_error) {
 472		if (repl)
 473			/* Never record new bad blocks to replacement,
 474			 * just fail it.
 475			 */
 476			md_error(rdev->mddev, rdev);
 477		else {
 478			set_bit(WriteErrorSeen,	&rdev->flags);
 479			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 480				set_bit(MD_RECOVERY_NEEDED,
 481					&rdev->mddev->recovery);
 482
 483			dec_rdev = 0;
 484			if (test_bit(FailFast, &rdev->flags) &&
 485			    (bio->bi_opf & MD_FAILFAST)) {
 486				md_error(rdev->mddev, rdev);
 487				if (!test_bit(Faulty, &rdev->flags))
 488					/* This is the only remaining device,
 489					 * We need to retry the write without
 490					 * FailFast
 491					 */
 492					set_bit(R10BIO_WriteError, &r10_bio->state);
 493				else {
 494					r10_bio->devs[slot].bio = NULL;
 495					to_put = bio;
 496					dec_rdev = 1;
 497				}
 498			} else
 499				set_bit(R10BIO_WriteError, &r10_bio->state);
 500		}
 501	} else {
 502		/*
 503		 * Set R10BIO_Uptodate in our master bio, so that
  504	 * we will return a good error code to the higher
 505		 * levels even if IO on some other mirrored buffer fails.
 506		 *
 507		 * The 'master' represents the composite IO operation to
 508		 * user-side. So if something waits for IO, then it will
 509		 * wait for the 'master' bio.
 510		 */
 511		sector_t first_bad;
 512		int bad_sectors;
 513
 514		/*
 515		 * Do not set R10BIO_Uptodate if the current device is
  516	 * rebuilding or Faulty. This is because we cannot use
  517	 * such a device for properly reading the data back (we could
  518	 * potentially use it, if the current write would have landed
  519	 * before rdev->recovery_offset, but for simplicity we don't
  520	 * check this here).
 521		 */
 522		if (test_bit(In_sync, &rdev->flags) &&
 523		    !test_bit(Faulty, &rdev->flags))
 524			set_bit(R10BIO_Uptodate, &r10_bio->state);
 525
 526		/* Maybe we can clear some bad blocks. */
 527		if (is_badblock(rdev,
 528				r10_bio->devs[slot].addr,
 529				r10_bio->sectors,
 530				&first_bad, &bad_sectors) && !discard_error) {
 531			bio_put(bio);
 532			if (repl)
 533				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
 534			else
 535				r10_bio->devs[slot].bio = IO_MADE_GOOD;
 536			dec_rdev = 0;
 537			set_bit(R10BIO_MadeGood, &r10_bio->state);
 538		}
 539	}
 540
 541	/*
 542	 *
 543	 * Let's see if all mirrored write operations have finished
 544	 * already.
 545	 */
 546	one_write_done(r10_bio);
 547	if (dec_rdev)
 548		rdev_dec_pending(rdev, conf->mddev);
 549	if (to_put)
 550		bio_put(to_put);
 551}
  552
 553/*
 554 * RAID10 layout manager
 555 * As well as the chunksize and raid_disks count, there are two
 556 * parameters: near_copies and far_copies.
 557 * near_copies * far_copies must be <= raid_disks.
 558 * Normally one of these will be 1.
 559 * If both are 1, we get raid0.
 560 * If near_copies == raid_disks, we get raid1.
 561 *
 562 * Chunks are laid out in raid0 style with near_copies copies of the
 563 * first chunk, followed by near_copies copies of the next chunk and
 564 * so on.
 565 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 566 * as described above, we start again with a device offset of near_copies.
 567 * So we effectively have another copy of the whole array further down all
 568 * the drives, but with blocks on different drives.
  569 * With this layout, a block is never stored twice on the same device.
 570 *
 571 * raid10_find_phys finds the sector offset of a given virtual sector
 572 * on each device that it is on.
 573 *
 574 * raid10_find_virt does the reverse mapping, from a device and a
 575 * sector offset to a virtual address
 576 */
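/*
 * Worked example (illustrative only, not part of the original source):
 * with raid_disks=4, near_copies=2, far_copies=1 and data chunks
 * A, B, C, ... the chunks end up placed as
 *
 *	disk0	disk1	disk2	disk3
 *	A	A	B	B
 *	C	C	D	D
 *	E	E	F	F
 *
 * With near_copies=1 and far_copies=2 instead, each drive is split into
 * two sections: the first is laid out raid0-style and the second repeats
 * the same data shifted by one device, so every chunk again lives on two
 * different drives.
 */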
 577
 578static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
 579{
 580	int n,f;
 581	sector_t sector;
 582	sector_t chunk;
 583	sector_t stripe;
  584	int dev;
 585	int slot = 0;
 586	int last_far_set_start, last_far_set_size;
 587
 588	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 589	last_far_set_start *= geo->far_set_size;
 590
 591	last_far_set_size = geo->far_set_size;
 592	last_far_set_size += (geo->raid_disks % geo->far_set_size);
 593
 594	/* now calculate first sector/dev */
 595	chunk = r10bio->sector >> geo->chunk_shift;
 596	sector = r10bio->sector & geo->chunk_mask;
 597
 598	chunk *= geo->near_copies;
 599	stripe = chunk;
 600	dev = sector_div(stripe, geo->raid_disks);
 601	if (geo->far_offset)
 602		stripe *= geo->far_copies;
 603
 604	sector += stripe << geo->chunk_shift;
 605
 606	/* and calculate all the others */
 607	for (n = 0; n < geo->near_copies; n++) {
 608		int d = dev;
 609		int set;
  610	sector_t s = sector;
 611		r10bio->devs[slot].devnum = d;
 612		r10bio->devs[slot].addr = s;
 613		slot++;
 614
 615		for (f = 1; f < geo->far_copies; f++) {
 616			set = d / geo->far_set_size;
 617			d += geo->near_copies;
 618
 619			if ((geo->raid_disks % geo->far_set_size) &&
 620			    (d > last_far_set_start)) {
 621				d -= last_far_set_start;
 622				d %= last_far_set_size;
 623				d += last_far_set_start;
 624			} else {
 625				d %= geo->far_set_size;
 626				d += geo->far_set_size * set;
 627			}
 628			s += geo->stride;
 629			r10bio->devs[slot].devnum = d;
 630			r10bio->devs[slot].addr = s;
 631			slot++;
 632		}
 633		dev++;
 634		if (dev >= geo->raid_disks) {
 635			dev = 0;
 636			sector += (geo->chunk_mask + 1);
 637		}
  638	}
 639}
 640
 641static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 642{
 643	struct geom *geo = &conf->geo;
 644
 645	if (conf->reshape_progress != MaxSector &&
 646	    ((r10bio->sector >= conf->reshape_progress) !=
 647	     conf->mddev->reshape_backwards)) {
 648		set_bit(R10BIO_Previous, &r10bio->state);
 649		geo = &conf->prev;
 650	} else
 651		clear_bit(R10BIO_Previous, &r10bio->state);
 652
 653	__raid10_find_phys(geo, r10bio);
 654}
 655
 656static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 657{
 658	sector_t offset, chunk, vchunk;
 659	/* Never use conf->prev as this is only called during resync
 660	 * or recovery, so reshape isn't happening
 661	 */
 662	struct geom *geo = &conf->geo;
 663	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
 664	int far_set_size = geo->far_set_size;
 665	int last_far_set_start;
 666
 667	if (geo->raid_disks % geo->far_set_size) {
 668		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
 669		last_far_set_start *= geo->far_set_size;
 670
 671		if (dev >= last_far_set_start) {
 672			far_set_size = geo->far_set_size;
 673			far_set_size += (geo->raid_disks % geo->far_set_size);
 674			far_set_start = last_far_set_start;
 675		}
 676	}
 677
 678	offset = sector & geo->chunk_mask;
 679	if (geo->far_offset) {
 680		int fc;
 681		chunk = sector >> geo->chunk_shift;
 682		fc = sector_div(chunk, geo->far_copies);
 683		dev -= fc * geo->near_copies;
 684		if (dev < far_set_start)
 685			dev += far_set_size;
 686	} else {
 687		while (sector >= geo->stride) {
 688			sector -= geo->stride;
 689			if (dev < (geo->near_copies + far_set_start))
 690				dev += far_set_size - geo->near_copies;
 691			else
 692				dev -= geo->near_copies;
 693		}
 694		chunk = sector >> geo->chunk_shift;
 695	}
 696	vchunk = chunk * geo->raid_disks + dev;
 697	sector_div(vchunk, geo->near_copies);
  698	return (vchunk << geo->chunk_shift) + offset;
 699}
 700
 701/*
 702 * This routine returns the disk from which the requested read should
 703 * be done. There is a per-array 'next expected sequential IO' sector
 704 * number - if this matches on the next IO then we use the last disk.
  705 * There is also a per-disk 'last known head position' sector that is
  706 * maintained from IRQ contexts; both the normal and the resync IO
  707 * completion handlers update this position correctly. If there is no
 708 * perfect sequential match then we pick the disk whose head is closest.
 709 *
  710 * If there are 2 mirrors in the same 2 devices, performance degrades
  711 * because position is tracked per mirror, not per device.
 712 *
 713 * The rdev for the device selected will have nr_pending incremented.
 714 */
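/*
 * Illustrative example (numbers invented): for a read at device address
 * 1200 on a 'near' array, a mirror whose recorded head position is 1000
 * gives a distance of 200 while one at 5000 gives 3800, so the first is
 * preferred; a completely idle mirror (nr_pending == 0) counts as
 * distance 0 and wins outright.
 */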
 715
 716/*
 717 * FIXME: possibly should rethink readbalancing and do it differently
 718 * depending on near_copies / far_copies geometry.
 719 */
 720static struct md_rdev *read_balance(struct r10conf *conf,
 721				    struct r10bio *r10_bio,
 722				    int *max_sectors)
 723{
 724	const sector_t this_sector = r10_bio->sector;
 725	int disk, slot;
 726	int sectors = r10_bio->sectors;
 727	int best_good_sectors;
 728	sector_t new_distance, best_dist;
 729	struct md_rdev *best_rdev, *rdev = NULL;
 730	int do_balance;
 731	int best_slot;
 732	struct geom *geo = &conf->geo;
 733
 734	raid10_find_phys(conf, r10_bio);
  735	rcu_read_lock();
 736	sectors = r10_bio->sectors;
 737	best_slot = -1;
 738	best_rdev = NULL;
 739	best_dist = MaxSector;
 740	best_good_sectors = 0;
 741	do_balance = 1;
 742	clear_bit(R10BIO_FailFast, &r10_bio->state);
 743	/*
 744	 * Check if we can balance. We can balance on the whole
 745	 * device if no resync is going on (recovery is ok), or below
 746	 * the resync window. We take the first readable disk when
 747	 * above the resync window.
 748	 */
 749	if (conf->mddev->recovery_cp < MaxSector
 750	    && (this_sector + sectors >= conf->next_resync))
 751		do_balance = 0;
 752
 753	for (slot = 0; slot < conf->copies ; slot++) {
 754		sector_t first_bad;
 755		int bad_sectors;
 756		sector_t dev_sector;
 757
 758		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 759			continue;
 760		disk = r10_bio->devs[slot].devnum;
 761		rdev = rcu_dereference(conf->mirrors[disk].replacement);
 762		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
 763		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 764			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 765		if (rdev == NULL ||
 766		    test_bit(Faulty, &rdev->flags))
 767			continue;
 768		if (!test_bit(In_sync, &rdev->flags) &&
 769		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 770			continue;
 771
 772		dev_sector = r10_bio->devs[slot].addr;
 773		if (is_badblock(rdev, dev_sector, sectors,
 774				&first_bad, &bad_sectors)) {
 775			if (best_dist < MaxSector)
 776				/* Already have a better slot */
 777				continue;
 778			if (first_bad <= dev_sector) {
 779				/* Cannot read here.  If this is the
 780				 * 'primary' device, then we must not read
 781				 * beyond 'bad_sectors' from another device.
 782				 */
 783				bad_sectors -= (dev_sector - first_bad);
 784				if (!do_balance && sectors > bad_sectors)
 785					sectors = bad_sectors;
 786				if (best_good_sectors > sectors)
 787					best_good_sectors = sectors;
 788			} else {
 789				sector_t good_sectors =
 790					first_bad - dev_sector;
 791				if (good_sectors > best_good_sectors) {
 792					best_good_sectors = good_sectors;
 793					best_slot = slot;
 794					best_rdev = rdev;
 795				}
 796				if (!do_balance)
 797					/* Must read from here */
 798					break;
 799			}
 800			continue;
 801		} else
 802			best_good_sectors = sectors;
 803
 804		if (!do_balance)
 805			break;
 806
 807		if (best_slot >= 0)
 808			/* At least 2 disks to choose from so failfast is OK */
 809			set_bit(R10BIO_FailFast, &r10_bio->state);
 810		/* This optimisation is debatable, and completely destroys
 811		 * sequential read speed for 'far copies' arrays.  So only
 812		 * keep it for 'near' arrays, and review those later.
 813		 */
 814		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
 815			new_distance = 0;
 816
 817		/* for far > 1 always use the lowest address */
 818		else if (geo->far_copies > 1)
 819			new_distance = r10_bio->devs[slot].addr;
 820		else
 821			new_distance = abs(r10_bio->devs[slot].addr -
 822					   conf->mirrors[disk].head_position);
 823		if (new_distance < best_dist) {
 824			best_dist = new_distance;
 825			best_slot = slot;
 826			best_rdev = rdev;
 827		}
 828	}
 829	if (slot >= conf->copies) {
 830		slot = best_slot;
 831		rdev = best_rdev;
 832	}
 833
  834	if (slot >= 0) {
  835		atomic_inc(&rdev->nr_pending);
 836		r10_bio->read_slot = slot;
 837	} else
 838		rdev = NULL;
 839	rcu_read_unlock();
 840	*max_sectors = best_good_sectors;
 841
 842	return rdev;
 843}
 844
 845static int raid10_congested(struct mddev *mddev, int bits)
 846{
  847	struct r10conf *conf = mddev->private;
 848	int i, ret = 0;
 849
 850	if ((bits & (1 << WB_async_congested)) &&
 851	    conf->pending_count >= max_queued_requests)
 852		return 1;
 853
 854	rcu_read_lock();
 855	for (i = 0;
 856	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
 857		     && ret == 0;
 858	     i++) {
 859		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 860		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 861			struct request_queue *q = bdev_get_queue(rdev->bdev);
 862
 863			ret |= bdi_congested(&q->backing_dev_info, bits);
 864		}
 865	}
 866	rcu_read_unlock();
 867	return ret;
 868}
 869
 870static void flush_pending_writes(struct r10conf *conf)
 871{
 872	/* Any writes that have been queued but are awaiting
 873	 * bitmap updates get flushed here.
 874	 */
 875	spin_lock_irq(&conf->device_lock);
 876
 877	if (conf->pending_bio_list.head) {
 878		struct bio *bio;
 879		bio = bio_list_get(&conf->pending_bio_list);
 880		conf->pending_count = 0;
 881		spin_unlock_irq(&conf->device_lock);
 882		/* flush any pending bitmap writes to disk
 883		 * before proceeding w/ I/O */
 884		bitmap_unplug(conf->mddev->bitmap);
 885		wake_up(&conf->wait_barrier);
 886
 887		while (bio) { /* submit pending writes */
 888			struct bio *next = bio->bi_next;
 889			struct md_rdev *rdev = (void*)bio->bi_bdev;
 890			bio->bi_next = NULL;
 891			bio->bi_bdev = rdev->bdev;
 892			if (test_bit(Faulty, &rdev->flags)) {
 893				bio->bi_error = -EIO;
 894				bio_endio(bio);
 895			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
 896					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 897				/* Just ignore it */
 898				bio_endio(bio);
 899			else
 900				generic_make_request(bio);
 901			bio = next;
 902		}
 903	} else
 904		spin_unlock_irq(&conf->device_lock);
 905}
 906
 907/* Barriers....
 908 * Sometimes we need to suspend IO while we do something else,
 909 * either some resync/recovery, or reconfigure the array.
 910 * To do this we raise a 'barrier'.
 911 * The 'barrier' is a counter that can be raised multiple times
 912 * to count how many activities are happening which preclude
 913 * normal IO.
 914 * We can only raise the barrier if there is no pending IO.
 915 * i.e. if nr_pending == 0.
 916 * We choose only to raise the barrier if no-one is waiting for the
 917 * barrier to go down.  This means that as soon as an IO request
 918 * is ready, no other operations which require a barrier will start
 919 * until the IO request has had a chance.
 920 *
 921 * So: regular IO calls 'wait_barrier'.  When that returns there
  922 *    is no background IO happening.  It must arrange to call
  923 *    allow_barrier when it has finished its IO.
  924 * background IO calls must call raise_barrier.  Once that returns
  925 *    there is no normal IO happening.  It must arrange to call
 926 *    lower_barrier when the particular background IO completes.
 927 */
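/*
 * Illustrative pairing (a sketch of how the helpers below are intended
 * to be used, not a new code path):
 *
 *	wait_barrier(conf);		regular IO entry
 *	...submit the request...
 *	allow_barrier(conf);		from the completion path
 *
 *	raise_barrier(conf, 0);		resync/recovery entry
 *	...background IO...
 *	lower_barrier(conf);		when the background IO completes
 */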
 928
 929static void raise_barrier(struct r10conf *conf, int force)
 930{
 931	BUG_ON(force && !conf->barrier);
 932	spin_lock_irq(&conf->resync_lock);
 933
 934	/* Wait until no block IO is waiting (unless 'force') */
 935	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
 936			    conf->resync_lock);
 937
 938	/* block any new IO from starting */
 939	conf->barrier++;
 940
 941	/* Now wait for all pending IO to complete */
 942	wait_event_lock_irq(conf->wait_barrier,
 943			    !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
 944			    conf->resync_lock);
 945
 946	spin_unlock_irq(&conf->resync_lock);
 947}
 948
 949static void lower_barrier(struct r10conf *conf)
 950{
 951	unsigned long flags;
 952	spin_lock_irqsave(&conf->resync_lock, flags);
 953	conf->barrier--;
 954	spin_unlock_irqrestore(&conf->resync_lock, flags);
 955	wake_up(&conf->wait_barrier);
 956}
 957
 958static void wait_barrier(struct r10conf *conf)
 959{
 960	spin_lock_irq(&conf->resync_lock);
 961	if (conf->barrier) {
 962		conf->nr_waiting++;
 963		/* Wait for the barrier to drop.
 964		 * However if there are already pending
 965		 * requests (preventing the barrier from
 966		 * rising completely), and the
 967		 * pre-process bio queue isn't empty,
 968		 * then don't wait, as we need to empty
 969		 * that queue to get the nr_pending
 970		 * count down.
 971		 */
 972		raid10_log(conf->mddev, "wait barrier");
 973		wait_event_lock_irq(conf->wait_barrier,
 974				    !conf->barrier ||
 975				    (atomic_read(&conf->nr_pending) &&
 976				     current->bio_list &&
 977				     (!bio_list_empty(&current->bio_list[0]) ||
 978				      !bio_list_empty(&current->bio_list[1]))),
 979				    conf->resync_lock);
 980		conf->nr_waiting--;
 981		if (!conf->nr_waiting)
 982			wake_up(&conf->wait_barrier);
 983	}
 984	atomic_inc(&conf->nr_pending);
 985	spin_unlock_irq(&conf->resync_lock);
 986}
 987
 988static void allow_barrier(struct r10conf *conf)
 989{
 990	if ((atomic_dec_and_test(&conf->nr_pending)) ||
 991			(conf->array_freeze_pending))
  992		wake_up(&conf->wait_barrier);
 993}
 994
 995static void freeze_array(struct r10conf *conf, int extra)
 996{
 997	/* stop syncio and normal IO and wait for everything to
 998	 * go quiet.
 999	 * We increment barrier and nr_waiting, and then
 1000	 * wait until nr_pending matches nr_queued+extra
1001	 * This is called in the context of one normal IO request
1002	 * that has failed. Thus any sync request that might be pending
1003	 * will be blocked by nr_pending, and we need to wait for
1004	 * pending IO requests to complete or be queued for re-try.
1005	 * Thus the number queued (nr_queued) plus this request (extra)
1006	 * must match the number of pending IOs (nr_pending) before
1007	 * we continue.
1008	 */
1009	spin_lock_irq(&conf->resync_lock);
1010	conf->array_freeze_pending++;
1011	conf->barrier++;
1012	conf->nr_waiting++;
1013	wait_event_lock_irq_cmd(conf->wait_barrier,
1014				atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
1015				conf->resync_lock,
1016				flush_pending_writes(conf));
1017
1018	conf->array_freeze_pending--;
1019	spin_unlock_irq(&conf->resync_lock);
1020}
1021
1022static void unfreeze_array(struct r10conf *conf)
1023{
1024	/* reverse the effect of the freeze */
1025	spin_lock_irq(&conf->resync_lock);
1026	conf->barrier--;
1027	conf->nr_waiting--;
1028	wake_up(&conf->wait_barrier);
1029	spin_unlock_irq(&conf->resync_lock);
1030}
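/*
 * Illustrative use (a sketch; the real callers sit in the error-handling
 * paths of this driver): a failed normal request is typically handled as
 *
 *	freeze_array(conf, 1);		the failing request is the 'extra'
 *	...re-read or re-write the affected sectors...
 *	unfreeze_array(conf);
 */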
1031
1032static sector_t choose_data_offset(struct r10bio *r10_bio,
1033				   struct md_rdev *rdev)
1034{
1035	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1036	    test_bit(R10BIO_Previous, &r10_bio->state))
1037		return rdev->data_offset;
1038	else
1039		return rdev->new_data_offset;
 1040}
1041
1042struct raid10_plug_cb {
1043	struct blk_plug_cb	cb;
1044	struct bio_list		pending;
1045	int			pending_cnt;
 1046};
1047
1048static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1049{
1050	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
1051						   cb);
1052	struct mddev *mddev = plug->cb.data;
1053	struct r10conf *conf = mddev->private;
1054	struct bio *bio;
1055
1056	if (from_schedule || current->bio_list) {
1057		spin_lock_irq(&conf->device_lock);
1058		bio_list_merge(&conf->pending_bio_list, &plug->pending);
1059		conf->pending_count += plug->pending_cnt;
1060		spin_unlock_irq(&conf->device_lock);
1061		wake_up(&conf->wait_barrier);
1062		md_wakeup_thread(mddev->thread);
1063		kfree(plug);
1064		return;
1065	}
1066
1067	/* we aren't scheduling, so we can do the write-out directly. */
1068	bio = bio_list_get(&plug->pending);
1069	bitmap_unplug(mddev->bitmap);
 1070	wake_up(&conf->wait_barrier);
1071
1072	while (bio) { /* submit pending writes */
1073		struct bio *next = bio->bi_next;
1074		struct md_rdev *rdev = (void*)bio->bi_bdev;
1075		bio->bi_next = NULL;
1076		bio->bi_bdev = rdev->bdev;
1077		if (test_bit(Faulty, &rdev->flags)) {
1078			bio->bi_error = -EIO;
1079			bio_endio(bio);
1080		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
1081				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1082			/* Just ignore it */
1083			bio_endio(bio);
1084		else
1085			generic_make_request(bio);
1086		bio = next;
1087	}
1088	kfree(plug);
1089}
1090
1091static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1092				struct r10bio *r10_bio)
1093{
1094	struct r10conf *conf = mddev->private;
1095	struct bio *read_bio;
1096	const int op = bio_op(bio);
1097	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1098	int sectors_handled;
1099	int max_sectors;
1100	sector_t sectors;
1101	struct md_rdev *rdev;
1102	int slot;
1103
1104	/*
1105	 * Register the new request and wait if the reconstruction
1106	 * thread has put up a bar for new requests.
1107	 * Continue immediately if no resync is active currently.
1108	 */
1109	wait_barrier(conf);
1110
1111	sectors = bio_sectors(bio);
1112	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1113	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1114	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1115		/*
1116		 * IO spans the reshape position.  Need to wait for reshape to
1117		 * pass
1118		 */
1119		raid10_log(conf->mddev, "wait reshape");
1120		allow_barrier(conf);
1121		wait_event(conf->wait_barrier,
1122			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1123			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1124			   sectors);
1125		wait_barrier(conf);
1126	}
1127
1128read_again:
1129	rdev = read_balance(conf, r10_bio, &max_sectors);
1130	if (!rdev) {
1131		raid_end_bio_io(r10_bio);
1132		return;
1133	}
 1134	slot = r10_bio->read_slot;
1135
1136	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1137	bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1138		 max_sectors);
1139
1140	r10_bio->devs[slot].bio = read_bio;
1141	r10_bio->devs[slot].rdev = rdev;
1142
1143	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1144		choose_data_offset(r10_bio, rdev);
1145	read_bio->bi_bdev = rdev->bdev;
1146	read_bio->bi_end_io = raid10_end_read_request;
1147	bio_set_op_attrs(read_bio, op, do_sync);
1148	if (test_bit(FailFast, &rdev->flags) &&
1149	    test_bit(R10BIO_FailFast, &r10_bio->state))
1150	        read_bio->bi_opf |= MD_FAILFAST;
1151	read_bio->bi_private = r10_bio;
1152
1153	if (mddev->gendisk)
1154	        trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1155	                              read_bio, disk_devt(mddev->gendisk),
1156	                              r10_bio->sector);
1157	if (max_sectors < r10_bio->sectors) {
1158		/*
1159		 * Could not read all from this device, so we will need another
1160		 * r10_bio.
1161		 */
1162		sectors_handled = (r10_bio->sector + max_sectors
1163				   - bio->bi_iter.bi_sector);
1164		r10_bio->sectors = max_sectors;
1165		spin_lock_irq(&conf->device_lock);
1166		if (bio->bi_phys_segments == 0)
1167			bio->bi_phys_segments = 2;
1168		else
1169			bio->bi_phys_segments++;
1170		spin_unlock_irq(&conf->device_lock);
1171		/*
1172		 * Cannot call generic_make_request directly as that will be
1173		 * queued in __generic_make_request and subsequent
1174		 * mempool_alloc might block waiting for it.  so hand bio over
1175		 * to raid10d.
1176		 */
 1177		reschedule_retry(r10_bio);
1178
 1179		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1180
1181		r10_bio->master_bio = bio;
1182		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1183		r10_bio->state = 0;
1184		r10_bio->mddev = mddev;
1185		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1186		goto read_again;
1187	} else
1188		generic_make_request(read_bio);
1189	return;
 1190}
1191
1192static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1193				 struct r10bio *r10_bio)
1194{
1195	struct r10conf *conf = mddev->private;
1196	int i;
1197	const int op = bio_op(bio);
1198	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1199	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1200	unsigned long flags;
1201	struct md_rdev *blocked_rdev;
1202	struct blk_plug_cb *cb;
1203	struct raid10_plug_cb *plug = NULL;
1204	sector_t sectors;
1205	int sectors_handled;
1206	int max_sectors;
1207
 1208	md_write_start(mddev, bio);
1209
1210	/*
1211	 * Register the new request and wait if the reconstruction
1212	 * thread has put up a bar for new requests.
1213	 * Continue immediately if no resync is active currently.
1214	 */
1215	wait_barrier(conf);
1216
1217	sectors = bio_sectors(bio);
1218	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1219	    bio->bi_iter.bi_sector < conf->reshape_progress &&
1220	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1221		/*
1222		 * IO spans the reshape position.  Need to wait for reshape to
1223		 * pass
1224		 */
1225		raid10_log(conf->mddev, "wait reshape");
1226		allow_barrier(conf);
1227		wait_event(conf->wait_barrier,
1228			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
1229			   conf->reshape_progress >= bio->bi_iter.bi_sector +
1230			   sectors);
1231		wait_barrier(conf);
1232	}
1233
1234	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1235	    (mddev->reshape_backwards
1236	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1237		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1238	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1239		bio->bi_iter.bi_sector < conf->reshape_progress))) {
1240		/* Need to update reshape_position in metadata */
1241		mddev->reshape_position = conf->reshape_progress;
1242		set_mask_bits(&mddev->sb_flags, 0,
1243			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1244		md_wakeup_thread(mddev->thread);
1245		raid10_log(conf->mddev, "wait reshape metadata");
1246		wait_event(mddev->sb_wait,
1247			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1248
1249		conf->reshape_safe = mddev->reshape_position;
1250	}
1251
1252	if (conf->pending_count >= max_queued_requests) {
1253		md_wakeup_thread(mddev->thread);
1254		raid10_log(mddev, "wait queued");
1255		wait_event(conf->wait_barrier,
1256			   conf->pending_count < max_queued_requests);
1257	}
1258	/* first select target devices under rcu_lock and
1259	 * inc refcount on their rdev.  Record them by setting
1260	 * bios[x] to bio
1261	 * If there are known/acknowledged bad blocks on any device
1262	 * on which we have seen a write error, we want to avoid
1263	 * writing to those blocks.  This potentially requires several
1264	 * writes to write around the bad blocks.  Each set of writes
1265	 * gets its own r10_bio with a set of bios attached.  The number
 1266	 * of r10_bios is recorded in bio->bi_phys_segments just as with
1267	 * the read case.
 1268	 */
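	/*
	 * Worked example (illustrative): if a 64-sector write finds an
	 * acknowledged bad block starting 8 sectors into the range on one
	 * device, max_sectors below drops to 8; this r10_bio then covers
	 * only those 8 sectors and bio->bi_phys_segments is bumped so a
	 * further r10_bio can be allocated for the remaining 56 sectors.
	 */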
1269
1270	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1271	raid10_find_phys(conf, r10_bio);
1272retry_write:
1273	blocked_rdev = NULL;
1274	rcu_read_lock();
1275	max_sectors = r10_bio->sectors;
1276
1277	for (i = 0;  i < conf->copies; i++) {
1278		int d = r10_bio->devs[i].devnum;
1279		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1280		struct md_rdev *rrdev = rcu_dereference(
1281			conf->mirrors[d].replacement);
1282		if (rdev == rrdev)
1283			rrdev = NULL;
1284		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1285			atomic_inc(&rdev->nr_pending);
1286			blocked_rdev = rdev;
1287			break;
1288		}
1289		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1290			atomic_inc(&rrdev->nr_pending);
1291			blocked_rdev = rrdev;
1292			break;
1293		}
1294		if (rdev && (test_bit(Faulty, &rdev->flags)))
1295			rdev = NULL;
1296		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
1297			rrdev = NULL;
1298
1299		r10_bio->devs[i].bio = NULL;
1300		r10_bio->devs[i].repl_bio = NULL;
1301
1302		if (!rdev && !rrdev) {
1303			set_bit(R10BIO_Degraded, &r10_bio->state);
1304			continue;
1305		}
1306		if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1307			sector_t first_bad;
1308			sector_t dev_sector = r10_bio->devs[i].addr;
1309			int bad_sectors;
1310			int is_bad;
1311
 1312			is_bad = is_badblock(rdev, dev_sector, max_sectors,
1313					     &first_bad, &bad_sectors);
1314			if (is_bad < 0) {
1315				/* Mustn't write here until the bad block
1316				 * is acknowledged
1317				 */
1318				atomic_inc(&rdev->nr_pending);
1319				set_bit(BlockedBadBlocks, &rdev->flags);
1320				blocked_rdev = rdev;
1321				break;
1322			}
1323			if (is_bad && first_bad <= dev_sector) {
1324				/* Cannot write here at all */
1325				bad_sectors -= (dev_sector - first_bad);
1326				if (bad_sectors < max_sectors)
1327					/* Mustn't write more than bad_sectors
1328					 * to other devices yet
1329					 */
1330					max_sectors = bad_sectors;
1331				/* We don't set R10BIO_Degraded as that
1332				 * only applies if the disk is missing,
1333				 * so it might be re-added, and we want to
1334				 * know to recover this chunk.
1335				 * In this case the device is here, and the
1336				 * fact that this chunk is not in-sync is
1337				 * recorded in the bad block log.
1338				 */
1339				continue;
1340			}
1341			if (is_bad) {
1342				int good_sectors = first_bad - dev_sector;
1343				if (good_sectors < max_sectors)
1344					max_sectors = good_sectors;
1345			}
1346		}
1347		if (rdev) {
1348			r10_bio->devs[i].bio = bio;
1349			atomic_inc(&rdev->nr_pending);
1350		}
1351		if (rrdev) {
1352			r10_bio->devs[i].repl_bio = bio;
1353			atomic_inc(&rrdev->nr_pending);
1354		}
1355	}
1356	rcu_read_unlock();
1357
1358	if (unlikely(blocked_rdev)) {
1359		/* Have to wait for this device to get unblocked, then retry */
1360		int j;
1361		int d;
1362
1363		for (j = 0; j < i; j++) {
1364			if (r10_bio->devs[j].bio) {
1365				d = r10_bio->devs[j].devnum;
1366				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1367			}
1368			if (r10_bio->devs[j].repl_bio) {
1369				struct md_rdev *rdev;
1370				d = r10_bio->devs[j].devnum;
1371				rdev = conf->mirrors[d].replacement;
1372				if (!rdev) {
1373					/* Race with remove_disk */
1374					smp_mb();
1375					rdev = conf->mirrors[d].rdev;
1376				}
1377				rdev_dec_pending(rdev, mddev);
1378			}
1379		}
1380		allow_barrier(conf);
1381		raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1382		md_wait_for_blocked_rdev(blocked_rdev, mddev);
1383		wait_barrier(conf);
1384		goto retry_write;
1385	}
1386
1387	if (max_sectors < r10_bio->sectors) {
1388		/* We are splitting this into multiple parts, so
1389		 * we need to prepare for allocating another r10_bio.
1390		 */
1391		r10_bio->sectors = max_sectors;
1392		spin_lock_irq(&conf->device_lock);
1393		if (bio->bi_phys_segments == 0)
1394			bio->bi_phys_segments = 2;
1395		else
1396			bio->bi_phys_segments++;
1397		spin_unlock_irq(&conf->device_lock);
1398	}
1399	sectors_handled = r10_bio->sector + max_sectors -
1400		bio->bi_iter.bi_sector;
1401
1402	atomic_set(&r10_bio->remaining, 1);
1403	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1404
1405	for (i = 0; i < conf->copies; i++) {
1406		struct bio *mbio;
1407		int d = r10_bio->devs[i].devnum;
1408		if (r10_bio->devs[i].bio) {
1409			struct md_rdev *rdev = conf->mirrors[d].rdev;
1410			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1411			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1412				 max_sectors);
1413			r10_bio->devs[i].bio = mbio;
1414
1415			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
1416					   choose_data_offset(r10_bio, rdev));
1417			mbio->bi_bdev = rdev->bdev;
1418			mbio->bi_end_io	= raid10_end_write_request;
1419			bio_set_op_attrs(mbio, op, do_sync | do_fua);
1420			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
1421			    enough(conf, d))
1422				mbio->bi_opf |= MD_FAILFAST;
1423			mbio->bi_private = r10_bio;
1424
1425			if (conf->mddev->gendisk)
1426				trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1427						      mbio, disk_devt(conf->mddev->gendisk),
1428						      r10_bio->sector);
1429			/* flush_pending_writes() needs access to the rdev so...*/
1430			mbio->bi_bdev = (void*)rdev;
1431
 1432			atomic_inc(&r10_bio->remaining);
1433
1434			cb = blk_check_plugged(raid10_unplug, mddev,
1435					       sizeof(*plug));
1436			if (cb)
1437				plug = container_of(cb, struct raid10_plug_cb,
1438						    cb);
1439			else
1440				plug = NULL;
1441			spin_lock_irqsave(&conf->device_lock, flags);
1442			if (plug) {
1443				bio_list_add(&plug->pending, mbio);
1444				plug->pending_cnt++;
1445			} else {
1446				bio_list_add(&conf->pending_bio_list, mbio);
1447				conf->pending_count++;
1448			}
1449			spin_unlock_irqrestore(&conf->device_lock, flags);
1450			if (!plug)
1451				md_wakeup_thread(mddev->thread);
1452		}
1453
1454		if (r10_bio->devs[i].repl_bio) {
1455			struct md_rdev *rdev = conf->mirrors[d].replacement;
1456			if (rdev == NULL) {
1457				/* Replacement just got moved to main 'rdev' */
1458				smp_mb();
1459				rdev = conf->mirrors[d].rdev;
1460			}
1461			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1462			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1463				 max_sectors);
1464			r10_bio->devs[i].repl_bio = mbio;
1465
1466			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
1467					   choose_data_offset(r10_bio, rdev));
1468			mbio->bi_bdev = rdev->bdev;
1469			mbio->bi_end_io	= raid10_end_write_request;
1470			bio_set_op_attrs(mbio, op, do_sync | do_fua);
1471			mbio->bi_private = r10_bio;
1472
1473			if (conf->mddev->gendisk)
1474				trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
1475						      mbio, disk_devt(conf->mddev->gendisk),
1476						      r10_bio->sector);
1477			/* flush_pending_writes() needs access to the rdev so...*/
1478			mbio->bi_bdev = (void*)rdev;
1479
1480			atomic_inc(&r10_bio->remaining);
1481			spin_lock_irqsave(&conf->device_lock, flags);
1482			bio_list_add(&conf->pending_bio_list, mbio);
1483			conf->pending_count++;
1484			spin_unlock_irqrestore(&conf->device_lock, flags);
1485			if (!mddev_check_plugged(mddev))
1486				md_wakeup_thread(mddev->thread);
1487		}
1488	}
1489
1490	/* Don't remove the bias on 'remaining' (one_write_done) until
1491	 * after checking if we need to go around again.
1492	 */
1493
1494	if (sectors_handled < bio_sectors(bio)) {
1495		one_write_done(r10_bio);
1496		/* We need another r10_bio.  It has already been counted
1497		 * in bio->bi_phys_segments.
1498		 */
1499		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1500
1501		r10_bio->master_bio = bio;
1502		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1503
1504		r10_bio->mddev = mddev;
1505		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1506		r10_bio->state = 0;
1507		goto retry_write;
1508	}
1509	one_write_done(r10_bio);
1510}
1511
1512static void __make_request(struct mddev *mddev, struct bio *bio)
1513{
1514	struct r10conf *conf = mddev->private;
1515	struct r10bio *r10_bio;
1516
1517	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1518
1519	r10_bio->master_bio = bio;
1520	r10_bio->sectors = bio_sectors(bio);
1521
1522	r10_bio->mddev = mddev;
1523	r10_bio->sector = bio->bi_iter.bi_sector;
1524	r10_bio->state = 0;
1525
1526	/*
1527	 * We might need to issue multiple reads to different devices if there
1528	 * are bad blocks around, so we keep track of the number of reads in
1529	 * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
1530	 * no locking will be needed when the request completes.  If it is
1531	 * non-zero, then it is the number of not-completed requests.
1532	 */
1533	bio->bi_phys_segments = 0;
1534	bio_clear_flag(bio, BIO_SEG_VALID);
1535
1536	if (bio_data_dir(bio) == READ)
1537		raid10_read_request(mddev, bio, r10_bio);
1538	else
1539		raid10_write_request(mddev, bio, r10_bio);
1540}
1541
1542static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1543{
1544	struct r10conf *conf = mddev->private;
1545	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1546	int chunk_sects = chunk_mask + 1;
1547
1548	struct bio *split;
1549
1550	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1551		md_flush_request(mddev, bio);
1552		return;
1553	}
1554
1555	do {
1556
1557		/*
1558		 * If this request crosses a chunk boundary, we need to split
1559		 * it.
1560		 */
1561		if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1562			     bio_sectors(bio) > chunk_sects
1563			     && (conf->geo.near_copies < conf->geo.raid_disks
1564				 || conf->prev.near_copies <
1565				 conf->prev.raid_disks))) {
1566			split = bio_split(bio, chunk_sects -
1567					  (bio->bi_iter.bi_sector &
1568					   (chunk_sects - 1)),
1569					  GFP_NOIO, fs_bio_set);
1570			bio_chain(split, bio);
1571		} else {
1572			split = bio;
1573		}
1574
1575		/*
 1576		 * If a bio is split, the first part of the bio will pass the
 1577		 * barrier but the bio is queued in current->bio_list (see
 1578		 * generic_make_request). If there is a raise_barrier() called
 1579		 * here, the second part of the bio can't pass the barrier. But since
 1580		 * the first part of the bio isn't dispatched to the underlying disks
 1581		 * yet, the barrier is never released, hence raise_barrier will
 1582		 * always wait. We have a deadlock.
1583		 * Note, this only happens in read path. For write path, the
1584		 * first part of bio is dispatched in a schedule() call
1585		 * (because of blk plug) or offloaded to raid10d.
1586		 * Quitting from the function immediately can change the bio
1587		 * order queued in bio_list and avoid the deadlock.
1588		 */
1589		__make_request(mddev, split);
1590		if (split != bio && bio_data_dir(bio) == READ) {
1591			generic_make_request(bio);
1592			break;
1593		}
1594	} while (split != bio);
1595
1596	/* In case raid10d snuck in to freeze_array */
 1597	wake_up(&conf->wait_barrier);
1598}
1599
1600static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1601{
1602	struct r10conf *conf = mddev->private;
1603	int i;
1604
1605	if (conf->geo.near_copies < conf->geo.raid_disks)
1606		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1607	if (conf->geo.near_copies > 1)
1608		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1609	if (conf->geo.far_copies > 1) {
1610		if (conf->geo.far_offset)
1611			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1612		else
1613			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1614		if (conf->geo.far_set_size != conf->geo.raid_disks)
1615			seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1616	}
1617	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1618					conf->geo.raid_disks - mddev->degraded);
1619	rcu_read_lock();
1620	for (i = 0; i < conf->geo.raid_disks; i++) {
1621		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1622		seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1623	}
1624	rcu_read_unlock();
1625	seq_printf(seq, "]");
1626}
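/*
 * Example output (illustrative): for a clean 4-device 'n2' array with
 * 512K chunks the code above produces something like
 *
 *	 512K chunks 2 near-copies [4/4] [UUUU]
 */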
1627
1628/* check if there are enough drives for
 1629 * every block to appear on at least one.
1630 * Don't consider the device numbered 'ignore'
1631 * as we might be about to remove it.
1632 */
1633static int _enough(struct r10conf *conf, int previous, int ignore)
1634{
1635	int first = 0;
1636	int has_enough = 0;
1637	int disks, ncopies;
1638	if (previous) {
1639		disks = conf->prev.raid_disks;
1640		ncopies = conf->prev.near_copies;
1641	} else {
1642		disks = conf->geo.raid_disks;
1643		ncopies = conf->geo.near_copies;
1644	}
1645
1646	rcu_read_lock();
1647	do {
1648		int n = conf->copies;
1649		int cnt = 0;
1650		int this = first;
1651		while (n--) {
1652			struct md_rdev *rdev;
1653			if (this != ignore &&
1654			    (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1655			    test_bit(In_sync, &rdev->flags))
1656				cnt++;
1657			this = (this+1) % disks;
1658		}
1659		if (cnt == 0)
1660			goto out;
1661		first = (first + ncopies) % disks;
1662	} while (first != 0);
1663	has_enough = 1;
1664out:
1665	rcu_read_unlock();
1666	return has_enough;
1667}
1668
1669static int enough(struct r10conf *conf, int ignore)
1670{
1671	/* when calling 'enough', both 'prev' and 'geo' must
1672	 * be stable.
1673	 * This is ensured if ->reconfig_mutex or ->device_lock
1674	 * is held.
1675	 */
1676	return _enough(conf, 0, ignore) &&
1677		_enough(conf, 1, ignore);
1678}
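/*
 * Worked example (illustrative): with raid_disks=4 and near_copies=2,
 * _enough() walks the copy sets {0,1} and {2,3}; the array is usable
 * only while each set still has at least one In_sync device other than
 * the one passed as 'ignore'.
 */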
1679
1680static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1681{
1682	char b[BDEVNAME_SIZE];
1683	struct r10conf *conf = mddev->private;
1684	unsigned long flags;
1685
1686	/*
1687	 * If it is not operational, then we have already marked it as dead
1688	 * else if it is the last working disks, ignore the error, let the
1689	 * next level up know.
1690	 * else mark the drive as failed
1691	 */
1692	spin_lock_irqsave(&conf->device_lock, flags);
1693	if (test_bit(In_sync, &rdev->flags)
1694	    && !enough(conf, rdev->raid_disk)) {
1695		/*
1696		 * Don't fail the drive, just return an IO error.
 1697		 */
1698		spin_unlock_irqrestore(&conf->device_lock, flags);
 1699		return;
1700	}
1701	if (test_and_clear_bit(In_sync, &rdev->flags))
1702		mddev->degraded++;
1703	/*
1704	 * If recovery is running, make sure it aborts.
1705	 */
1706	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1707	set_bit(Blocked, &rdev->flags);
1708	set_bit(Faulty, &rdev->flags);
1709	set_mask_bits(&mddev->sb_flags, 0,
1710		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1711	spin_unlock_irqrestore(&conf->device_lock, flags);
1712	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
1713		"md/raid10:%s: Operation continuing on %d devices.\n",
1714		mdname(mddev), bdevname(rdev->bdev, b),
1715		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1716}
1717
1718static void print_conf(struct r10conf *conf)
1719{
1720	int i;
1721	struct md_rdev *rdev;
1722
1723	pr_debug("RAID10 conf printout:\n");
1724	if (!conf) {
1725		pr_debug("(!conf)\n");
1726		return;
1727	}
1728	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1729		 conf->geo.raid_disks);
1730
 1731	/* This is only called with ->reconfig_mutex held, so
1732	 * rcu protection of rdev is not needed */
1733	for (i = 0; i < conf->geo.raid_disks; i++) {
1734		char b[BDEVNAME_SIZE];
1735		rdev = conf->mirrors[i].rdev;
1736		if (rdev)
1737			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1738				 i, !test_bit(In_sync, &rdev->flags),
1739				 !test_bit(Faulty, &rdev->flags),
1740				 bdevname(rdev->bdev,b));
1741	}
1742}
1743
1744static void close_sync(struct r10conf *conf)
1745{
1746	wait_barrier(conf);
1747	allow_barrier(conf);
1748
1749	mempool_destroy(conf->r10buf_pool);
1750	conf->r10buf_pool = NULL;
1751}
1752
1753static int raid10_spare_active(struct mddev *mddev)
1754{
1755	int i;
1756	struct r10conf *conf = mddev->private;
1757	struct raid10_info *tmp;
1758	int count = 0;
1759	unsigned long flags;
1760
1761	/*
1762	 * Find all non-in_sync disks within the RAID10 configuration
1763	 * and mark them in_sync
1764	 */
1765	for (i = 0; i < conf->geo.raid_disks; i++) {
1766		tmp = conf->mirrors + i;
1767		if (tmp->replacement
1768		    && tmp->replacement->recovery_offset == MaxSector
1769		    && !test_bit(Faulty, &tmp->replacement->flags)
1770		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1771			/* Replacement has just become active */
1772			if (!tmp->rdev
1773			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1774				count++;
1775			if (tmp->rdev) {
 1776				/* Replaced device is not technically faulty,
1777				 * but we need to be sure it gets removed
1778				 * and never re-added.
1779				 */
1780				set_bit(Faulty, &tmp->rdev->flags);
1781				sysfs_notify_dirent_safe(
1782					tmp->rdev->sysfs_state);
1783			}
1784			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1785		} else if (tmp->rdev
1786			   && tmp->rdev->recovery_offset == MaxSector
1787			   && !test_bit(Faulty, &tmp->rdev->flags)
1788			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1789			count++;
1790			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1791		}
1792	}
1793	spin_lock_irqsave(&conf->device_lock, flags);
1794	mddev->degraded -= count;
1795	spin_unlock_irqrestore(&conf->device_lock, flags);
1796
1797	print_conf(conf);
1798	return count;
1799}
1800
 1801static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1802{
1803	struct r10conf *conf = mddev->private;
1804	int err = -EEXIST;
1805	int mirror;
1806	int first = 0;
1807	int last = conf->geo.raid_disks - 1;
1808
1809	if (mddev->recovery_cp < MaxSector)
1810		/* only hot-add to in-sync arrays, as recovery is
1811		 * very different from resync
1812		 */
1813		return -EBUSY;
1814	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1815		return -EINVAL;
1816
1817	if (md_integrity_add_rdev(rdev, mddev))
1818		return -ENXIO;
1819
1820	if (rdev->raid_disk >= 0)
1821		first = last = rdev->raid_disk;
1822
1823	if (rdev->saved_raid_disk >= first &&
1824	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1825		mirror = rdev->saved_raid_disk;
1826	else
1827		mirror = first;
1828	for ( ; mirror <= last ; mirror++) {
1829		struct raid10_info *p = &conf->mirrors[mirror];
1830		if (p->recovery_disabled == mddev->recovery_disabled)
1831			continue;
1832		if (p->rdev) {
1833			if (!test_bit(WantReplacement, &p->rdev->flags) ||
1834			    p->replacement != NULL)
1835				continue;
1836			clear_bit(In_sync, &rdev->flags);
1837			set_bit(Replacement, &rdev->flags);
1838			rdev->raid_disk = mirror;
1839			err = 0;
1840			if (mddev->gendisk)
1841				disk_stack_limits(mddev->gendisk, rdev->bdev,
1842						  rdev->data_offset << 9);
1843			conf->fullsync = 1;
1844			rcu_assign_pointer(p->replacement, rdev);
 1845			break;
1846		}
1847
1848		if (mddev->gendisk)
1849			disk_stack_limits(mddev->gendisk, rdev->bdev,
1850					  rdev->data_offset << 9);
1851
1852		p->head_position = 0;
1853		p->recovery_disabled = mddev->recovery_disabled - 1;
1854		rdev->raid_disk = mirror;
1855		err = 0;
1856		if (rdev->saved_raid_disk != mirror)
1857			conf->fullsync = 1;
1858		rcu_assign_pointer(p->rdev, rdev);
1859		break;
1860	}
1861	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1862		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 1863
1864	print_conf(conf);
1865	return err;
1866}
1867
1868static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1869{
1870	struct r10conf *conf = mddev->private;
1871	int err = 0;
1872	int number = rdev->raid_disk;
1873	struct md_rdev **rdevp;
1874	struct raid10_info *p = conf->mirrors + number;
1875
1876	print_conf(conf);
1877	if (rdev == p->rdev)
1878		rdevp = &p->rdev;
1879	else if (rdev == p->replacement)
1880		rdevp = &p->replacement;
1881	else
1882		return 0;
1883
1884	if (test_bit(In_sync, &rdev->flags) ||
1885	    atomic_read(&rdev->nr_pending)) {
1886		err = -EBUSY;
1887		goto abort;
1888	}
1889	/* Only remove non-faulty devices if recovery
1890	 * is not possible.
1891	 */
1892	if (!test_bit(Faulty, &rdev->flags) &&
1893	    mddev->recovery_disabled != p->recovery_disabled &&
1894	    (!p->replacement || p->replacement == rdev) &&
1895	    number < conf->geo.raid_disks &&
1896	    enough(conf, -1)) {
1897		err = -EBUSY;
1898		goto abort;
1899	}
1900	*rdevp = NULL;
1901	if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1902		synchronize_rcu();
1903		if (atomic_read(&rdev->nr_pending)) {
1904			/* lost the race, try later */
1905			err = -EBUSY;
1906			*rdevp = rdev;
1907			goto abort;
 1908		}
1909	}
1910	if (p->replacement) {
1911		/* We must have just cleared 'rdev' */
1912		p->rdev = p->replacement;
1913		clear_bit(Replacement, &p->replacement->flags);
1914		smp_mb(); /* Make sure other CPUs may see both as identical
1915			   * but will never see neither -- if they are careful.
1916			   */
1917		p->replacement = NULL;
1918		clear_bit(WantReplacement, &rdev->flags);
1919	} else
 1920		/* We might have just removed the Replacement as faulty.
1921		 * Clear the flag just in case
1922		 */
1923		clear_bit(WantReplacement, &rdev->flags);
1924
1925	err = md_integrity_register(mddev);
1926
1927abort:
1928
1929	print_conf(conf);
1930	return err;
1931}
1932
 1933static void end_sync_read(struct bio *bio)
1934{
1935	struct r10bio *r10_bio = bio->bi_private;
1936	struct r10conf *conf = r10_bio->mddev->private;
1937	int d;
1938
1939	if (bio == r10_bio->master_bio) {
1940		/* this is a reshape read */
1941		d = r10_bio->read_slot; /* really the read dev */
1942	} else
1943		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1944
1945	if (!bio->bi_error)
1946		set_bit(R10BIO_Uptodate, &r10_bio->state);
1947	else
1948		/* The write handler will notice the lack of
1949		 * R10BIO_Uptodate and record any errors etc
1950		 */
1951		atomic_add(r10_bio->sectors,
1952			   &conf->mirrors[d].rdev->corrected_errors);
1953
1954	/* for reconstruct, we always reschedule after a read.
1955	 * for resync, only after all reads
1956	 */
1957	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1958	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1959	    atomic_dec_and_test(&r10_bio->remaining)) {
1960		/* we have read all the blocks,
1961		 * do the comparison in process context in raid10d
1962		 */
1963		reschedule_retry(r10_bio);
1964	}
1965}
1966
1967static void end_sync_request(struct r10bio *r10_bio)
1968{
1969	struct mddev *mddev = r10_bio->mddev;
1970
1971	while (atomic_dec_and_test(&r10_bio->remaining)) {
1972		if (r10_bio->master_bio == NULL) {
1973			/* the primary of several recovery bios */
1974			sector_t s = r10_bio->sectors;
1975			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1976			    test_bit(R10BIO_WriteError, &r10_bio->state))
1977				reschedule_retry(r10_bio);
1978			else
1979				put_buf(r10_bio);
1980			md_done_sync(mddev, s, 1);
1981			break;
1982		} else {
1983			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1984			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1985			    test_bit(R10BIO_WriteError, &r10_bio->state))
1986				reschedule_retry(r10_bio);
1987			else
1988				put_buf(r10_bio);
1989			r10_bio = r10_bio2;
1990		}
1991	}
1992}
1993
1994static void end_sync_write(struct bio *bio)
1995{
1996	struct r10bio *r10_bio = bio->bi_private;
1997	struct mddev *mddev = r10_bio->mddev;
 1998	struct r10conf *conf = mddev->private;
1999	int d;
2000	sector_t first_bad;
2001	int bad_sectors;
2002	int slot;
2003	int repl;
2004	struct md_rdev *rdev = NULL;
2005
2006	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2007	if (repl)
2008		rdev = conf->mirrors[d].replacement;
2009	else
2010		rdev = conf->mirrors[d].rdev;
2011
2012	if (bio->bi_error) {
2013		if (repl)
2014			md_error(mddev, rdev);
2015		else {
2016			set_bit(WriteErrorSeen, &rdev->flags);
2017			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2018				set_bit(MD_RECOVERY_NEEDED,
2019					&rdev->mddev->recovery);
2020			set_bit(R10BIO_WriteError, &r10_bio->state);
2021		}
2022	} else if (is_badblock(rdev,
2023			     r10_bio->devs[slot].addr,
2024			     r10_bio->sectors,
2025			     &first_bad, &bad_sectors))
2026		set_bit(R10BIO_MadeGood, &r10_bio->state);
2027
2028	rdev_dec_pending(rdev, mddev);
2029
2030	end_sync_request(r10_bio);
2031}
2032
2033/*
 2034 * Note: sync and recovery are handled very differently for raid10.
2035 * This code is for resync.
2036 * For resync, we read through virtual addresses and read all blocks.
2037 * If there is any error, we schedule a write.  The lowest numbered
2038 * drive is authoritative.
 2039 * However, requests come for physical addresses, so we need to map.
 2040 * For every physical address there are raid_disks/copies virtual addresses,
 2041 * which is always at least one, but is not necessarily an integer.
2042 * This means that a physical address can span multiple chunks, so we may
2043 * have to submit multiple io requests for a single sync request.
2044 */
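/*
 * Example (illustrative): with raid_disks=3 and copies=2 there are
 * 3/2 = 1.5 virtual addresses per physical address, so a single
 * physical range can straddle two virtual chunks and the resync code
 * may need more than one request to cover it.
 */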
2045/*
2046 * We check if all blocks are in-sync and only write to blocks that
2047 * aren't in sync
2048 */
2049static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2050{
2051	struct r10conf *conf = mddev->private;
2052	int i, first;
2053	struct bio *tbio, *fbio;
2054	int vcnt;
2055
2056	atomic_set(&r10_bio->remaining, 1);
2057
2058	/* find the first device with a block */
2059	for (i=0; i<conf->copies; i++)
2060		if (!r10_bio->devs[i].bio->bi_error)
2061			break;
2062
2063	if (i == conf->copies)
2064		goto done;
2065
2066	first = i;
2067	fbio = r10_bio->devs[i].bio;
2068	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2069	fbio->bi_iter.bi_idx = 0;
2070
2071	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2072	/* now find blocks with errors */
2073	for (i=0 ; i < conf->copies ; i++) {
2074		int  j, d;
2075		struct md_rdev *rdev;
2076
2077		tbio = r10_bio->devs[i].bio;
2078
2079		if (tbio->bi_end_io != end_sync_read)
2080			continue;
2081		if (i == first)
2082			continue;
2083		d = r10_bio->devs[i].devnum;
2084		rdev = conf->mirrors[d].rdev;
2085		if (!r10_bio->devs[i].bio->bi_error) {
2086			/* We know that the bi_io_vec layout is the same for
2087			 * both 'first' and 'i', so we just compare them.
2088			 * All vec entries are PAGE_SIZE;
2089			 */
2090			int sectors = r10_bio->sectors;
2091			for (j = 0; j < vcnt; j++) {
2092				int len = PAGE_SIZE;
2093				if (sectors < (len / 512))
2094					len = sectors * 512;
2095				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2096					   page_address(tbio->bi_io_vec[j].bv_page),
2097					   len))
2098					break;
2099				sectors -= len/512;
2100			}
2101			if (j == vcnt)
2102				continue;
2103			atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2104			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2105				/* Don't fix anything. */
2106				continue;
2107		} else if (test_bit(FailFast, &rdev->flags)) {
2108			/* Just give up on this device */
2109			md_error(rdev->mddev, rdev);
2110			continue;
2111		}
2112		/* Ok, we need to write this bio, either to correct an
2113		 * inconsistency or to correct an unreadable block.
2114		 * First we need to fixup bv_offset, bv_len and
2115		 * bi_vecs, as the read request might have corrupted these
2116		 */
2117		bio_reset(tbio);
2118
2119		tbio->bi_vcnt = vcnt;
 2120		tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
2121		tbio->bi_private = r10_bio;
 2122		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2123		tbio->bi_end_io = end_sync_write;
2124		bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
2125
2126		bio_copy_data(tbio, fbio);
 2127
2128		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2129		atomic_inc(&r10_bio->remaining);
2130		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2131
2132		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2133			tbio->bi_opf |= MD_FAILFAST;
2134		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2135		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2136		generic_make_request(tbio);
2137	}
2138
2139	/* Now write out to any replacement devices
2140	 * that are active
2141	 */
2142	for (i = 0; i < conf->copies; i++) {
2143		int d;
2144
2145		tbio = r10_bio->devs[i].repl_bio;
2146		if (!tbio || !tbio->bi_end_io)
2147			continue;
2148		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2149		    && r10_bio->devs[i].bio != fbio)
2150			bio_copy_data(tbio, fbio);
2151		d = r10_bio->devs[i].devnum;
2152		atomic_inc(&r10_bio->remaining);
2153		md_sync_acct(conf->mirrors[d].replacement->bdev,
2154			     bio_sectors(tbio));
2155		generic_make_request(tbio);
2156	}
2157
2158done:
2159	if (atomic_dec_and_test(&r10_bio->remaining)) {
2160		md_done_sync(mddev, r10_bio->sectors, 1);
2161		put_buf(r10_bio);
2162	}
2163}
2164
2165/*
2166 * Now for the recovery code.
2167 * Recovery happens across physical sectors.
2168 * We recover all non-in_sync drives by finding the virtual address of
2169 * each, and then choose a working drive that also has that virt address.
2170 * There is a separate r10_bio for each non-in_sync drive.
2171 * Only the first two slots are in use. The first for reading,
2172 * The second for writing.
2173 *
2174 */
2175static void fix_recovery_read_error(struct r10bio *r10_bio)
2176{
2177	/* We got a read error during recovery.
2178	 * We repeat the read in smaller page-sized sections.
2179	 * If a read succeeds, write it to the new device or record
2180	 * a bad block if we cannot.
2181	 * If a read fails, record a bad block on both old and
2182	 * new devices.
2183	 */
2184	struct mddev *mddev = r10_bio->mddev;
2185	struct r10conf *conf = mddev->private;
2186	struct bio *bio = r10_bio->devs[0].bio;
2187	sector_t sect = 0;
2188	int sectors = r10_bio->sectors;
2189	int idx = 0;
2190	int dr = r10_bio->devs[0].devnum;
2191	int dw = r10_bio->devs[1].devnum;
2192
2193	while (sectors) {
2194		int s = sectors;
2195		struct md_rdev *rdev;
2196		sector_t addr;
2197		int ok;
2198
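		/*
		 * Work in at-most-page-sized pieces, reusing the pages that
		 * are already attached to the recovery bio.
		 */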
2199		if (s > (PAGE_SIZE>>9))
2200			s = PAGE_SIZE >> 9;
2201
2202		rdev = conf->mirrors[dr].rdev;
2203		addr = r10_bio->devs[0].addr + sect;
2204		ok = sync_page_io(rdev,
2205				  addr,
2206				  s << 9,
2207				  bio->bi_io_vec[idx].bv_page,
2208				  REQ_OP_READ, 0, false);
2209		if (ok) {
2210			rdev = conf->mirrors[dw].rdev;
2211			addr = r10_bio->devs[1].addr + sect;
2212			ok = sync_page_io(rdev,
2213					  addr,
2214					  s << 9,
2215					  bio->bi_io_vec[idx].bv_page,
2216					  REQ_OP_WRITE, 0, false);
2217			if (!ok) {
2218				set_bit(WriteErrorSeen, &rdev->flags);
2219				if (!test_and_set_bit(WantReplacement,
2220						      &rdev->flags))
2221					set_bit(MD_RECOVERY_NEEDED,
2222						&rdev->mddev->recovery);
2223			}
2224		}
2225		if (!ok) {
2226			/* We don't worry if we cannot set a bad block -
2227			 * it really is bad so there is no loss in not
2228			 * recording it yet
2229			 */
2230			rdev_set_badblocks(rdev, addr, s, 0);
2231
2232			if (rdev != conf->mirrors[dw].rdev) {
2233				/* need bad block on destination too */
2234				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2235				addr = r10_bio->devs[1].addr + sect;
2236				ok = rdev_set_badblocks(rdev2, addr, s, 0);
2237				if (!ok) {
2238					/* just abort the recovery */
2239					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
2240						  mdname(mddev));
2241
2242					conf->mirrors[dw].recovery_disabled
2243						= mddev->recovery_disabled;
2244					set_bit(MD_RECOVERY_INTR,
2245						&mddev->recovery);
2246					break;
2247				}
2248			}
2249		}
2250
2251		sectors -= s;
2252		sect += s;
2253		idx++;
2254	}
2255}
2256
2257static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2258{
2259	struct r10conf *conf = mddev->private;
2260	int d;
2261	struct bio *wbio, *wbio2;
2262
2263	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2264		fix_recovery_read_error(r10_bio);
2265		end_sync_request(r10_bio);
2266		return;
2267	}
2268
2269	/*
2270	 * share the pages with the first bio
2271	 * and submit the write request
2272	 */
2273	d = r10_bio->devs[1].devnum;
2274	wbio = r10_bio->devs[1].bio;
2275	wbio2 = r10_bio->devs[1].repl_bio;
2276	/* Need to test wbio2->bi_end_io before we call
2277	 * generic_make_request as if the former is NULL,
2278	 * the latter is free to free wbio2.
2279	 */
2280	if (wbio2 && !wbio2->bi_end_io)
2281		wbio2 = NULL;
2282	if (wbio->bi_end_io) {
2283		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2284		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2285		generic_make_request(wbio);
2286	}
2287	if (wbio2) {
2288		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2289		md_sync_acct(conf->mirrors[d].replacement->bdev,
2290			     bio_sectors(wbio2));
2291		generic_make_request(wbio2);
2292	}
2293}
2294
2295/*
2296 * Used by fix_read_error() to decay the per rdev read_errors.
2297 * We halve the read error count for every hour that has elapsed
2298 * since the last recorded read error.
2299 *
2300 */
2301static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2302{
2303	long cur_time_mon;
2304	unsigned long hours_since_last;
2305	unsigned int read_errors = atomic_read(&rdev->read_errors);
2306
2307	cur_time_mon = ktime_get_seconds();
2308
2309	if (rdev->last_read_error == 0) {
2310		/* first time we've seen a read error */
2311		rdev->last_read_error = cur_time_mon;
2312		return;
2313	}
2314
2315	hours_since_last = (long)(cur_time_mon -
2316			    rdev->last_read_error) / 3600;
2317
2318	rdev->last_read_error = cur_time_mon;
2319
2320	/*
2321	 * if hours_since_last is > the number of bits in read_errors
2322	 * just set read errors to 0. We do this to avoid
2323	 * overflowing the shift of read_errors by hours_since_last.
2324	 */
2325	if (hours_since_last >= 8 * sizeof(read_errors))
2326		atomic_set(&rdev->read_errors, 0);
2327	else
2328		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2329}
2330
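/*
 * Synchronously read or write one small range on 'rdev'.
 * Returns 1 on success; -1 if the I/O was skipped because the range overlaps
 * a known bad block (always for reads, and for writes once WriteErrorSeen is
 * set); and 0 on failure, in which case a bad block is recorded, or the
 * device is failed if recording is not possible.
 */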
2331static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2332			    int sectors, struct page *page, int rw)
2333{
2334	sector_t first_bad;
2335	int bad_sectors;
2336
2337	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2338	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2339		return -1;
2340	if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2341		/* success */
2342		return 1;
2343	if (rw == WRITE) {
2344		set_bit(WriteErrorSeen, &rdev->flags);
2345		if (!test_and_set_bit(WantReplacement, &rdev->flags))
2346			set_bit(MD_RECOVERY_NEEDED,
2347				&rdev->mddev->recovery);
2348	}
2349	/* need to record an error - either for the block or the device */
2350	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2351		md_error(rdev->mddev, rdev);
2352	return 0;
2353}
2354
2355/*
2356 * This is a kernel thread which:
2357 *
2358 *	1.	Retries failed read operations on working mirrors.
2359 *	2.	Updates the raid superblock when problems are encountered.
2360 *	3.	Performs writes following reads for array synchronising.
2361 */
2362
2363static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2364{
2365	int sect = 0; /* Offset from r10_bio->sector */
2366	int sectors = r10_bio->sectors;
2367	struct md_rdev*rdev;
2368	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2369	int d = r10_bio->devs[r10_bio->read_slot].devnum;
2370
2371	/* still own a reference to this rdev, so it cannot
2372	 * have been cleared recently.
2373	 */
2374	rdev = conf->mirrors[d].rdev;
2375
2376	if (test_bit(Faulty, &rdev->flags))
2377		/* drive has already been failed, just ignore any
2378		   more fix_read_error() attempts */
2379		return;
2380
2381	check_decay_read_errors(mddev, rdev);
2382	atomic_inc(&rdev->read_errors);
2383	if (atomic_read(&rdev->read_errors) > max_read_errors) {
2384		char b[BDEVNAME_SIZE];
2385		bdevname(rdev->bdev, b);
2386
2387		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
2388			  mdname(mddev), b,
2389			  atomic_read(&rdev->read_errors), max_read_errors);
2390		pr_notice("md/raid10:%s: %s: Failing raid device\n",
2391			  mdname(mddev), b);
2392		md_error(mddev, rdev);
2393		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2394		return;
2395	}
2396
2397	while(sectors) {
2398		int s = sectors;
2399		int sl = r10_bio->read_slot;
2400		int success = 0;
2401		int start;
2402
2403		if (s > (PAGE_SIZE>>9))
2404			s = PAGE_SIZE >> 9;
2405
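		/*
		 * Try each in_sync mirror in turn, starting at the slot that
		 * failed, until this page-sized range reads successfully.
		 */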
2406		rcu_read_lock();
2407		do {
2408			sector_t first_bad;
2409			int bad_sectors;
2410
2411			d = r10_bio->devs[sl].devnum;
2412			rdev = rcu_dereference(conf->mirrors[d].rdev);
2413			if (rdev &&
2414			    test_bit(In_sync, &rdev->flags) &&
2415			    !test_bit(Faulty, &rdev->flags) &&
2416			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2417					&first_bad, &bad_sectors) == 0) {
2418				atomic_inc(&rdev->nr_pending);
2419				rcu_read_unlock();
2420				success = sync_page_io(rdev,
2421						       r10_bio->devs[sl].addr +
2422						       sect,
2423						       s<<9,
2424						       conf->tmppage,
2425						       REQ_OP_READ, 0, false);
2426				rdev_dec_pending(rdev, mddev);
2427				rcu_read_lock();
2428				if (success)
2429					break;
2430			}
2431			sl++;
2432			if (sl == conf->copies)
2433				sl = 0;
2434		} while (!success && sl != r10_bio->read_slot);
2435		rcu_read_unlock();
2436
2437		if (!success) {
2438			/* Cannot read from anywhere, just mark the block
2439			 * as bad on the first device to discourage future
2440			 * reads.
2441			 */
2442			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2443			rdev = conf->mirrors[dn].rdev;
2444
2445			if (!rdev_set_badblocks(
2446				    rdev,
2447				    r10_bio->devs[r10_bio->read_slot].addr
2448				    + sect,
2449				    s, 0)) {
2450				md_error(mddev, rdev);
2451				r10_bio->devs[r10_bio->read_slot].bio
2452					= IO_BLOCKED;
2453			}
2454			break;
2455		}
2456
2457		start = sl;
2458		/* write it back and re-read */
2459		rcu_read_lock();
2460		while (sl != r10_bio->read_slot) {
2461			char b[BDEVNAME_SIZE];
2462
2463			if (sl==0)
2464				sl = conf->copies;
2465			sl--;
2466			d = r10_bio->devs[sl].devnum;
2467			rdev = rcu_dereference(conf->mirrors[d].rdev);
2468			if (!rdev ||
2469			    test_bit(Faulty, &rdev->flags) ||
2470			    !test_bit(In_sync, &rdev->flags))
2471				continue;
2472
2473			atomic_inc(&rdev->nr_pending);
2474			rcu_read_unlock();
2475			if (r10_sync_page_io(rdev,
2476					     r10_bio->devs[sl].addr +
2477					     sect,
2478					     s, conf->tmppage, WRITE)
2479			    == 0) {
2480				/* Well, this device is dead */
2481				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
2482					  mdname(mddev), s,
2483					  (unsigned long long)(
2484						  sect +
2485						  choose_data_offset(r10_bio,
2486								     rdev)),
2487					  bdevname(rdev->bdev, b));
2488				pr_notice("md/raid10:%s: %s: failing drive\n",
2489					  mdname(mddev),
2490					  bdevname(rdev->bdev, b));
2491			}
2492			rdev_dec_pending(rdev, mddev);
2493			rcu_read_lock();
2494		}
2495		sl = start;
2496		while (sl != r10_bio->read_slot) {
2497			char b[BDEVNAME_SIZE];
2498
2499			if (sl==0)
2500				sl = conf->copies;
2501			sl--;
2502			d = r10_bio->devs[sl].devnum;
2503			rdev = rcu_dereference(conf->mirrors[d].rdev);
2504			if (!rdev ||
2505			    test_bit(Faulty, &rdev->flags) ||
2506			    !test_bit(In_sync, &rdev->flags))
2507				continue;
2508
2509			atomic_inc(&rdev->nr_pending);
2510			rcu_read_unlock();
2511			switch (r10_sync_page_io(rdev,
2512					     r10_bio->devs[sl].addr +
2513					     sect,
2514					     s, conf->tmppage,
2515						 READ)) {
2516			case 0:
2517				/* Well, this device is dead */
2518				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
2519				       mdname(mddev), s,
2520				       (unsigned long long)(
2521					       sect +
2522					       choose_data_offset(r10_bio, rdev)),
2523				       bdevname(rdev->bdev, b));
2524				pr_notice("md/raid10:%s: %s: failing drive\n",
2525				       mdname(mddev),
2526				       bdevname(rdev->bdev, b));
2527				break;
2528			case 1:
2529				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
2530				       mdname(mddev), s,
2531				       (unsigned long long)(
2532					       sect +
2533					       choose_data_offset(r10_bio, rdev)),
2534				       bdevname(rdev->bdev, b));
2535				atomic_add(s, &rdev->corrected_errors);
2536			}
2537
2538			rdev_dec_pending(rdev, mddev);
2539			rcu_read_lock();
2540		}
2541		rcu_read_unlock();
2542
2543		sectors -= s;
2544		sect += s;
2545	}
2546}
2547
2548static int narrow_write_error(struct r10bio *r10_bio, int i)
2549{
2550	struct bio *bio = r10_bio->master_bio;
2551	struct mddev *mddev = r10_bio->mddev;
2552	struct r10conf *conf = mddev->private;
2553	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2554	/* bio has the data to be written to slot 'i' where
2555	 * we just recently had a write error.
2556	 * We repeatedly clone the bio and trim down to one block,
2557	 * then try the write.  Where the write fails we record
2558	 * a bad block.
2559	 * It is conceivable that the bio doesn't exactly align with
2560	 * blocks.  We must handle this.
2561	 *
2562	 * We currently own a reference to the rdev.
2563	 */
2564
2565	int block_sectors;
2566	sector_t sector;
2567	int sectors;
2568	int sect_to_write = r10_bio->sectors;
2569	int ok = 1;
2570
2571	if (rdev->badblocks.shift < 0)
2572		return 0;
2573
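	/*
	 * Work in units of the bad-block granularity, rounded up to the
	 * device's logical block size; the first write only covers the part
	 * of the request up to the next such boundary.
	 */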
2574	block_sectors = roundup(1 << rdev->badblocks.shift,
2575				bdev_logical_block_size(rdev->bdev) >> 9);
2576	sector = r10_bio->sector;
2577	sectors = ((r10_bio->sector + block_sectors)
2578		   & ~(sector_t)(block_sectors - 1))
2579		- sector;
2580
2581	while (sect_to_write) {
2582		struct bio *wbio;
2583		sector_t wsector;
2584		if (sectors > sect_to_write)
2585			sectors = sect_to_write;
2586		/* Write at 'sector' for 'sectors' */
2587		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2588		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2589		wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2590		wbio->bi_iter.bi_sector = wsector +
2591				   choose_data_offset(r10_bio, rdev);
2592		wbio->bi_bdev = rdev->bdev;
2593		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2594
2595		if (submit_bio_wait(wbio) < 0)
2596			/* Failure! */
2597			ok = rdev_set_badblocks(rdev, wsector,
2598						sectors, 0)
2599				&& ok;
2600
2601		bio_put(wbio);
2602		sect_to_write -= sectors;
2603		sector += sectors;
2604		sectors = block_sectors;
2605	}
2606	return ok;
2607}
2608
2609static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2610{
2611	int slot = r10_bio->read_slot;
2612	struct bio *bio;
2613	struct r10conf *conf = mddev->private;
2614	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2615	char b[BDEVNAME_SIZE];
2616	unsigned long do_sync;
2617	int max_sectors;
2618	dev_t bio_dev;
2619	sector_t bio_last_sector;
2620
2621	/* we got a read error. Maybe the drive is bad.  Maybe just
2622	 * the block and we can fix it.
2623	 * We freeze all other IO, and try reading the block from
2624	 * other devices.  When we find one, we re-write
2625 * it and check that this fixes the read error.
2626	 * This is all done synchronously while the array is
2627	 * frozen.
2628	 */
2629	bio = r10_bio->devs[slot].bio;
2630	bdevname(bio->bi_bdev, b);
2631	bio_dev = bio->bi_bdev->bd_dev;
2632	bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
2633	bio_put(bio);
2634	r10_bio->devs[slot].bio = NULL;
2635
2636	if (mddev->ro)
2637		r10_bio->devs[slot].bio = IO_BLOCKED;
2638	else if (!test_bit(FailFast, &rdev->flags)) {
2639		freeze_array(conf, 1);
2640		fix_read_error(conf, mddev, r10_bio);
2641		unfreeze_array(conf);
2642	} else
2643		md_error(mddev, rdev);
2644
2645	rdev_dec_pending(rdev, mddev);
2646
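	/*
	 * The failed slot has been cleared; ask read_balance() for another
	 * mirror and resubmit the read, splitting the request if the chosen
	 * device can only serve part of it.
	 */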
2647read_more:
2648	rdev = read_balance(conf, r10_bio, &max_sectors);
2649	if (rdev == NULL) {
2650		pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
2651				    mdname(mddev), b,
2652				    (unsigned long long)r10_bio->sector);
2653		raid_end_bio_io(r10_bio);
2654		return;
2655	}
2656
2657	do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
2658	slot = r10_bio->read_slot;
2659	pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
2660			   mdname(mddev),
2661			   bdevname(rdev->bdev, b),
2662			   (unsigned long long)r10_bio->sector);
2663	bio = bio_clone_mddev(r10_bio->master_bio,
2664			      GFP_NOIO, mddev);
2665	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2666	r10_bio->devs[slot].bio = bio;
2667	r10_bio->devs[slot].rdev = rdev;
2668	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2669		+ choose_data_offset(r10_bio, rdev);
2670	bio->bi_bdev = rdev->bdev;
2671	bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
2672	if (test_bit(FailFast, &rdev->flags) &&
2673	    test_bit(R10BIO_FailFast, &r10_bio->state))
2674		bio->bi_opf |= MD_FAILFAST;
2675	bio->bi_private = r10_bio;
2676	bio->bi_end_io = raid10_end_read_request;
2677	trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
2678			      bio, bio_dev,
2679			      bio_last_sector - r10_bio->sectors);
2680
2681	if (max_sectors < r10_bio->sectors) {
2682		/* Drat - have to split this up more */
2683		struct bio *mbio = r10_bio->master_bio;
2684		int sectors_handled =
2685			r10_bio->sector + max_sectors
2686			- mbio->bi_iter.bi_sector;
2687		r10_bio->sectors = max_sectors;
2688		spin_lock_irq(&conf->device_lock);
2689		if (mbio->bi_phys_segments == 0)
2690			mbio->bi_phys_segments = 2;
2691		else
2692			mbio->bi_phys_segments++;
2693		spin_unlock_irq(&conf->device_lock);
2694		generic_make_request(bio);
2695
2696		r10_bio = mempool_alloc(conf->r10bio_pool,
2697					GFP_NOIO);
2698		r10_bio->master_bio = mbio;
2699		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2700		r10_bio->state = 0;
2701		set_bit(R10BIO_ReadError,
2702			&r10_bio->state);
2703		r10_bio->mddev = mddev;
2704		r10_bio->sector = mbio->bi_iter.bi_sector
2705			+ sectors_handled;
2706
2707		goto read_more;
2708	} else
2709		generic_make_request(bio);
2710}
2711
2712static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2713{
2714	/* Some sort of write request has finished and it
2715	 * succeeded in writing where we thought there was a
2716	 * bad block.  So forget the bad block.
2717 * Or possibly it failed and we need to record
2718	 * a bad block.
2719	 */
2720	int m;
2721	struct md_rdev *rdev;
2722
2723	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2724	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2725		for (m = 0; m < conf->copies; m++) {
2726			int dev = r10_bio->devs[m].devnum;
2727			rdev = conf->mirrors[dev].rdev;
2728			if (r10_bio->devs[m].bio == NULL)
2729				continue;
2730			if (!r10_bio->devs[m].bio->bi_error) {
2731				rdev_clear_badblocks(
2732					rdev,
2733					r10_bio->devs[m].addr,
2734					r10_bio->sectors, 0);
2735			} else {
2736				if (!rdev_set_badblocks(
2737					    rdev,
2738					    r10_bio->devs[m].addr,
2739					    r10_bio->sectors, 0))
2740					md_error(conf->mddev, rdev);
2741			}
2742			rdev = conf->mirrors[dev].replacement;
2743			if (r10_bio->devs[m].repl_bio == NULL)
2744				continue;
2745
2746			if (!r10_bio->devs[m].repl_bio->bi_error) {
2747				rdev_clear_badblocks(
2748					rdev,
2749					r10_bio->devs[m].addr,
2750					r10_bio->sectors, 0);
2751			} else {
2752				if (!rdev_set_badblocks(
2753					    rdev,
2754					    r10_bio->devs[m].addr,
2755					    r10_bio->sectors, 0))
2756					md_error(conf->mddev, rdev);
2757			}
2758		}
2759		put_buf(r10_bio);
2760	} else {
2761		bool fail = false;
2762		for (m = 0; m < conf->copies; m++) {
2763			int dev = r10_bio->devs[m].devnum;
2764			struct bio *bio = r10_bio->devs[m].bio;
2765			rdev = conf->mirrors[dev].rdev;
2766			if (bio == IO_MADE_GOOD) {
2767				rdev_clear_badblocks(
2768					rdev,
2769					r10_bio->devs[m].addr,
2770					r10_bio->sectors, 0);
2771				rdev_dec_pending(rdev, conf->mddev);
2772			} else if (bio != NULL && bio->bi_error) {
2773				fail = true;
2774				if (!narrow_write_error(r10_bio, m)) {
2775					md_error(conf->mddev, rdev);
2776					set_bit(R10BIO_Degraded,
2777						&r10_bio->state);
2778				}
2779				rdev_dec_pending(rdev, conf->mddev);
2780			}
2781			bio = r10_bio->devs[m].repl_bio;
2782			rdev = conf->mirrors[dev].replacement;
2783			if (rdev && bio == IO_MADE_GOOD) {
2784				rdev_clear_badblocks(
2785					rdev,
2786					r10_bio->devs[m].addr,
2787					r10_bio->sectors, 0);
2788				rdev_dec_pending(rdev, conf->mddev);
2789			}
2790		}
2791		if (fail) {
2792			spin_lock_irq(&conf->device_lock);
2793			list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2794			conf->nr_queued++;
2795			spin_unlock_irq(&conf->device_lock);
2796			md_wakeup_thread(conf->mddev->thread);
2797		} else {
2798			if (test_bit(R10BIO_WriteError,
2799				     &r10_bio->state))
2800				close_write(r10_bio);
2801			raid_end_bio_io(r10_bio);
2802		}
2803	}
2804}
2805
2806static void raid10d(struct md_thread *thread)
2807{
2808	struct mddev *mddev = thread->mddev;
2809	struct r10bio *r10_bio;
2810	unsigned long flags;
2811	struct r10conf *conf = mddev->private;
2812	struct list_head *head = &conf->retry_list;
2813	struct blk_plug plug;
2814
2815	md_check_recovery(mddev);
2816
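	/*
	 * r10_bios queued on bio_end_io_list had write errors; hold them back
	 * until the superblock changes recording those errors are no longer
	 * pending, then complete them.
	 */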
2817	if (!list_empty_careful(&conf->bio_end_io_list) &&
2818	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2819		LIST_HEAD(tmp);
2820		spin_lock_irqsave(&conf->device_lock, flags);
2821		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2822			while (!list_empty(&conf->bio_end_io_list)) {
2823				list_move(conf->bio_end_io_list.prev, &tmp);
2824				conf->nr_queued--;
2825			}
2826		}
2827		spin_unlock_irqrestore(&conf->device_lock, flags);
2828		while (!list_empty(&tmp)) {
2829			r10_bio = list_first_entry(&tmp, struct r10bio,
2830						   retry_list);
2831			list_del(&r10_bio->retry_list);
2832			if (mddev->degraded)
2833				set_bit(R10BIO_Degraded, &r10_bio->state);
2834
2835			if (test_bit(R10BIO_WriteError,
2836				     &r10_bio->state))
2837				close_write(r10_bio);
2838			raid_end_bio_io(r10_bio);
2839		}
2840	}
2841
2842	blk_start_plug(&plug);
2843	for (;;) {
2844
2845		flush_pending_writes(conf);
2846
2847		spin_lock_irqsave(&conf->device_lock, flags);
2848		if (list_empty(head)) {
2849			spin_unlock_irqrestore(&conf->device_lock, flags);
2850			break;
2851		}
2852		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2853		list_del(head->prev);
2854		conf->nr_queued--;
2855		spin_unlock_irqrestore(&conf->device_lock, flags);
2856
2857		mddev = r10_bio->mddev;
2858		conf = mddev->private;
2859		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2860		    test_bit(R10BIO_WriteError, &r10_bio->state))
2861			handle_write_completed(conf, r10_bio);
2862		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2863			reshape_request_write(mddev, r10_bio);
2864		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2865			sync_request_write(mddev, r10_bio);
2866		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2867			recovery_request_write(mddev, r10_bio);
2868		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2869			handle_read_error(mddev, r10_bio);
2870		else {
2871			/* just a partial read to be scheduled from a
2872			 * separate context
2873			 */
2874			int slot = r10_bio->read_slot;
2875			generic_make_request(r10_bio->devs[slot].bio);
2876		}
2877
2878		cond_resched();
2879		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2880			md_check_recovery(mddev);
2881	}
2882	blk_finish_plug(&plug);
2883}
2884
2885static int init_resync(struct r10conf *conf)
2886{
2887	int buffs;
2888	int i;
2889
2890	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2891	BUG_ON(conf->r10buf_pool);
2892	conf->have_replacement = 0;
2893	for (i = 0; i < conf->geo.raid_disks; i++)
2894		if (conf->mirrors[i].replacement)
2895			conf->have_replacement = 1;
2896	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2897	if (!conf->r10buf_pool)
2898		return -ENOMEM;
2899	conf->next_resync = 0;
2900	return 0;
2901}
2902
2903/*
2904 * perform a "sync" on one "block"
2905 *
2906 * We need to make sure that normal I/O requests - particularly write
2907 * requests - do not conflict with active sync requests.
2908 *
2909 * This is achieved by tracking pending requests and a 'barrier' concept
2910 * that can be installed to exclude normal IO requests.
2911 *
2912 * Resync and recovery are handled very differently.
2913 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2914 *
2915 * For resync, we iterate over virtual addresses, read all copies,
2916 * and update if there are differences.  If only one copy is live,
2917 * skip it.
2918 * For recovery, we iterate over physical addresses, read a good
2919 * value for each non-in_sync drive, and over-write.
2920 *
2921 * So, for recovery we may have several outstanding complex requests for a
2922 * given address, one for each out-of-sync device.  We model this by allocating
2923 * a number of r10_bio structures, one for each out-of-sync device.
2924 * As we set up these structures, we collect all the bios together into a list
2925 * which we then process collectively to add pages, and then process again
2926 * to pass to generic_make_request.
2927 *
2928 * The r10_bio structures are linked using a borrowed master_bio pointer.
2929 * This link is counted in ->remaining.  When the r10_bio that points to NULL
2930 * has its remaining count decremented to 0, the whole complex operation
2931 * is complete.
2932 *
2933 */
2934
2935static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2936			     int *skipped)
2937{
2938	struct r10conf *conf = mddev->private;
2939	struct r10bio *r10_bio;
2940	struct bio *biolist = NULL, *bio;
2941	sector_t max_sector, nr_sectors;
2942	int i;
2943	int max_sync;
2944	sector_t sync_blocks;
2945	sector_t sectors_skipped = 0;
2946	int chunks_skipped = 0;
2947	sector_t chunk_mask = conf->geo.chunk_mask;
2948
2949	if (!conf->r10buf_pool)
2950		if (init_resync(conf))
2951			return 0;
2952
2953	/*
2954	 * Allow skipping a full rebuild for incremental assembly
2955	 * of a clean array, like RAID1 does.
2956	 */
2957	if (mddev->bitmap == NULL &&
2958	    mddev->recovery_cp == MaxSector &&
2959	    mddev->reshape_position == MaxSector &&
2960	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2961	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2962	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2963	    conf->fullsync == 0) {
2964		*skipped = 1;
2965		return mddev->dev_sectors - sector_nr;
2966	}
2967
2968 skipped:
2969	max_sector = mddev->dev_sectors;
2970	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2971	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2972		max_sector = mddev->resync_max_sectors;
2973	if (sector_nr >= max_sector) {
2974		/* If we aborted, we need to abort the
2975		 * sync on the 'current' bitmap chunks (there can
2976		 * be several when recovering multiple devices),
2977		 * as we may have started syncing them but not finished.
2978		 * We can find the current address in
2979		 * mddev->curr_resync, but for recovery,
2980		 * we need to convert that to several
2981		 * virtual addresses.
2982		 */
2983		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2984			end_reshape(conf);
2985			close_sync(conf);
2986			return 0;
2987		}
2988
2989		if (mddev->curr_resync < max_sector) { /* aborted */
2990			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2991				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2992						&sync_blocks, 1);
2993			else for (i = 0; i < conf->geo.raid_disks; i++) {
2994				sector_t sect =
2995					raid10_find_virt(conf, mddev->curr_resync, i);
2996				bitmap_end_sync(mddev->bitmap, sect,
2997						&sync_blocks, 1);
2998			}
2999		} else {
3000			/* completed sync */
3001			if ((!mddev->bitmap || conf->fullsync)
3002			    && conf->have_replacement
3003			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3004				/* Completed a full sync so the replacements
3005				 * are now fully recovered.
3006				 */
3007				rcu_read_lock();
3008				for (i = 0; i < conf->geo.raid_disks; i++) {
3009					struct md_rdev *rdev =
3010						rcu_dereference(conf->mirrors[i].replacement);
3011					if (rdev)
3012						rdev->recovery_offset = MaxSector;
3013				}
3014				rcu_read_unlock();
3015			}
3016			conf->fullsync = 0;
3017		}
3018		bitmap_close_sync(mddev->bitmap);
3019		close_sync(conf);
3020		*skipped = 1;
3021		return sectors_skipped;
3022	}
3023
3024	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3025		return reshape_request(mddev, sector_nr, skipped);
3026
3027	if (chunks_skipped >= conf->geo.raid_disks) {
3028		/* if there has been nothing to do on any drive,
3029		 * then there is nothing to do at all..
3030		 */
3031		*skipped = 1;
3032		return (max_sector - sector_nr) + sectors_skipped;
3033	}
3034
3035	if (max_sector > mddev->resync_max)
3036		max_sector = mddev->resync_max; /* Don't do IO beyond here */
3037
3038	/* make sure whole request will fit in a chunk - if chunks
3039	 * are meaningful
3040	 */
3041	if (conf->geo.near_copies < conf->geo.raid_disks &&
3042	    max_sector > (sector_nr | chunk_mask))
3043		max_sector = (sector_nr | chunk_mask) + 1;
3044
3045	/*
3046	 * If there is non-resync activity waiting for a turn, then let it
3047	 * through before starting on this new sync request.
3048	 */
3049	if (conf->nr_waiting)
3050		schedule_timeout_uninterruptible(1);
3051
3052	/* Again, very different code for resync and recovery.
3053	 * Both must result in an r10bio with a list of bios that
3054	 * have bi_end_io, bi_sector, bi_bdev set,
3055	 * and bi_private set to the r10bio.
3056	 * For recovery, we may actually create several r10bios
3057	 * with 2 bios in each, that correspond to the bios in the main one.
3058	 * In this case, the subordinate r10bios link back through a
3059	 * borrowed master_bio pointer, and the counter in the master
3060	 * includes a ref from each subordinate.
3061	 */
3062	/* First, we decide what to do and set ->bi_end_io
3063	 * to end_sync_read if we want to read, and
3064	 * end_sync_write if we will want to write.
3065	 */
3066
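	/*
	 * Each r10buf carries RESYNC_PAGES pages, which bounds how many
	 * sectors one sync request can cover.
	 */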
3067	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
3068	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3069		/* recovery... the complicated one */
3070		int j;
3071		r10_bio = NULL;
3072
3073		for (i = 0 ; i < conf->geo.raid_disks; i++) {
3074			int still_degraded;
3075			struct r10bio *rb2;
3076			sector_t sect;
3077			int must_sync;
3078			int any_working;
3079			struct raid10_info *mirror = &conf->mirrors[i];
3080			struct md_rdev *mrdev, *mreplace;
3081
3082			rcu_read_lock();
3083			mrdev = rcu_dereference(mirror->rdev);
3084			mreplace = rcu_dereference(mirror->replacement);
3085
3086			if ((mrdev == NULL ||
3087			     test_bit(Faulty, &mrdev->flags) ||
3088			     test_bit(In_sync, &mrdev->flags)) &&
3089			    (mreplace == NULL ||
3090			     test_bit(Faulty, &mreplace->flags))) {
3091				rcu_read_unlock();
3092				continue;
3093			}
3094
3095			still_degraded = 0;
3096			/* want to reconstruct this device */
3097			rb2 = r10_bio;
3098			sect = raid10_find_virt(conf, sector_nr, i);
3099			if (sect >= mddev->resync_max_sectors) {
3100				/* last stripe is not complete - don't
3101				 * try to recover this sector.
3102				 */
3103				rcu_read_unlock();
3104				continue;
3105			}
3106			if (mreplace && test_bit(Faulty, &mreplace->flags))
3107				mreplace = NULL;
3108			/* Unless we are doing a full sync, or a replacement,
3109			 * we only need to recover the block if it is set in
3110			 * the bitmap
3111			 */
3112			must_sync = bitmap_start_sync(mddev->bitmap, sect,
3113						      &sync_blocks, 1);
3114			if (sync_blocks < max_sync)
3115				max_sync = sync_blocks;
3116			if (!must_sync &&
3117			    mreplace == NULL &&
3118			    !conf->fullsync) {
3119				/* yep, skip the sync_blocks here, but don't assume
3120				 * that there will never be anything to do here
3121				 */
3122				chunks_skipped = -1;
3123				rcu_read_unlock();
3124				continue;
3125			}
3126			atomic_inc(&mrdev->nr_pending);
3127			if (mreplace)
3128				atomic_inc(&mreplace->nr_pending);
3129			rcu_read_unlock();
3130
3131			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3132			r10_bio->state = 0;
3133			raise_barrier(conf, rb2 != NULL);
3134			atomic_set(&r10_bio->remaining, 0);
3135
3136			r10_bio->master_bio = (struct bio*)rb2;
3137			if (rb2)
3138				atomic_inc(&rb2->remaining);
3139			r10_bio->mddev = mddev;
3140			set_bit(R10BIO_IsRecover, &r10_bio->state);
3141			r10_bio->sector = sect;
3142
3143			raid10_find_phys(conf, r10_bio);
3144
3145			/* Need to check if the array will still be
3146			 * degraded
3147			 */
3148			rcu_read_lock();
3149			for (j = 0; j < conf->geo.raid_disks; j++) {
3150				struct md_rdev *rdev = rcu_dereference(
3151					conf->mirrors[j].rdev);
3152				if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3153					still_degraded = 1;
3154					break;
3155				}
3156			}
3157
3158			must_sync = bitmap_start_sync(mddev->bitmap, sect,
3159						      &sync_blocks, still_degraded);
3160
3161			any_working = 0;
3162			for (j=0; j<conf->copies;j++) {
3163				int k;
3164				int d = r10_bio->devs[j].devnum;
3165				sector_t from_addr, to_addr;
3166				struct md_rdev *rdev =
3167					rcu_dereference(conf->mirrors[d].rdev);
3168				sector_t sector, first_bad;
3169				int bad_sectors;
3170				if (!rdev ||
3171				    !test_bit(In_sync, &rdev->flags))
3172					continue;
3173				/* This is where we read from */
3174				any_working = 1;
3175				sector = r10_bio->devs[j].addr;
3176
3177				if (is_badblock(rdev, sector, max_sync,
3178						&first_bad, &bad_sectors)) {
3179					if (first_bad > sector)
3180						max_sync = first_bad - sector;
3181					else {
3182						bad_sectors -= (sector
3183								- first_bad);
3184						if (max_sync > bad_sectors)
3185							max_sync = bad_sectors;
3186						continue;
3187					}
3188				}
3189				bio = r10_bio->devs[0].bio;
3190				bio_reset(bio);
3191				bio->bi_next = biolist;
3192				biolist = bio;
3193				bio->bi_private = r10_bio;
3194				bio->bi_end_io = end_sync_read;
3195				bio_set_op_attrs(bio, REQ_OP_READ, 0);
3196				if (test_bit(FailFast, &rdev->flags))
3197					bio->bi_opf |= MD_FAILFAST;
3198				from_addr = r10_bio->devs[j].addr;
3199				bio->bi_iter.bi_sector = from_addr +
3200					rdev->data_offset;
3201				bio->bi_bdev = rdev->bdev;
3202				atomic_inc(&rdev->nr_pending);
3203				/* and we write to 'i' (if not in_sync) */
3204
3205				for (k=0; k<conf->copies; k++)
3206					if (r10_bio->devs[k].devnum == i)
3207						break;
3208				BUG_ON(k == conf->copies);
3209				to_addr = r10_bio->devs[k].addr;
3210				r10_bio->devs[0].devnum = d;
3211				r10_bio->devs[0].addr = from_addr;
3212				r10_bio->devs[1].devnum = i;
3213				r10_bio->devs[1].addr = to_addr;
3214
3215				if (!test_bit(In_sync, &mrdev->flags)) {
3216					bio = r10_bio->devs[1].bio;
3217					bio_reset(bio);
3218					bio->bi_next = biolist;
3219					biolist = bio;
3220					bio->bi_private = r10_bio;
3221					bio->bi_end_io = end_sync_write;
3222					bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3223					bio->bi_iter.bi_sector = to_addr
3224						+ mrdev->data_offset;
3225					bio->bi_bdev = mrdev->bdev;
3226					atomic_inc(&r10_bio->remaining);
3227				} else
3228					r10_bio->devs[1].bio->bi_end_io = NULL;
3229
3230				/* and maybe write to replacement */
3231				bio = r10_bio->devs[1].repl_bio;
3232				if (bio)
3233					bio->bi_end_io = NULL;
3234				/* Note: if mreplace != NULL, then bio
3235				 * cannot be NULL as r10buf_pool_alloc will
3236				 * have allocated it.
3237				 * So the second test here is pointless.
3238				 * But it keeps semantic-checkers happy, and
3239				 * this comment keeps human reviewers
3240				 * happy.
3241				 */
3242				if (mreplace == NULL || bio == NULL ||
3243				    test_bit(Faulty, &mreplace->flags))
3244					break;
3245				bio_reset(bio);
3246				bio->bi_next = biolist;
3247				biolist = bio;
3248				bio->bi_private = r10_bio;
3249				bio->bi_end_io = end_sync_write;
3250				bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3251				bio->bi_iter.bi_sector = to_addr +
3252					mreplace->data_offset;
3253				bio->bi_bdev = mreplace->bdev;
3254				atomic_inc(&r10_bio->remaining);
3255				break;
3256			}
3257			rcu_read_unlock();
3258			if (j == conf->copies) {
3259				/* Cannot recover, so abort the recovery or
3260				 * record a bad block */
3261				if (any_working) {
3262					/* problem is that there are bad blocks
3263					 * on other device(s)
3264					 */
3265					int k;
3266					for (k = 0; k < conf->copies; k++)
3267						if (r10_bio->devs[k].devnum == i)
3268							break;
3269					if (!test_bit(In_sync,
3270						      &mrdev->flags)
3271					    && !rdev_set_badblocks(
3272						    mrdev,
3273						    r10_bio->devs[k].addr,
3274						    max_sync, 0))
3275						any_working = 0;
3276					if (mreplace &&
3277					    !rdev_set_badblocks(
3278						    mreplace,
3279						    r10_bio->devs[k].addr,
3280						    max_sync, 0))
3281						any_working = 0;
3282				}
3283				if (!any_working)  {
3284					if (!test_and_set_bit(MD_RECOVERY_INTR,
3285							      &mddev->recovery))
3286						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
3287						       mdname(mddev));
3288					mirror->recovery_disabled
3289						= mddev->recovery_disabled;
3290				}
3291				put_buf(r10_bio);
3292				if (rb2)
3293					atomic_dec(&rb2->remaining);
3294				r10_bio = rb2;
3295				rdev_dec_pending(mrdev, mddev);
3296				if (mreplace)
3297					rdev_dec_pending(mreplace, mddev);
3298				break;
3299			}
3300			rdev_dec_pending(mrdev, mddev);
3301			if (mreplace)
3302				rdev_dec_pending(mreplace, mddev);
3303			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3304				/* Only want this if there is elsewhere to
3305				 * read from. 'j' is currently the first
3306				 * readable copy.
3307				 */
3308				int targets = 1;
3309				for (; j < conf->copies; j++) {
3310					int d = r10_bio->devs[j].devnum;
3311					if (conf->mirrors[d].rdev &&
3312					    test_bit(In_sync,
3313						      &conf->mirrors[d].rdev->flags))
3314						targets++;
3315				}
3316				if (targets == 1)
3317					r10_bio->devs[0].bio->bi_opf
3318						&= ~MD_FAILFAST;
3319			}
3320		}
3321		if (biolist == NULL) {
3322			while (r10_bio) {
3323				struct r10bio *rb2 = r10_bio;
3324				r10_bio = (struct r10bio*) rb2->master_bio;
3325				rb2->master_bio = NULL;
3326				put_buf(rb2);
3327			}
3328			goto giveup;
3329		}
3330	} else {
3331		/* resync. Schedule a read for every block at this virt offset */
3332		int count = 0;
3333
3334		bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
3335
3336		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3337				       &sync_blocks, mddev->degraded) &&
3338		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3339						 &mddev->recovery)) {
3340			/* We can skip this block */
3341			*skipped = 1;
3342			return sync_blocks + sectors_skipped;
3343		}
3344		if (sync_blocks < max_sync)
3345			max_sync = sync_blocks;
3346		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3347		r10_bio->state = 0;
3348
3349		r10_bio->mddev = mddev;
3350		atomic_set(&r10_bio->remaining, 0);
3351		raise_barrier(conf, 0);
3352		conf->next_resync = sector_nr;
3353
3354		r10_bio->master_bio = NULL;
3355		r10_bio->sector = sector_nr;
3356		set_bit(R10BIO_IsSync, &r10_bio->state);
3357		raid10_find_phys(conf, r10_bio);
3358		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3359
3360		for (i = 0; i < conf->copies; i++) {
3361			int d = r10_bio->devs[i].devnum;
3362			sector_t first_bad, sector;
3363			int bad_sectors;
3364			struct md_rdev *rdev;
3365
3366			if (r10_bio->devs[i].repl_bio)
3367				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3368
3369			bio = r10_bio->devs[i].bio;
3370			bio_reset(bio);
3371			bio->bi_error = -EIO;
3372			rcu_read_lock();
3373			rdev = rcu_dereference(conf->mirrors[d].rdev);
3374			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3375				rcu_read_unlock();
3376				continue;
3377			}
3378			sector = r10_bio->devs[i].addr;
3379			if (is_badblock(rdev, sector, max_sync,
3380					&first_bad, &bad_sectors)) {
3381				if (first_bad > sector)
3382					max_sync = first_bad - sector;
3383				else {
3384					bad_sectors -= (sector - first_bad);
3385					if (max_sync > bad_sectors)
3386						max_sync = bad_sectors;
3387					rcu_read_unlock();
3388					continue;
3389				}
3390			}
3391			atomic_inc(&rdev->nr_pending);
3392			atomic_inc(&r10_bio->remaining);
3393			bio->bi_next = biolist;
3394			biolist = bio;
3395			bio->bi_private = r10_bio;
3396			bio->bi_end_io = end_sync_read;
3397			bio_set_op_attrs(bio, REQ_OP_READ, 0);
3398			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
3399				bio->bi_opf |= MD_FAILFAST;
3400			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3401			bio->bi_bdev = rdev->bdev;
3402			count++;
3403
3404			rdev = rcu_dereference(conf->mirrors[d].replacement);
3405			if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3406				rcu_read_unlock();
3407				continue;
3408			}
3409			atomic_inc(&rdev->nr_pending);
3410			rcu_read_unlock();
3411
3412			/* Need to set up for writing to the replacement */
3413			bio = r10_bio->devs[i].repl_bio;
3414			bio_reset(bio);
3415			bio->bi_error = -EIO;
3416
3417			sector = r10_bio->devs[i].addr;
3418			bio->bi_next = biolist;
3419			biolist = bio;
3420			bio->bi_private = r10_bio;
3421			bio->bi_end_io = end_sync_write;
3422			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
3423			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
3424				bio->bi_opf |= MD_FAILFAST;
3425			bio->bi_iter.bi_sector = sector + rdev->data_offset;
3426			bio->bi_bdev = rdev->bdev;
3427			count++;
3428		}
3429
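		/*
		 * Fewer than two usable copies here means there is nothing to
		 * compare against, so drop the references and the buffer and
		 * skip this chunk.
		 */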
3430		if (count < 2) {
3431			for (i=0; i<conf->copies; i++) {
3432				int d = r10_bio->devs[i].devnum;
3433				if (r10_bio->devs[i].bio->bi_end_io)
3434					rdev_dec_pending(conf->mirrors[d].rdev,
3435							 mddev);
3436				if (r10_bio->devs[i].repl_bio &&
3437				    r10_bio->devs[i].repl_bio->bi_end_io)
3438					rdev_dec_pending(
3439						conf->mirrors[d].replacement,
3440						mddev);
3441			}
3442			put_buf(r10_bio);
3443			biolist = NULL;
3444			goto giveup;
3445		}
3446	}
3447
3448	nr_sectors = 0;
3449	if (sector_nr + max_sync < max_sector)
3450		max_sector = sector_nr + max_sync;
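	/*
	 * Add pages to every bio in biolist, one page per pass, until we hit
	 * max_sector or run out of RESYNC_PAGES; if bio_add_page() fails
	 * part-way, trim the page just added from the earlier bios so they
	 * all describe the same number of sectors.
	 */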
3451	do {
3452		struct page *page;
3453		int len = PAGE_SIZE;
3454		if (sector_nr + (len>>9) > max_sector)
3455			len = (max_sector - sector_nr) << 9;
3456		if (len == 0)
3457			break;
3458		for (bio= biolist ; bio ; bio=bio->bi_next) {
3459			struct bio *bio2;
3460			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3461			if (bio_add_page(bio, page, len, 0))
3462				continue;
3463
3464			/* stop here */
3465			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3466			for (bio2 = biolist;
3467			     bio2 && bio2 != bio;
3468			     bio2 = bio2->bi_next) {
3469				/* remove last page from this bio */
3470				bio2->bi_vcnt--;
3471				bio2->bi_iter.bi_size -= len;
3472				bio_clear_flag(bio2, BIO_SEG_VALID);
3473			}
3474			goto bio_full;
3475		}
3476		nr_sectors += len>>9;
3477		sector_nr += len>>9;
3478	} while (biolist->bi_vcnt < RESYNC_PAGES);
3479 bio_full:
3480	r10_bio->sectors = nr_sectors;
3481
3482	while (biolist) {
3483		bio = biolist;
3484		biolist = biolist->bi_next;
3485
3486		bio->bi_next = NULL;
3487		r10_bio = bio->bi_private;
3488		r10_bio->sectors = nr_sectors;
3489
3490		if (bio->bi_end_io == end_sync_read) {
3491			md_sync_acct(bio->bi_bdev, nr_sectors);
3492			bio->bi_error = 0;
3493			generic_make_request(bio);
3494		}
3495	}
3496
3497	if (sectors_skipped)
3498		/* pretend they weren't skipped, it makes
3499		 * no important difference in this case
3500		 */
3501		md_done_sync(mddev, sectors_skipped, 1);
3502
3503	return sectors_skipped + nr_sectors;
3504 giveup:
3505	/* There is nowhere to write, so all non-sync
3506	 * drives must be failed or in resync, or all drives
3507	 * have a bad block, so try the next chunk...
3508	 */
3509	if (sector_nr + max_sync < max_sector)
3510		max_sector = sector_nr + max_sync;
3511
3512	sectors_skipped += (max_sector - sector_nr);
3513	chunks_skipped ++;
3514	sector_nr = max_sector;
3515	goto skipped;
3516}
3517
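/*
 * Report the array capacity for 'raid_disks' devices of 'sectors' each:
 * chunks per device are divided among far_copies, multiplied by the number
 * of disks and divided by near_copies, then converted back to sectors.
 */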
3518static sector_t
3519raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3520{
3521	sector_t size;
3522	struct r10conf *conf = mddev->private;
3523
3524	if (!raid_disks)
3525		raid_disks = min(conf->geo.raid_disks,
3526				 conf->prev.raid_disks);
3527	if (!sectors)
3528		sectors = conf->dev_sectors;
3529
3530	size = sectors >> conf->geo.chunk_shift;
3531	sector_div(size, conf->geo.far_copies);
3532	size = size * raid_disks;
3533	sector_div(size, conf->geo.near_copies);
3534
3535	return size << conf->geo.chunk_shift;
3536}
3537
3538static void calc_sectors(struct r10conf *conf, sector_t size)
3539{
3540	/* Calculate the number of sectors-per-device that will
3541	 * actually be used, and set conf->dev_sectors and
3542	 * conf->stride
3543	 */
3544
3545	size = size >> conf->geo.chunk_shift;
3546	sector_div(size, conf->geo.far_copies);
3547	size = size * conf->geo.raid_disks;
3548	sector_div(size, conf->geo.near_copies);
3549	/* 'size' is now the number of chunks in the array */
3550	/* calculate "used chunks per device" */
3551	size = size * conf->copies;
3552
3553	/* We need to round up when dividing by raid_disks to
3554	 * get the stride size.
3555	 */
3556	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3557
3558	conf->dev_sectors = size << conf->geo.chunk_shift;
3559
3560	if (conf->geo.far_offset)
3561		conf->geo.stride = 1 << conf->geo.chunk_shift;
3562	else {
3563		sector_div(size, conf->geo.far_copies);
3564		conf->geo.stride = size << conf->geo.chunk_shift;
3565	}
3566}
3567
3568enum geo_type {geo_new, geo_old, geo_start};
3569static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3570{
3571	int nc, fc, fo;
3572	int layout, chunk, disks;
3573	switch (new) {
3574	case geo_old:
3575		layout = mddev->layout;
3576		chunk = mddev->chunk_sectors;
3577		disks = mddev->raid_disks - mddev->delta_disks;
3578		break;
3579	case geo_new:
3580		layout = mddev->new_layout;
3581		chunk = mddev->new_chunk_sectors;
3582		disks = mddev->raid_disks;
3583		break;
3584	default: /* avoid 'may be unused' warnings */
3585	case geo_start: /* new when starting reshape - raid_disks not
3586			 * updated yet. */
3587		layout = mddev->new_layout;
3588		chunk = mddev->new_chunk_sectors;
3589		disks = mddev->raid_disks + mddev->delta_disks;
3590		break;
3591	}
3592	if (layout >> 19)
3593		return -1;
3594	if (chunk < (PAGE_SIZE >> 9) ||
3595	    !is_power_of_2(chunk))
3596		return -2;
3597	nc = layout & 255;
3598	fc = (layout >> 8) & 255;
3599	fo = layout & (1<<16);
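	/* e.g. layout 0x102 decodes to near_copies = 2, far_copies = 1,
	 * far_offset = 0 -- the common "n2" arrangement.
	 */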
3600	geo->raid_disks = disks;
3601	geo->near_copies = nc;
3602	geo->far_copies = fc;
3603	geo->far_offset = fo;
3604	switch (layout >> 17) {
3605	case 0:	/* original layout.  simple but not always optimal */
3606		geo->far_set_size = disks;
3607		break;
3608	case 1: /* "improved" layout which was buggy.  Hopefully no-one is
3609		 * actually using this, but leave code here just in case.*/
3610		geo->far_set_size = disks/fc;
3611		WARN(geo->far_set_size < fc,
3612		     "This RAID10 layout does not provide data safety - please backup and create new array\n");
3613		break;
3614	case 2: /* "improved" layout fixed to match documentation */
3615		geo->far_set_size = fc * nc;
3616		break;
3617	default: /* Not a valid layout */
3618		return -1;
3619	}
3620	geo->chunk_mask = chunk - 1;
3621	geo->chunk_shift = ffz(~chunk);
3622	return nc*fc;
3623}
3624
3625static struct r10conf *setup_conf(struct mddev *mddev)
3626{
3627	struct r10conf *conf = NULL;
3628	int err = -EINVAL;
3629	struct geom geo;
3630	int copies;
3631
3632	copies = setup_geo(&geo, mddev, geo_new);
3633
3634	if (copies == -2) {
3635		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
3636			mdname(mddev), PAGE_SIZE);
3637		goto out;
3638	}
3639
3640	if (copies < 2 || copies > mddev->raid_disks) {
3641		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3642			mdname(mddev), mddev->new_layout);
3643		goto out;
3644	}
3645
3646	err = -ENOMEM;
3647	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3648	if (!conf)
3649		goto out;
3650
3651	/* FIXME calc properly */
3652	conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
3653							    max(0,-mddev->delta_disks)),
3654				GFP_KERNEL);
3655	if (!conf->mirrors)
3656		goto out;
3657
3658	conf->tmppage = alloc_page(GFP_KERNEL);
3659	if (!conf->tmppage)
3660		goto out;
3661
3662	conf->geo = geo;
3663	conf->copies = copies;
3664	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3665					   r10bio_pool_free, conf);
3666	if (!conf->r10bio_pool)
3667		goto out;
3668
3669	calc_sectors(conf, mddev->dev_sectors);
3670	if (mddev->reshape_position == MaxSector) {
3671		conf->prev = conf->geo;
3672		conf->reshape_progress = MaxSector;
3673	} else {
3674		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3675			err = -EINVAL;
3676			goto out;
3677		}
3678		conf->reshape_progress = mddev->reshape_position;
3679		if (conf->prev.far_offset)
3680			conf->prev.stride = 1 << conf->prev.chunk_shift;
3681		else
3682			/* far_copies must be 1 */
3683			conf->prev.stride = conf->dev_sectors;
3684	}
3685	conf->reshape_safe = conf->reshape_progress;
3686	spin_lock_init(&conf->device_lock);
3687	INIT_LIST_HEAD(&conf->retry_list);
3688	INIT_LIST_HEAD(&conf->bio_end_io_list);
3689
3690	spin_lock_init(&conf->resync_lock);
3691	init_waitqueue_head(&conf->wait_barrier);
3692	atomic_set(&conf->nr_pending, 0);
3693
3694	conf->thread = md_register_thread(raid10d, mddev, "raid10");
3695	if (!conf->thread)
3696		goto out;
3697
3698	conf->mddev = mddev;
3699	return conf;
3700
3701 out:
3702	if (conf) {
3703		mempool_destroy(conf->r10bio_pool);
3704		kfree(conf->mirrors);
3705		safe_put_page(conf->tmppage);
3706		kfree(conf);
3707	}
3708	return ERR_PTR(err);
3709}
3710
3711static int raid10_run(struct mddev *mddev)
3712{
3713	struct r10conf *conf;
3714	int i, disk_idx, chunk_size;
3715	struct raid10_info *disk;
3716	struct md_rdev *rdev;
3717	sector_t size;
3718	sector_t min_offset_diff = 0;
3719	int first = 1;
3720	bool discard_supported = false;
3721
3722	if (mddev->private == NULL) {
3723		conf = setup_conf(mddev);
3724		if (IS_ERR(conf))
3725			return PTR_ERR(conf);
3726		mddev->private = conf;
3727	}
3728	conf = mddev->private;
3729	if (!conf)
3730		goto out;
3731
3732	mddev->thread = conf->thread;
3733	conf->thread = NULL;
3734
3735	chunk_size = mddev->chunk_sectors << 9;
3736	if (mddev->queue) {
3737		blk_queue_max_discard_sectors(mddev->queue,
3738					      mddev->chunk_sectors);
3739		blk_queue_max_write_same_sectors(mddev->queue, 0);
3740		blk_queue_io_min(mddev->queue, chunk_size);
3741		if (conf->geo.raid_disks % conf->geo.near_copies)
3742			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3743		else
3744			blk_queue_io_opt(mddev->queue, chunk_size *
3745					 (conf->geo.raid_disks / conf->geo.near_copies));
3746	}
3747
3748	rdev_for_each(rdev, mddev) {
3749		long long diff;
3750		struct request_queue *q;
3751
3752		disk_idx = rdev->raid_disk;
3753		if (disk_idx < 0)
3754			continue;
3755		if (disk_idx >= conf->geo.raid_disks &&
3756		    disk_idx >= conf->prev.raid_disks)
3757			continue;
3758		disk = conf->mirrors + disk_idx;
3759
3760		if (test_bit(Replacement, &rdev->flags)) {
3761			if (disk->replacement)
3762				goto out_free_conf;
3763			disk->replacement = rdev;
3764		} else {
3765			if (disk->rdev)
3766				goto out_free_conf;
3767			disk->rdev = rdev;
3768		}
3769		q = bdev_get_queue(rdev->bdev);
3770		diff = (rdev->new_data_offset - rdev->data_offset);
3771		if (!mddev->reshape_backwards)
3772			diff = -diff;
3773		if (diff < 0)
3774			diff = 0;
3775		if (first || diff < min_offset_diff)
3776			min_offset_diff = diff;
3777
3778		if (mddev->gendisk)
3779			disk_stack_limits(mddev->gendisk, rdev->bdev,
3780					  rdev->data_offset << 9);
3781
3782		disk->head_position = 0;
3783
3784		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3785			discard_supported = true;
3786	}
3787
3788	if (mddev->queue) {
3789		if (discard_supported)
3790			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3791						mddev->queue);
3792		else
3793			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3794						  mddev->queue);
3795	}
3796	/* need to check that every block has at least one working mirror */
3797	if (!enough(conf, -1)) {
3798		pr_err("md/raid10:%s: not enough operational mirrors.\n",
3799		       mdname(mddev));
3800		goto out_free_conf;
3801	}
3802
3803	if (conf->reshape_progress != MaxSector) {
3804		/* must ensure that shape change is supported */
3805		if (conf->geo.far_copies != 1 &&
3806		    conf->geo.far_offset == 0)
3807			goto out_free_conf;
3808		if (conf->prev.far_copies != 1 &&
3809		    conf->prev.far_offset == 0)
3810			goto out_free_conf;
3811	}
3812
3813	mddev->degraded = 0;
3814	for (i = 0;
3815	     i < conf->geo.raid_disks
3816		     || i < conf->prev.raid_disks;
3817	     i++) {
3818
3819		disk = conf->mirrors + i;
3820
3821		if (!disk->rdev && disk->replacement) {
3822			/* The replacement is all we have - use it */
3823			disk->rdev = disk->replacement;
3824			disk->replacement = NULL;
3825			clear_bit(Replacement, &disk->rdev->flags);
3826		}
3827
3828		if (!disk->rdev ||
3829		    !test_bit(In_sync, &disk->rdev->flags)) {
3830			disk->head_position = 0;
3831			mddev->degraded++;
3832			if (disk->rdev &&
3833			    disk->rdev->saved_raid_disk < 0)
3834				conf->fullsync = 1;
3835		}
3836		disk->recovery_disabled = mddev->recovery_disabled - 1;
3837	}
3838
3839	if (mddev->recovery_cp != MaxSector)
3840		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
3841			  mdname(mddev));
3842	pr_info("md/raid10:%s: active with %d out of %d devices\n",
3843		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3844		conf->geo.raid_disks);
3845	/*
3846	 * Ok, everything is just fine now
3847	 */
3848	mddev->dev_sectors = conf->dev_sectors;
3849	size = raid10_size(mddev, 0, 0);
3850	md_set_array_sectors(mddev, size);
3851	mddev->resync_max_sectors = size;
3852	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3853
3854	if (mddev->queue) {
3855		int stripe = conf->geo.raid_disks *
3856			((mddev->chunk_sectors << 9) / PAGE_SIZE);
3857
3858		/* Calculate max read-ahead size.
3859		 * We need to readahead at least twice a whole stripe....
3860		 * maybe...
3861		 */
3862		stripe /= conf->geo.near_copies;
3863		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3864			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3865	}
3866
3867	if (md_integrity_register(mddev))
3868		goto out_free_conf;
3869
3870	if (conf->reshape_progress != MaxSector) {
3871		unsigned long before_length, after_length;
3872
3873		before_length = ((1 << conf->prev.chunk_shift) *
3874				 conf->prev.far_copies);
3875		after_length = ((1 << conf->geo.chunk_shift) *
3876				conf->geo.far_copies);
3877
3878		if (max(before_length, after_length) > min_offset_diff) {
3879			/* This cannot work */
3880			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
3881			goto out_free_conf;
3882		}
3883		conf->offset_diff = min_offset_diff;
3884
3885		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3886		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3887		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3888		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3889		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3890							"reshape");
3891	}
3892
3893	return 0;
3894
3895out_free_conf:
3896	md_unregister_thread(&mddev->thread);
3897	mempool_destroy(conf->r10bio_pool);
3898	safe_put_page(conf->tmppage);
3899	kfree(conf->mirrors);
3900	kfree(conf);
3901	mddev->private = NULL;
3902out:
3903	return -EIO;
3904}
3905
3906static void raid10_free(struct mddev *mddev, void *priv)
3907{
3908	struct r10conf *conf = priv;
3909
3910	mempool_destroy(conf->r10bio_pool);
3911	safe_put_page(conf->tmppage);
3912	kfree(conf->mirrors);
3913	kfree(conf->mirrors_old);
3914	kfree(conf->mirrors_new);
3915	kfree(conf);
3916}
3917
3918static void raid10_quiesce(struct mddev *mddev, int state)
3919{
3920	struct r10conf *conf = mddev->private;
3921
3922	switch(state) {
3923	case 1:
3924		raise_barrier(conf, 0);
3925		break;
3926	case 0:
3927		lower_barrier(conf);
3928		break;
3929	}
3930}
3931
3932static int raid10_resize(struct mddev *mddev, sector_t sectors)
3933{
3934	/* Resize of 'far' arrays is not supported.
3935	 * For 'near' and 'offset' arrays we can set the
3936	 * number of sectors used to be an appropriate multiple
3937	 * of the chunk size.
3938	 * For 'offset', this is far_copies*chunksize.
3939	 * For 'near' the multiplier is the LCM of
3940	 * near_copies and raid_disks.
3941	 * So if far_copies > 1 && !far_offset, fail.
3942	 * Else find LCM(raid_disks, near_copies)*far_copies and
3943	 * multiply by chunk_size.  Then round to this number.
3944	 * This is mostly done by raid10_size()
3945	 */
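	/*
	 * Worked example with hypothetical numbers (the real rounding is
	 * performed by raid10_size()): a 'near' array with raid_disks=4,
	 * near_copies=2, far_copies=1 and 512KiB chunks rounds the usable
	 * size to a multiple of LCM(4,2)*1 = 4 chunks, i.e. 2MiB.
	 */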
3946	struct r10conf *conf = mddev->private;
3947	sector_t oldsize, size;
3948
3949	if (mddev->reshape_position != MaxSector)
3950		return -EBUSY;
3951
3952	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3953		return -EINVAL;
3954
3955	oldsize = raid10_size(mddev, 0, 0);
3956	size = raid10_size(mddev, sectors, 0);
3957	if (mddev->external_size &&
3958	    mddev->array_sectors > size)
3959		return -EINVAL;
3960	if (mddev->bitmap) {
3961		int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3962		if (ret)
3963			return ret;
3964	}
3965	md_set_array_sectors(mddev, size);
3966	if (mddev->queue) {
3967		set_capacity(mddev->gendisk, mddev->array_sectors);
3968		revalidate_disk(mddev->gendisk);
3969	}
3970	if (sectors > mddev->dev_sectors &&
3971	    mddev->recovery_cp > oldsize) {
3972		mddev->recovery_cp = oldsize;
3973		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3974	}
3975	calc_sectors(conf, sectors);
3976	mddev->dev_sectors = conf->dev_sectors;
3977	mddev->resync_max_sectors = size;
3978	return 0;
3979}
3980
3981static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
3982{
3983	struct md_rdev *rdev;
3984	struct r10conf *conf;
3985
3986	if (mddev->degraded > 0) {
3987		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
3988			mdname(mddev));
3989		return ERR_PTR(-EINVAL);
3990	}
3991	sector_div(size, devs);
3992
3993	/* Set new parameters */
3994	mddev->new_level = 10;
3995	/* new layout: far_copies = 1, near_copies = 2 */
3996	mddev->new_layout = (1<<8) + 2;
3997	mddev->new_chunk_sectors = mddev->chunk_sectors;
3998	mddev->delta_disks = mddev->raid_disks;
3999	mddev->raid_disks *= 2;
4000	/* make sure it will not be marked as dirty */
4001	mddev->recovery_cp = MaxSector;
4002	mddev->dev_sectors = size;
4003
4004	conf = setup_conf(mddev);
4005	if (!IS_ERR(conf)) {
4006		rdev_for_each(rdev, mddev)
4007			if (rdev->raid_disk >= 0) {
4008				rdev->new_raid_disk = rdev->raid_disk * 2;
4009				rdev->sectors = size;
4010			}
4011		conf->barrier = 1;
4012	}
4013
4014	return conf;
4015}
4016
4017static void *raid10_takeover(struct mddev *mddev)
4018{
4019	struct r0conf *raid0_conf;
4020
4021	/* raid10 can take over:
4022	 *  raid0 - providing it has only two drives
4023	 */
4024	if (mddev->level == 0) {
4025		/* for raid0 takeover only one zone is supported */
4026		raid0_conf = mddev->private;
4027		if (raid0_conf->nr_strip_zones > 1) {
4028			pr_warn("md/raid10:%s: cannot take over raid 0 with more than one zone.\n",
4029				mdname(mddev));
4030			return ERR_PTR(-EINVAL);
4031		}
4032		return raid10_takeover_raid0(mddev,
4033			raid0_conf->strip_zone->zone_end,
4034			raid0_conf->strip_zone->nb_dev);
4035	}
4036	return ERR_PTR(-EINVAL);
4037}
4038
4039static int raid10_check_reshape(struct mddev *mddev)
4040{
4041	/* Called when there is a request to change
4042	 * - layout (to ->new_layout)
4043	 * - chunk size (to ->new_chunk_sectors)
4044	 * - raid_disks (by delta_disks)
4045	 * or when trying to restart a reshape that was ongoing.
4046	 *
4047	 * We need to validate the request and possibly allocate
4048	 * space if that might be an issue later.
4049	 *
4050	 * Currently we reject any reshape of a 'far' mode array,
4051	 * allow chunk size to change if new is generally acceptable,
4052	 * allow raid_disks to increase, and allow
4053	 * a switch between 'near' mode and 'offset' mode.
4054	 */
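	/*
	 * For instance (hypothetical geometries): a 4-disk near=2 array
	 * may switch to offset=2 or grow to 6 disks, because the number
	 * of copies stays at 2; it may not become near=3 (the copy count
	 * would change), and a far=2 array is rejected outright.
	 */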
4055	struct r10conf *conf = mddev->private;
4056	struct geom geo;
4057
4058	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4059		return -EINVAL;
4060
4061	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4062		/* mustn't change number of copies */
4063		return -EINVAL;
4064	if (geo.far_copies > 1 && !geo.far_offset)
4065		/* Cannot switch to 'far' mode */
4066		return -EINVAL;
4067
4068	if (mddev->array_sectors & geo.chunk_mask)
4069		/* not a factor of array size */
4070		return -EINVAL;
4071
4072	if (!enough(conf, -1))
4073		return -EINVAL;
4074
4075	kfree(conf->mirrors_new);
4076	conf->mirrors_new = NULL;
4077	if (mddev->delta_disks > 0) {
4078		/* allocate new 'mirrors' list */
4079		conf->mirrors_new = kzalloc(
4080			sizeof(struct raid10_info)
4081			*(mddev->raid_disks +
4082			  mddev->delta_disks),
4083			GFP_KERNEL);
4084		if (!conf->mirrors_new)
4085			return -ENOMEM;
4086	}
4087	return 0;
4088}
4089
4090/*
4091 * Need to check if array has failed when deciding whether to:
4092 *  - start an array
4093 *  - remove non-faulty devices
4094 *  - add a spare
4095 *  - allow a reshape
4096 * This determination is simple when no reshape is happening.
4097 * However if there is a reshape, we need to carefully check
4098 * both the before and after sections.
4099 * This is because some failed devices may only affect one
4100 * of the two sections, and some non-in_sync devices may
4101 * be in_sync in the section most affected by failed devices.
4102 */
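/*
 * For example (hypothetical): while growing from 4 to 6 devices, a
 * freshly added member that is not yet In_sync does not count against
 * the 'after' geometry (the reshape itself will populate it), whereas a
 * missing or Faulty device counts in both passes; the larger of the two
 * counts is what gets reported.
 */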
4103static int calc_degraded(struct r10conf *conf)
4104{
4105	int degraded, degraded2;
4106	int i;
4107
4108	rcu_read_lock();
4109	degraded = 0;
4110	/* 'prev' section first */
4111	for (i = 0; i < conf->prev.raid_disks; i++) {
4112		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4113		if (!rdev || test_bit(Faulty, &rdev->flags))
4114			degraded++;
4115		else if (!test_bit(In_sync, &rdev->flags))
4116			/* When we can reduce the number of devices in
4117			 * an array, this might not contribute to
4118			 * 'degraded'.  It does now.
4119			 */
4120			degraded++;
4121	}
4122	rcu_read_unlock();
4123	if (conf->geo.raid_disks == conf->prev.raid_disks)
4124		return degraded;
4125	rcu_read_lock();
4126	degraded2 = 0;
4127	for (i = 0; i < conf->geo.raid_disks; i++) {
4128		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4129		if (!rdev || test_bit(Faulty, &rdev->flags))
4130			degraded2++;
4131		else if (!test_bit(In_sync, &rdev->flags)) {
4132			/* If reshape is increasing the number of devices,
4133			 * this section has already been recovered, so
4134			 * it doesn't contribute to degraded.
4135			 * else it does.
4136			 */
4137			if (conf->geo.raid_disks <= conf->prev.raid_disks)
4138				degraded2++;
4139		}
4140	}
4141	rcu_read_unlock();
4142	if (degraded2 > degraded)
4143		return degraded2;
4144	return degraded;
4145}
4146
4147static int raid10_start_reshape(struct mddev *mddev)
4148{
4149	/* A 'reshape' has been requested. This commits
4150	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
4151	 * This also checks if there are enough spares and adds them
4152	 * to the array.
4153	 * We currently require enough spares to make the final
4154	 * array non-degraded.  We also require that the difference
4155	 * between old and new data_offset - on each device - is
4156	 * enough that we never risk over-writing.
4157	 */
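	/*
	 * Hypothetical example of the offset requirement: with 512KiB
	 * chunks (a chunk_shift of 10 in sectors) and far_copies == 1,
	 * before_length and after_length are both 1024 sectors, so every
	 * member's new_data_offset must differ from its data_offset by at
	 * least 1024 sectors or the reshape is refused below.
	 */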
4158
4159	unsigned long before_length, after_length;
4160	sector_t min_offset_diff = 0;
4161	int first = 1;
4162	struct geom new;
4163	struct r10conf *conf = mddev->private;
4164	struct md_rdev *rdev;
4165	int spares = 0;
4166	int ret;
4167
4168	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4169		return -EBUSY;
4170
4171	if (setup_geo(&new, mddev, geo_start) != conf->copies)
4172		return -EINVAL;
4173
4174	before_length = ((1 << conf->prev.chunk_shift) *
4175			 conf->prev.far_copies);
4176	after_length = ((1 << conf->geo.chunk_shift) *
4177			conf->geo.far_copies);
4178
4179	rdev_for_each(rdev, mddev) {
4180		if (!test_bit(In_sync, &rdev->flags)
4181		    && !test_bit(Faulty, &rdev->flags))
4182			spares++;
4183		if (rdev->raid_disk >= 0) {
4184			long long diff = (rdev->new_data_offset
4185					  - rdev->data_offset);
4186			if (!mddev->reshape_backwards)
4187				diff = -diff;
4188			if (diff < 0)
4189				diff = 0;
4190			if (first || diff < min_offset_diff)
4191				min_offset_diff = diff;
4192		}
4193	}
4194
4195	if (max(before_length, after_length) > min_offset_diff)
4196		return -EINVAL;
4197
4198	if (spares < mddev->delta_disks)
4199		return -EINVAL;
4200
4201	conf->offset_diff = min_offset_diff;
4202	spin_lock_irq(&conf->device_lock);
4203	if (conf->mirrors_new) {
4204		memcpy(conf->mirrors_new, conf->mirrors,
4205		       sizeof(struct raid10_info)*conf->prev.raid_disks);
4206		smp_mb();
4207		kfree(conf->mirrors_old);
4208		conf->mirrors_old = conf->mirrors;
4209		conf->mirrors = conf->mirrors_new;
4210		conf->mirrors_new = NULL;
4211	}
4212	setup_geo(&conf->geo, mddev, geo_start);
4213	smp_mb();
4214	if (mddev->reshape_backwards) {
4215		sector_t size = raid10_size(mddev, 0, 0);
4216		if (size < mddev->array_sectors) {
4217			spin_unlock_irq(&conf->device_lock);
4218			pr_warn("md/raid10:%s: array size must be reduced before number of disks\n",
4219				mdname(mddev));
4220			return -EINVAL;
4221		}
4222		mddev->resync_max_sectors = size;
4223		conf->reshape_progress = size;
4224	} else
4225		conf->reshape_progress = 0;
4226	conf->reshape_safe = conf->reshape_progress;
4227	spin_unlock_irq(&conf->device_lock);
4228
4229	if (mddev->delta_disks && mddev->bitmap) {
4230		ret = bitmap_resize(mddev->bitmap,
4231				    raid10_size(mddev, 0,
4232						conf->geo.raid_disks),
4233				    0, 0);
4234		if (ret)
4235			goto abort;
4236	}
4237	if (mddev->delta_disks > 0) {
4238		rdev_for_each(rdev, mddev)
4239			if (rdev->raid_disk < 0 &&
4240			    !test_bit(Faulty, &rdev->flags)) {
4241				if (raid10_add_disk(mddev, rdev) == 0) {
4242					if (rdev->raid_disk >=
4243					    conf->prev.raid_disks)
4244						set_bit(In_sync, &rdev->flags);
4245					else
4246						rdev->recovery_offset = 0;
4247
4248					if (sysfs_link_rdev(mddev, rdev))
4249						/* Failure here is OK */;
4250				}
4251			} else if (rdev->raid_disk >= conf->prev.raid_disks
4252				   && !test_bit(Faulty, &rdev->flags)) {
4253				/* This is a spare that was manually added */
4254				set_bit(In_sync, &rdev->flags);
4255			}
4256	}
4257	/* When a reshape changes the number of devices,
4258	 * ->degraded is measured against the larger of the
4259	 * pre and post numbers.
4260	 */
4261	spin_lock_irq(&conf->device_lock);
4262	mddev->degraded = calc_degraded(conf);
4263	spin_unlock_irq(&conf->device_lock);
4264	mddev->raid_disks = conf->geo.raid_disks;
4265	mddev->reshape_position = conf->reshape_progress;
4266	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4267
4268	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4269	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4270	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4271	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4272	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4273
4274	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4275						"reshape");
4276	if (!mddev->sync_thread) {
4277		ret = -EAGAIN;
4278		goto abort;
4279	}
4280	conf->reshape_checkpoint = jiffies;
4281	md_wakeup_thread(mddev->sync_thread);
4282	md_new_event(mddev);
4283	return 0;
4284
4285abort:
4286	mddev->recovery = 0;
4287	spin_lock_irq(&conf->device_lock);
4288	conf->geo = conf->prev;
4289	mddev->raid_disks = conf->geo.raid_disks;
4290	rdev_for_each(rdev, mddev)
4291		rdev->new_data_offset = rdev->data_offset;
4292	smp_wmb();
4293	conf->reshape_progress = MaxSector;
4294	conf->reshape_safe = MaxSector;
4295	mddev->reshape_position = MaxSector;
4296	spin_unlock_irq(&conf->device_lock);
4297	return ret;
4298}
4299
4300/* Calculate the last device-address that could contain
4301 * any block from the chunk that includes the array-address 's'
4302 * and report the next address.
4303 * i.e. the address returned will be chunk-aligned and after
4304 * any data that is in the chunk containing 's'.
4305 */
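/*
 * Worked example (hypothetical geometry): with 1024-sector chunks
 * (chunk_mask 1023, chunk_shift 10), near_copies=2, raid_disks=4 and
 * far_copies=1, s=3000 lies in array chunk 2, whose copies occupy
 * device chunk 1 (device sectors 1024-2047).  The calculation below
 * gives ((3000|1023)+1)>>10 = 3, *near_copies = 6, rounded up over
 * 4 disks = 2, *far_copies = 2, <<10 = 2048: the first chunk-aligned
 * device address past that data.
 */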
4306static sector_t last_dev_address(sector_t s, struct geom *geo)
4307{
4308	s = (s | geo->chunk_mask) + 1;
4309	s >>= geo->chunk_shift;
4310	s *= geo->near_copies;
4311	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4312	s *= geo->far_copies;
4313	s <<= geo->chunk_shift;
4314	return s;
4315}
4316
4317/* Calculate the first device-address that could contain
4318 * any block from the chunk that includes the array-address 's'.
4319 * This too will be the start of a chunk
4320 */
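/*
 * With the same hypothetical geometry as above and s=3000:
 * 3000>>10 = 2, *near_copies = 4, /4 disks = 1, *far_copies = 1,
 * <<10 = 1024, i.e. the start of the device chunk that holds the
 * first copy of array chunk 2.
 */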
4321static sector_t first_dev_address(sector_t s, struct geom *geo)
4322{
4323	s >>= geo->chunk_shift;
4324	s *= geo->near_copies;
4325	sector_div(s, geo->raid_disks);
4326	s *= geo->far_copies;
4327	s <<= geo->chunk_shift;
4328	return s;
4329}
4330
4331static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4332				int *skipped)
4333{
4334	/* We simply copy at most one chunk (smallest of old and new)
4335	 * at a time, possibly less if that exceeds RESYNC_PAGES,
4336	 * or we hit a bad block or something.
4337	 * This might mean we pause for normal IO in the middle of
4338	 * a chunk, but that is not a problem as mddev->reshape_position
4339	 * can record any location.
4340	 *
4341	 * If we will want to write to a location that isn't
4342	 * yet recorded as 'safe' (i.e. in metadata on disk) then
4343	 * we need to flush all reshape requests and update the metadata.
4344	 *
4345	 * When reshaping forwards (e.g. to more devices), we interpret
4346	 * 'safe' as the earliest block which might not have been copied
4347	 * down yet.  We divide this by previous stripe size and multiply
4348	 * by previous stripe length to get lowest device offset that we
4349	 * cannot write to yet.
4350	 * We interpret 'sector_nr' as an address that we want to write to.
4351	 * From this we use last_dev_address() to find where we might
4352	 * write to, and first_dev_address() on the 'safe' position.
4353	 * If this 'next' write position is after the 'safe' position,
4354	 * we must update the metadata to increase the 'safe' position.
4355	 *
4356	 * When reshaping backwards, we round in the opposite direction
4357	 * and perform the reverse test:  next write position must not be
4358	 * less than current safe position.
4359	 *
4360	 * In all this the minimum difference in data offsets
4361	 * (conf->offset_diff - always positive) allows a bit of slack,
4362	 * so next can be after 'safe', but not by more than offset_diff
4363	 *
4364	 * We need to prepare all the bios here before we start any IO
4365	 * to ensure the size we choose is acceptable to all devices.
4366	 * That means one for each copy for write-out and an extra one for
4367	 * read-in.
4368	 * We store the read-in bio in ->master_bio and the others in
4369	 * ->devs[x].bio and ->devs[x].repl_bio.
4370	 */
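	/*
	 * Hypothetical illustration of the forward-reshape test below: if
	 * 'next' computes to 5120, 'safe' to 3072 and conf->offset_diff is
	 * 1024, then 5120 > 3072 + 1024, so the metadata must be brought
	 * up to date before any of these writes are issued.
	 */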
4371	struct r10conf *conf = mddev->private;
4372	struct r10bio *r10_bio;
4373	sector_t next, safe, last;
4374	int max_sectors;
4375	int nr_sectors;
4376	int s;
4377	struct md_rdev *rdev;
4378	int need_flush = 0;
4379	struct bio *blist;
4380	struct bio *bio, *read_bio;
4381	int sectors_done = 0;
4382
4383	if (sector_nr == 0) {
4384		/* If restarting in the middle, skip the initial sectors */
4385		if (mddev->reshape_backwards &&
4386		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4387			sector_nr = (raid10_size(mddev, 0, 0)
4388				     - conf->reshape_progress);
4389		} else if (!mddev->reshape_backwards &&
4390			   conf->reshape_progress > 0)
4391			sector_nr = conf->reshape_progress;
4392		if (sector_nr) {
4393			mddev->curr_resync_completed = sector_nr;
4394			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4395			*skipped = 1;
4396			return sector_nr;
4397		}
4398	}
4399
4400	/* We don't use sector_nr to track where we are up to
4401	 * as that doesn't work well for ->reshape_backwards.
4402	 * So just use ->reshape_progress.
4403	 */
4404	if (mddev->reshape_backwards) {
4405		/* 'next' is the earliest device address that we might
4406		 * write to for this chunk in the new layout
4407		 */
4408		next = first_dev_address(conf->reshape_progress - 1,
4409					 &conf->geo);
4410
4411		/* 'safe' is the last device address that we might read from
4412		 * in the old layout after a restart
4413		 */
4414		safe = last_dev_address(conf->reshape_safe - 1,
4415					&conf->prev);
4416
4417		if (next + conf->offset_diff < safe)
4418			need_flush = 1;
4419
4420		last = conf->reshape_progress - 1;
4421		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4422					       & conf->prev.chunk_mask);
4423		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4424			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4425	} else {
4426		/* 'next' is after the last device address that we
4427		 * might write to for this chunk in the new layout
4428		 */
4429		next = last_dev_address(conf->reshape_progress, &conf->geo);
4430
4431		/* 'safe' is the earliest device address that we might
4432		 * read from in the old layout after a restart
4433		 */
4434		safe = first_dev_address(conf->reshape_safe, &conf->prev);
4435
4436		/* Need to update metadata if 'next' might be beyond 'safe'
4437		 * as that would possibly corrupt data
4438		 */
4439		if (next > safe + conf->offset_diff)
4440			need_flush = 1;
4441
4442		sector_nr = conf->reshape_progress;
4443		last  = sector_nr | (conf->geo.chunk_mask
4444				     & conf->prev.chunk_mask);
4445
4446		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4447			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4448	}
4449
4450	if (need_flush ||
4451	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4452		/* Need to update reshape_position in metadata */
4453		wait_barrier(conf);
4454		mddev->reshape_position = conf->reshape_progress;
4455		if (mddev->reshape_backwards)
4456			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4457				- conf->reshape_progress;
4458		else
4459			mddev->curr_resync_completed = conf->reshape_progress;
4460		conf->reshape_checkpoint = jiffies;
4461		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4462		md_wakeup_thread(mddev->thread);
4463		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4464			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4465		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4466			allow_barrier(conf);
4467			return sectors_done;
4468		}
4469		conf->reshape_safe = mddev->reshape_position;
4470		allow_barrier(conf);
4471	}
4472
4473read_more:
4474	/* Now schedule reads for blocks from sector_nr to last */
4475	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4476	r10_bio->state = 0;
4477	raise_barrier(conf, sectors_done != 0);
4478	atomic_set(&r10_bio->remaining, 0);
4479	r10_bio->mddev = mddev;
4480	r10_bio->sector = sector_nr;
4481	set_bit(R10BIO_IsReshape, &r10_bio->state);
4482	r10_bio->sectors = last - sector_nr + 1;
4483	rdev = read_balance(conf, r10_bio, &max_sectors);
4484	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4485
4486	if (!rdev) {
4487		/* Cannot read from here, so need to record bad blocks
4488		 * on all the target devices.
4489		 */
4490		// FIXME
4491		mempool_free(r10_bio, conf->r10buf_pool);
4492		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4493		return sectors_done;
4494	}
4495
4496	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4497
4498	read_bio->bi_bdev = rdev->bdev;
4499	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4500			       + rdev->data_offset);
4501	read_bio->bi_private = r10_bio;
4502	read_bio->bi_end_io = end_sync_read;
4503	bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
4504	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4505	read_bio->bi_error = 0;
4506	read_bio->bi_vcnt = 0;
4507	read_bio->bi_iter.bi_size = 0;
4508	r10_bio->master_bio = read_bio;
4509	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4510
4511	/* Now find the locations in the new layout */
4512	__raid10_find_phys(&conf->geo, r10_bio);
4513
4514	blist = read_bio;
4515	read_bio->bi_next = NULL;
4516
4517	rcu_read_lock();
4518	for (s = 0; s < conf->copies*2; s++) {
4519		struct bio *b;
4520		int d = r10_bio->devs[s/2].devnum;
4521		struct md_rdev *rdev2;
4522		if (s&1) {
4523			rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4524			b = r10_bio->devs[s/2].repl_bio;
4525		} else {
4526			rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4527			b = r10_bio->devs[s/2].bio;
4528		}
4529		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4530			continue;
4531
4532		bio_reset(b);
4533		b->bi_bdev = rdev2->bdev;
4534		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4535			rdev2->new_data_offset;
4536		b->bi_private = r10_bio;
4537		b->bi_end_io = end_reshape_write;
4538		bio_set_op_attrs(b, REQ_OP_WRITE, 0);
4539		b->bi_next = blist;
4540		blist = b;
4541	}
4542
4543	/* Now add as many pages as possible to all of these bios. */
4544
4545	nr_sectors = 0;
4546	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4547		struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4548		int len = (max_sectors - s) << 9;
4549		if (len > PAGE_SIZE)
4550			len = PAGE_SIZE;
4551		for (bio = blist; bio ; bio = bio->bi_next) {
4552			struct bio *bio2;
4553			if (bio_add_page(bio, page, len, 0))
4554				continue;
4555
4556			/* Didn't fit, must stop */
4557			for (bio2 = blist;
4558			     bio2 && bio2 != bio;
4559			     bio2 = bio2->bi_next) {
4560				/* Remove last page from this bio */
4561				bio2->bi_vcnt--;
4562				bio2->bi_iter.bi_size -= len;
4563				bio_clear_flag(bio2, BIO_SEG_VALID);
4564			}
4565			goto bio_full;
4566		}
4567		sector_nr += len >> 9;
4568		nr_sectors += len >> 9;
4569	}
4570bio_full:
4571	rcu_read_unlock();
4572	r10_bio->sectors = nr_sectors;
4573
4574	/* Now submit the read */
4575	md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4576	atomic_inc(&r10_bio->remaining);
4577	read_bio->bi_next = NULL;
4578	generic_make_request(read_bio);
4579	sector_nr += nr_sectors;
4580	sectors_done += nr_sectors;
4581	if (sector_nr <= last)
4582		goto read_more;
4583
4584	/* Now that we have done the whole section we can
4585	 * update reshape_progress
4586	 */
4587	if (mddev->reshape_backwards)
4588		conf->reshape_progress -= sectors_done;
4589	else
4590		conf->reshape_progress += sectors_done;
4591
4592	return sectors_done;
4593}
4594
4595static void end_reshape_request(struct r10bio *r10_bio);
4596static int handle_reshape_read_error(struct mddev *mddev,
4597				     struct r10bio *r10_bio);
4598static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4599{
4600	/* Reshape read completed.  Hopefully we have a block
4601	 * to write out.
4602	 * If we got a read error then we do sync 1-page reads from
4603	 * elsewhere until we find the data - or give up.
4604	 */
4605	struct r10conf *conf = mddev->private;
4606	int s;
4607
4608	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4609		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4610			/* Reshape has been aborted */
4611			md_done_sync(mddev, r10_bio->sectors, 0);
4612			return;
4613		}
4614
4615	/* We definitely have the data in the pages, schedule the
4616	 * writes.
4617	 */
4618	atomic_set(&r10_bio->remaining, 1);
4619	for (s = 0; s < conf->copies*2; s++) {
4620		struct bio *b;
4621		int d = r10_bio->devs[s/2].devnum;
4622		struct md_rdev *rdev;
4623		rcu_read_lock();
4624		if (s&1) {
4625			rdev = rcu_dereference(conf->mirrors[d].replacement);
4626			b = r10_bio->devs[s/2].repl_bio;
4627		} else {
4628			rdev = rcu_dereference(conf->mirrors[d].rdev);
4629			b = r10_bio->devs[s/2].bio;
4630		}
4631		if (!rdev || test_bit(Faulty, &rdev->flags)) {
4632			rcu_read_unlock();
4633			continue;
4634		}
4635		atomic_inc(&rdev->nr_pending);
4636		rcu_read_unlock();
4637		md_sync_acct(b->bi_bdev, r10_bio->sectors);
4638		atomic_inc(&r10_bio->remaining);
4639		b->bi_next = NULL;
4640		generic_make_request(b);
4641	}
4642	end_reshape_request(r10_bio);
4643}
4644
4645static void end_reshape(struct r10conf *conf)
4646{
4647	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4648		return;
4649
4650	spin_lock_irq(&conf->device_lock);
4651	conf->prev = conf->geo;
4652	md_finish_reshape(conf->mddev);
4653	smp_wmb();
4654	conf->reshape_progress = MaxSector;
4655	conf->reshape_safe = MaxSector;
4656	spin_unlock_irq(&conf->device_lock);
4657
4658	/* read-ahead size must cover two whole stripes, which is
4659	 * 2 * (datadisks) * chunksize, where datadisks is raid_disks / near_copies
4660	 */
4661	if (conf->mddev->queue) {
4662		int stripe = conf->geo.raid_disks *
4663			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4664		stripe /= conf->geo.near_copies;
4665		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4666			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4667	}
4668	conf->fullsync = 0;
4669}
4670
4671static int handle_reshape_read_error(struct mddev *mddev,
4672				     struct r10bio *r10_bio)
4673{
4674	/* Use sync reads to get the blocks from somewhere else */
4675	int sectors = r10_bio->sectors;
4676	struct r10conf *conf = mddev->private;
4677	struct {
4678		struct r10bio r10_bio;
4679		struct r10dev devs[conf->copies];
4680	} on_stack;
4681	struct r10bio *r10b = &on_stack.r10_bio;
4682	int slot = 0;
4683	int idx = 0;
4684	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4685
4686	r10b->sector = r10_bio->sector;
4687	__raid10_find_phys(&conf->prev, r10b);
4688
4689	while (sectors) {
4690		int s = sectors;
4691		int success = 0;
4692		int first_slot = slot;
4693
4694		if (s > (PAGE_SIZE >> 9))
4695			s = PAGE_SIZE >> 9;
4696
4697		rcu_read_lock();
4698		while (!success) {
4699			int d = r10b->devs[slot].devnum;
4700			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4701			sector_t addr;
4702			if (rdev == NULL ||
4703			    test_bit(Faulty, &rdev->flags) ||
4704			    !test_bit(In_sync, &rdev->flags))
4705				goto failed;
4706
4707			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4708			atomic_inc(&rdev->nr_pending);
4709			rcu_read_unlock();
4710			success = sync_page_io(rdev,
4711					       addr,
4712					       s << 9,
4713					       bvec[idx].bv_page,
4714					       REQ_OP_READ, 0, false);
4715			rdev_dec_pending(rdev, mddev);
4716			rcu_read_lock();
4717			if (success)
4718				break;
4719		failed:
4720			slot++;
4721			if (slot >= conf->copies)
4722				slot = 0;
4723			if (slot == first_slot)
4724				break;
4725		}
4726		rcu_read_unlock();
4727		if (!success) {
4728			/* couldn't read this block, must give up */
4729			set_bit(MD_RECOVERY_INTR,
4730				&mddev->recovery);
4731			return -EIO;
4732		}
4733		sectors -= s;
4734		idx++;
4735	}
4736	return 0;
4737}
4738
4739static void end_reshape_write(struct bio *bio)
4740{
4741	struct r10bio *r10_bio = bio->bi_private;
4742	struct mddev *mddev = r10_bio->mddev;
4743	struct r10conf *conf = mddev->private;
4744	int d;
4745	int slot;
4746	int repl;
4747	struct md_rdev *rdev = NULL;
4748
4749	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4750	if (repl)
4751		rdev = conf->mirrors[d].replacement;
4752	if (!rdev) {
4753		smp_mb();
4754		rdev = conf->mirrors[d].rdev;
4755	}
4756
4757	if (bio->bi_error) {
4758		/* FIXME should record badblock */
4759		md_error(mddev, rdev);
4760	}
4761
4762	rdev_dec_pending(rdev, mddev);
4763	end_reshape_request(r10_bio);
4764}
4765
4766static void end_reshape_request(struct r10bio *r10_bio)
4767{
4768	if (!atomic_dec_and_test(&r10_bio->remaining))
4769		return;
4770	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4771	bio_put(r10_bio->master_bio);
4772	put_buf(r10_bio);
4773}
4774
4775static void raid10_finish_reshape(struct mddev *mddev)
4776{
4777	struct r10conf *conf = mddev->private;
4778
4779	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4780		return;
4781
4782	if (mddev->delta_disks > 0) {
4783		sector_t size = raid10_size(mddev, 0, 0);
4784		md_set_array_sectors(mddev, size);
4785		if (mddev->recovery_cp > mddev->resync_max_sectors) {
4786			mddev->recovery_cp = mddev->resync_max_sectors;
4787			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4788		}
4789		mddev->resync_max_sectors = size;
4790		if (mddev->queue) {
4791			set_capacity(mddev->gendisk, mddev->array_sectors);
4792			revalidate_disk(mddev->gendisk);
4793		}
4794	} else {
4795		int d;
4796		rcu_read_lock();
4797		for (d = conf->geo.raid_disks ;
4798		     d < conf->geo.raid_disks - mddev->delta_disks;
4799		     d++) {
4800			struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4801			if (rdev)
4802				clear_bit(In_sync, &rdev->flags);
4803			rdev = rcu_dereference(conf->mirrors[d].replacement);
4804			if (rdev)
4805				clear_bit(In_sync, &rdev->flags);
4806		}
4807		rcu_read_unlock();
4808	}
4809	mddev->layout = mddev->new_layout;
4810	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4811	mddev->reshape_position = MaxSector;
4812	mddev->delta_disks = 0;
4813	mddev->reshape_backwards = 0;
4814}
4815
4816static struct md_personality raid10_personality =
4817{
4818	.name		= "raid10",
4819	.level		= 10,
4820	.owner		= THIS_MODULE,
4821	.make_request	= raid10_make_request,
4822	.run		= raid10_run,
4823	.free		= raid10_free,
4824	.status		= raid10_status,
4825	.error_handler	= raid10_error,
4826	.hot_add_disk	= raid10_add_disk,
4827	.hot_remove_disk= raid10_remove_disk,
4828	.spare_active	= raid10_spare_active,
4829	.sync_request	= raid10_sync_request,
4830	.quiesce	= raid10_quiesce,
4831	.size		= raid10_size,
4832	.resize		= raid10_resize,
4833	.takeover	= raid10_takeover,
4834	.check_reshape	= raid10_check_reshape,
4835	.start_reshape	= raid10_start_reshape,
4836	.finish_reshape	= raid10_finish_reshape,
4837	.congested	= raid10_congested,
4838};
4839
4840static int __init raid_init(void)
4841{
4842	return register_md_personality(&raid10_personality);
4843}
4844
4845static void raid_exit(void)
4846{
4847	unregister_md_personality(&raid10_personality);
4848}
4849
4850module_init(raid_init);
4851module_exit(raid_exit);
4852MODULE_LICENSE("GPL");
4853MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4854MODULE_ALIAS("md-personality-9"); /* RAID10 */
4855MODULE_ALIAS("md-raid10");
4856MODULE_ALIAS("md-level-10");
4857
4858module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);