   1/*
   2 * raid5.c : Multiple Devices driver for Linux
   3 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   4 *	   Copyright (C) 1999, 2000 Ingo Molnar
   5 *	   Copyright (C) 2002, 2003 H. Peter Anvin
   6 *
   7 * RAID-4/5/6 management functions.
   8 * Thanks to Penguin Computing for making the RAID-6 development possible
   9 * by donating a test server!
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21/*
  22 * BITMAP UNPLUGGING:
  23 *
  24 * The sequencing for updating the bitmap reliably is a little
  25 * subtle (and I got it wrong the first time) so it deserves some
  26 * explanation.
  27 *
  28 * We group bitmap updates into batches.  Each batch has a number.
  29 * We may write out several batches at once, but that isn't very important.
  30 * conf->seq_write is the number of the last batch successfully written.
  31 * conf->seq_flush is the number of the last batch that was closed to
  32 *    new additions.
  33 * When we discover that we will need to write to any block in a stripe
  34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  35 * the number of the batch it will be in. This is seq_flush+1.
  36 * When we are ready to do a write, if that batch hasn't been written yet,
  37 *   we plug the array and queue the stripe for later.
  38 * When an unplug happens, we increment seq_flush, thus closing the current
  39 *   batch.
  40 * When we notice that seq_flush > seq_write, we write out all pending updates
  41 * to the bitmap, and advance seq_write to where seq_flush was.
  42 * This may occasionally write a bit out twice, but is sure never to
  43 * miss any bits.
  44 */
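    /*
     * For example: with seq_flush == seq_write == 5, a new write records
     * bm_seq = 6 in its stripe.  If that write becomes ready before batch 6
     * has been written, the stripe is queued for later; an unplug then
     * advances seq_flush to 6, and once seq_flush (6) > seq_write (5) the
     * pending bitmap updates are written out and seq_write is advanced to 6,
     * allowing the queued stripe to proceed.
     */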
  45
  46#include <linux/blkdev.h>
  47#include <linux/kthread.h>
  48#include <linux/raid/pq.h>
  49#include <linux/async_tx.h>
  50#include <linux/module.h>
  51#include <linux/async.h>
  52#include <linux/seq_file.h>
  53#include <linux/cpu.h>
  54#include <linux/slab.h>
  55#include <linux/ratelimit.h>
  56#include "md.h"
  57#include "raid5.h"
  58#include "raid0.h"
  59#include "bitmap.h"
  60
  61/*
  62 * Stripe cache
  63 */
  64
  65#define NR_STRIPES		256
  66#define STRIPE_SIZE		PAGE_SIZE
  67#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
  68#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
  69#define	IO_THRESHOLD		1
  70#define BYPASS_THRESHOLD	1
  71#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
  72#define HASH_MASK		(NR_HASH - 1)
  73
  74static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  75{
  76	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
  77	return &conf->stripe_hashtbl[hash];
  78}
  79
  80/* bio's attached to a stripe+device for I/O are linked together in bi_sector
  81 * order without overlap.  There may be several bio's per stripe+device, and
  82 * a bio could span several devices.
  83 * When walking this list for a particular stripe+device, we must never proceed
  84 * beyond a bio that extends past this device, as the next bio might no longer
  85 * be valid.
  86 * This function is used to determine the 'next' bio in the list, given the sector
  87 * of the current stripe+device
  88 */
  89static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
  90{
  91	int sectors = bio->bi_size >> 9;
  92	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
  93		return bio->bi_next;
  94	else
  95		return NULL;
  96}
  97
  98/*
  99 * We maintain a biased count of active stripes in the bottom 16 bits of
 100 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 101 */
 102static inline int raid5_bi_phys_segments(struct bio *bio)
 103{
 104	return bio->bi_phys_segments & 0xffff;
 105}
 106
 107static inline int raid5_bi_hw_segments(struct bio *bio)
 108{
 109	return (bio->bi_phys_segments >> 16) & 0xffff;
 110}
 111
 112static inline int raid5_dec_bi_phys_segments(struct bio *bio)
 113{
 114	--bio->bi_phys_segments;
 115	return raid5_bi_phys_segments(bio);
 116}
 117
 118static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 119{
 120	unsigned short val = raid5_bi_hw_segments(bio);
 121
 122	--val;
 123	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
 124	return val;
 125}
 126
 127static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 128{
 129	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 130}
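    /*
     * For example, bi_phys_segments == 0x00020003 encodes an active stripe
     * count of 3 (raid5_bi_phys_segments) and a processed stripe count of 2
     * (raid5_bi_hw_segments); raid5_dec_bi_phys_segments() on such a bio
     * returns 2 and leaves the value at 0x00020002.
     */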
 131
 132/* Find first data disk in a raid6 stripe */
 133static inline int raid6_d0(struct stripe_head *sh)
 134{
 135	if (sh->ddf_layout)
 136		/* ddf always starts from the first device */
 137		return 0;
 138	/* md starts just after Q block */
 139	if (sh->qd_idx == sh->disks - 1)
 140		return 0;
 141	else
 142		return sh->qd_idx + 1;
 143}
 144static inline int raid6_next_disk(int disk, int raid_disks)
 145{
 146	disk++;
 147	return (disk < raid_disks) ? disk : 0;
 148}
 149
 150/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
 151 * we need to map each disk to a 'slot', where the data disks are slot
 152 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 153 * is raid_disks-1.  This helper does that mapping.
 154 */
 155static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 156			     int *count, int syndrome_disks)
 157{
 158	int slot = *count;
 159
 160	if (sh->ddf_layout)
 161		(*count)++;
 162	if (idx == sh->pd_idx)
 163		return syndrome_disks;
 164	if (idx == sh->qd_idx)
 165		return syndrome_disks + 1;
 166	if (!sh->ddf_layout)
 167		(*count)++;
 168	return slot;
 169}
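    /*
     * For example, with the md (non-ddf) layout on a 6-device array where
     * pd_idx == 1 and qd_idx == 2: raid6_d0() returns 3, and walking the
     * devices 3,4,5,0,1,2 with raid6_next_disk() maps the data disks to
     * slots 0,1,2,3, P to slot 4 (syndrome_disks) and Q to slot 5
     * (syndrome_disks + 1).
     */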
 170
 171static void return_io(struct bio *return_bi)
 172{
 173	struct bio *bi = return_bi;
 174	while (bi) {
 175
 176		return_bi = bi->bi_next;
 177		bi->bi_next = NULL;
 178		bi->bi_size = 0;
 179		bio_endio(bi, 0);
 180		bi = return_bi;
 181	}
 182}
 183
 184static void print_raid5_conf (struct r5conf *conf);
 185
 186static int stripe_operations_active(struct stripe_head *sh)
 187{
 188	return sh->check_state || sh->reconstruct_state ||
 189	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
 190	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 191}
 192
 193static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
 194{
 195	if (atomic_dec_and_test(&sh->count)) {
 196		BUG_ON(!list_empty(&sh->lru));
 197		BUG_ON(atomic_read(&conf->active_stripes)==0);
 198		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 199			if (test_bit(STRIPE_DELAYED, &sh->state) &&
 200			    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 201				list_add_tail(&sh->lru, &conf->delayed_list);
 202			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 203				   sh->bm_seq - conf->seq_write > 0)
 204				list_add_tail(&sh->lru, &conf->bitmap_list);
 205			else {
 206				clear_bit(STRIPE_DELAYED, &sh->state);
 207				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 208				list_add_tail(&sh->lru, &conf->handle_list);
 209			}
 210			md_wakeup_thread(conf->mddev->thread);
 211		} else {
 212			BUG_ON(stripe_operations_active(sh));
 213			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 214				if (atomic_dec_return(&conf->preread_active_stripes)
 215				    < IO_THRESHOLD)
 216					md_wakeup_thread(conf->mddev->thread);
 217			atomic_dec(&conf->active_stripes);
 218			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 219				list_add_tail(&sh->lru, &conf->inactive_list);
 220				wake_up(&conf->wait_for_stripe);
 221				if (conf->retry_read_aligned)
 222					md_wakeup_thread(conf->mddev->thread);
 223			}
 224		}
 225	}
 226}
 227
 228static void release_stripe(struct stripe_head *sh)
 229{
 230	struct r5conf *conf = sh->raid_conf;
 231	unsigned long flags;
 232
 233	spin_lock_irqsave(&conf->device_lock, flags);
 234	__release_stripe(conf, sh);
 235	spin_unlock_irqrestore(&conf->device_lock, flags);
 236}
 237
 238static inline void remove_hash(struct stripe_head *sh)
 239{
 240	pr_debug("remove_hash(), stripe %llu\n",
 241		(unsigned long long)sh->sector);
 242
 243	hlist_del_init(&sh->hash);
 244}
 245
 246static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
 247{
 248	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 249
 250	pr_debug("insert_hash(), stripe %llu\n",
 251		(unsigned long long)sh->sector);
 252
 253	hlist_add_head(&sh->hash, hp);
 254}
 255
 256
 257/* find an idle stripe, make sure it is unhashed, and return it. */
 258static struct stripe_head *get_free_stripe(struct r5conf *conf)
 259{
 260	struct stripe_head *sh = NULL;
 261	struct list_head *first;
 262
 263	if (list_empty(&conf->inactive_list))
 264		goto out;
 265	first = conf->inactive_list.next;
 266	sh = list_entry(first, struct stripe_head, lru);
 267	list_del_init(first);
 268	remove_hash(sh);
 269	atomic_inc(&conf->active_stripes);
 270out:
 271	return sh;
 272}
 273
 274static void shrink_buffers(struct stripe_head *sh)
 275{
 276	struct page *p;
 277	int i;
 278	int num = sh->raid_conf->pool_size;
 279
 280	for (i = 0; i < num ; i++) {
 281		p = sh->dev[i].page;
 282		if (!p)
 283			continue;
 284		sh->dev[i].page = NULL;
 285		put_page(p);
 286	}
 287}
 288
 289static int grow_buffers(struct stripe_head *sh)
 290{
 291	int i;
 292	int num = sh->raid_conf->pool_size;
 293
 294	for (i = 0; i < num; i++) {
 295		struct page *page;
 296
 297		if (!(page = alloc_page(GFP_KERNEL))) {
 298			return 1;
 299		}
 300		sh->dev[i].page = page;
 301	}
 302	return 0;
 303}
 304
 305static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 306static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
 307			    struct stripe_head *sh);
 308
 309static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 310{
 311	struct r5conf *conf = sh->raid_conf;
 312	int i;
 313
 314	BUG_ON(atomic_read(&sh->count) != 0);
 315	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 316	BUG_ON(stripe_operations_active(sh));
 317
 318	pr_debug("init_stripe called, stripe %llu\n",
 319		(unsigned long long)sh->sector);
 320
 321	remove_hash(sh);
 322
 323	sh->generation = conf->generation - previous;
 324	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 325	sh->sector = sector;
 326	stripe_set_idx(sector, conf, previous, sh);
 327	sh->state = 0;
 328
 329
 330	for (i = sh->disks; i--; ) {
 331		struct r5dev *dev = &sh->dev[i];
 332
 333		if (dev->toread || dev->read || dev->towrite || dev->written ||
 334		    test_bit(R5_LOCKED, &dev->flags)) {
 335			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
 336			       (unsigned long long)sh->sector, i, dev->toread,
 337			       dev->read, dev->towrite, dev->written,
 338			       test_bit(R5_LOCKED, &dev->flags));
 339			WARN_ON(1);
 340		}
 341		dev->flags = 0;
 342		raid5_build_block(sh, i, previous);
 343	}
 344	insert_hash(conf, sh);
 345}
 346
 347static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 348					 short generation)
 349{
 350	struct stripe_head *sh;
 351	struct hlist_node *hn;
 352
 353	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 354	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 355		if (sh->sector == sector && sh->generation == generation)
 356			return sh;
 357	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 358	return NULL;
 359}
 360
 361/*
 362 * Need to check if array has failed when deciding whether to:
 363 *  - start an array
 364 *  - remove non-faulty devices
 365 *  - add a spare
 366 *  - allow a reshape
 367 * This determination is simple when no reshape is happening.
 368 * However if there is a reshape, we need to carefully check
 369 * both the before and after sections.
 370 * This is because some failed devices may only affect one
 371 * of the two sections, and some non-in_sync devices may
 372 * be insync in the section most affected by failed devices.
 373 */
 374static int calc_degraded(struct r5conf *conf)
 375{
 376	int degraded, degraded2;
 377	int i;
 378
 379	rcu_read_lock();
 380	degraded = 0;
 381	for (i = 0; i < conf->previous_raid_disks; i++) {
 382		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 383		if (rdev && test_bit(Faulty, &rdev->flags))
 384			rdev = rcu_dereference(conf->disks[i].replacement);
 385		if (!rdev || test_bit(Faulty, &rdev->flags))
 386			degraded++;
 387		else if (test_bit(In_sync, &rdev->flags))
 388			;
 389		else
 390			/* not in-sync or faulty.
 391			 * If the reshape increases the number of devices,
 392			 * this is being recovered by the reshape, so
 393			 * this 'previous' section is not in_sync.
 394			 * If the number of devices is being reduced however,
 395			 * the device can only be part of the array if
 396			 * we are reverting a reshape, so this section will
 397			 * be in-sync.
 398			 */
 399			if (conf->raid_disks >= conf->previous_raid_disks)
 400				degraded++;
 401	}
 402	rcu_read_unlock();
 403	if (conf->raid_disks == conf->previous_raid_disks)
 404		return degraded;
 405	rcu_read_lock();
 406	degraded2 = 0;
 407	for (i = 0; i < conf->raid_disks; i++) {
 408		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 409		if (rdev && test_bit(Faulty, &rdev->flags))
 410			rdev = rcu_dereference(conf->disks[i].replacement);
 411		if (!rdev || test_bit(Faulty, &rdev->flags))
 412			degraded2++;
 413		else if (test_bit(In_sync, &rdev->flags))
 414			;
 415		else
 416			/* not in-sync or faulty.
 417			 * If reshape increases the number of devices, this
 418			 * section has already been recovered, else it
 419			 * almost certainly hasn't.
 420			 */
 421			if (conf->raid_disks <= conf->previous_raid_disks)
 422				degraded2++;
 423	}
 424	rcu_read_unlock();
 425	if (degraded2 > degraded)
 426		return degraded2;
 427	return degraded;
 428}
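    /*
     * For example, while growing a fully in_sync 4-device array to 6
     * devices, the two added devices are not yet in_sync: the first pass
     * only covers the 4 previous devices, and the second pass does not
     * count the new devices because raid_disks > previous_raid_disks, so
     * the array is not reported as degraded during the reshape.
     */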
 429
 430static int has_failed(struct r5conf *conf)
 431{
 432	int degraded;
 433
 434	if (conf->mddev->reshape_position == MaxSector)
 435		return conf->mddev->degraded > conf->max_degraded;
 436
 437	degraded = calc_degraded(conf);
 438	if (degraded > conf->max_degraded)
 439		return 1;
 440	return 0;
 441}
 442
 443static struct stripe_head *
 444get_active_stripe(struct r5conf *conf, sector_t sector,
 445		  int previous, int noblock, int noquiesce)
 446{
 447	struct stripe_head *sh;
 448
 449	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 450
 451	spin_lock_irq(&conf->device_lock);
 452
 453	do {
 454		wait_event_lock_irq(conf->wait_for_stripe,
 455				    conf->quiesce == 0 || noquiesce,
 456				    conf->device_lock, /* nothing */);
 457		sh = __find_stripe(conf, sector, conf->generation - previous);
 458		if (!sh) {
 459			if (!conf->inactive_blocked)
 460				sh = get_free_stripe(conf);
 461			if (noblock && sh == NULL)
 462				break;
 463			if (!sh) {
 464				conf->inactive_blocked = 1;
 465				wait_event_lock_irq(conf->wait_for_stripe,
 466						    !list_empty(&conf->inactive_list) &&
 467						    (atomic_read(&conf->active_stripes)
 468						     < (conf->max_nr_stripes *3/4)
 469						     || !conf->inactive_blocked),
 470						    conf->device_lock,
 471						    );
 472				conf->inactive_blocked = 0;
 473			} else
 474				init_stripe(sh, sector, previous);
 475		} else {
 476			if (atomic_read(&sh->count)) {
 477				BUG_ON(!list_empty(&sh->lru)
 478				    && !test_bit(STRIPE_EXPANDING, &sh->state));
 479			} else {
 480				if (!test_bit(STRIPE_HANDLE, &sh->state))
 481					atomic_inc(&conf->active_stripes);
 482				if (list_empty(&sh->lru) &&
 483				    !test_bit(STRIPE_EXPANDING, &sh->state))
 484					BUG();
 485				list_del_init(&sh->lru);
 486			}
 487		}
 488	} while (sh == NULL);
 489
 490	if (sh)
 491		atomic_inc(&sh->count);
 492
 493	spin_unlock_irq(&conf->device_lock);
 494	return sh;
 495}
 496
 497/* Determine if 'data_offset' or 'new_data_offset' should be used
 498 * in this stripe_head.
 499 */
 500static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
 501{
 502	sector_t progress = conf->reshape_progress;
 503	/* Need a memory barrier to make sure we see the value
 504	 * of conf->generation, or ->data_offset that was set before
 505	 * reshape_progress was updated.
 506	 */
 507	smp_rmb();
 508	if (progress == MaxSector)
 509		return 0;
 510	if (sh->generation == conf->generation - 1)
 511		return 0;
 512	/* We are in a reshape, and this is a new-generation stripe,
 513	 * so use new_data_offset.
 514	 */
 515	return 1;
 516}
 517
 518static void
 519raid5_end_read_request(struct bio *bi, int error);
 520static void
 521raid5_end_write_request(struct bio *bi, int error);
 522
 523static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 524{
 525	struct r5conf *conf = sh->raid_conf;
 526	int i, disks = sh->disks;
 527
 528	might_sleep();
 529
 530	for (i = disks; i--; ) {
 531		int rw;
 532		int replace_only = 0;
 533		struct bio *bi, *rbi;
 534		struct md_rdev *rdev, *rrdev = NULL;
 535		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 536			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 537				rw = WRITE_FUA;
 538			else
 539				rw = WRITE;
 540		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 541			rw = READ;
 542		else if (test_and_clear_bit(R5_WantReplace,
 543					    &sh->dev[i].flags)) {
 544			rw = WRITE;
 545			replace_only = 1;
 546		} else
 547			continue;
 548		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
 549			rw |= REQ_SYNC;
 550
 551		bi = &sh->dev[i].req;
 552		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 553
 554		bi->bi_rw = rw;
 555		rbi->bi_rw = rw;
 556		if (rw & WRITE) {
 557			bi->bi_end_io = raid5_end_write_request;
 558			rbi->bi_end_io = raid5_end_write_request;
 559		} else
 560			bi->bi_end_io = raid5_end_read_request;
 561
 562		rcu_read_lock();
 563		rrdev = rcu_dereference(conf->disks[i].replacement);
 564		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
 565		rdev = rcu_dereference(conf->disks[i].rdev);
 566		if (!rdev) {
 567			rdev = rrdev;
 568			rrdev = NULL;
 569		}
 570		if (rw & WRITE) {
 571			if (replace_only)
 572				rdev = NULL;
 573			if (rdev == rrdev)
 574				/* We raced and saw duplicates */
 575				rrdev = NULL;
 576		} else {
 577			if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
 578				rdev = rrdev;
 579			rrdev = NULL;
 580		}
 581
 582		if (rdev && test_bit(Faulty, &rdev->flags))
 583			rdev = NULL;
 584		if (rdev)
 585			atomic_inc(&rdev->nr_pending);
 586		if (rrdev && test_bit(Faulty, &rrdev->flags))
 587			rrdev = NULL;
 588		if (rrdev)
 589			atomic_inc(&rrdev->nr_pending);
 590		rcu_read_unlock();
 591
 592		/* We have already checked bad blocks for reads.  Now
 593		 * need to check for writes.  We never accept write errors
 594		 * on the replacement, so we don't need to check rrdev.
 595		 */
 596		while ((rw & WRITE) && rdev &&
 597		       test_bit(WriteErrorSeen, &rdev->flags)) {
 598			sector_t first_bad;
 599			int bad_sectors;
 600			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
 601					      &first_bad, &bad_sectors);
 602			if (!bad)
 603				break;
 604
 605			if (bad < 0) {
 606				set_bit(BlockedBadBlocks, &rdev->flags);
 607				if (!conf->mddev->external &&
 608				    conf->mddev->flags) {
 609					/* It is very unlikely, but we might
 610					 * still need to write out the
 611					 * bad block log - better give it
 612					 * a chance*/
 613					md_check_recovery(conf->mddev);
 614				}
 615				/*
 616				 * Because md_wait_for_blocked_rdev
 617				 * will dec nr_pending, we must
 618				 * increment it first.
 619				 */
 620				atomic_inc(&rdev->nr_pending);
 621				md_wait_for_blocked_rdev(rdev, conf->mddev);
 622			} else {
 623				/* Acknowledged bad block - skip the write */
 624				rdev_dec_pending(rdev, conf->mddev);
 625				rdev = NULL;
 626			}
 627		}
 628
 629		if (rdev) {
 630			if (s->syncing || s->expanding || s->expanded
 631			    || s->replacing)
 632				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 633
 634			set_bit(STRIPE_IO_STARTED, &sh->state);
 635
 636			bi->bi_bdev = rdev->bdev;
 637			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 638				__func__, (unsigned long long)sh->sector,
 639				bi->bi_rw, i);
 640			atomic_inc(&sh->count);
 641			if (use_new_offset(conf, sh))
 642				bi->bi_sector = (sh->sector
 643						 + rdev->new_data_offset);
 644			else
 645				bi->bi_sector = (sh->sector
 646						 + rdev->data_offset);
 647			bi->bi_flags = 1 << BIO_UPTODATE;
 648			bi->bi_idx = 0;
 649			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 650			bi->bi_io_vec[0].bv_offset = 0;
 651			bi->bi_size = STRIPE_SIZE;
 652			bi->bi_next = NULL;
 653			if (rrdev)
 654				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 655			generic_make_request(bi);
 656		}
 657		if (rrdev) {
 658			if (s->syncing || s->expanding || s->expanded
 659			    || s->replacing)
 660				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
 661
 662			set_bit(STRIPE_IO_STARTED, &sh->state);
 663
 664			rbi->bi_bdev = rrdev->bdev;
 665			pr_debug("%s: for %llu schedule op %ld on "
 666				 "replacement disc %d\n",
 667				__func__, (unsigned long long)sh->sector,
 668				rbi->bi_rw, i);
 669			atomic_inc(&sh->count);
 670			if (use_new_offset(conf, sh))
 671				rbi->bi_sector = (sh->sector
 672						  + rrdev->new_data_offset);
 673			else
 674				rbi->bi_sector = (sh->sector
 675						  + rrdev->data_offset);
 676			rbi->bi_flags = 1 << BIO_UPTODATE;
 677			rbi->bi_idx = 0;
 678			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 679			rbi->bi_io_vec[0].bv_offset = 0;
 680			rbi->bi_size = STRIPE_SIZE;
 681			rbi->bi_next = NULL;
 682			generic_make_request(rbi);
 683		}
 684		if (!rdev && !rrdev) {
 685			if (rw & WRITE)
 686				set_bit(STRIPE_DEGRADED, &sh->state);
 687			pr_debug("skip op %ld on disc %d for sector %llu\n",
 688				bi->bi_rw, i, (unsigned long long)sh->sector);
 689			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 690			set_bit(STRIPE_HANDLE, &sh->state);
 691		}
 692	}
 693}
 694
 695static struct dma_async_tx_descriptor *
 696async_copy_data(int frombio, struct bio *bio, struct page *page,
 697	sector_t sector, struct dma_async_tx_descriptor *tx)
 698{
 699	struct bio_vec *bvl;
 700	struct page *bio_page;
 701	int i;
 702	int page_offset;
 703	struct async_submit_ctl submit;
 704	enum async_tx_flags flags = 0;
 705
 706	if (bio->bi_sector >= sector)
 707		page_offset = (signed)(bio->bi_sector - sector) * 512;
 708	else
 709		page_offset = (signed)(sector - bio->bi_sector) * -512;
 710
 711	if (frombio)
 712		flags |= ASYNC_TX_FENCE;
 713	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 714
 715	bio_for_each_segment(bvl, bio, i) {
 716		int len = bvl->bv_len;
 717		int clen;
 718		int b_offset = 0;
 719
 720		if (page_offset < 0) {
 721			b_offset = -page_offset;
 722			page_offset += b_offset;
 723			len -= b_offset;
 724		}
 725
 726		if (len > 0 && page_offset + len > STRIPE_SIZE)
 727			clen = STRIPE_SIZE - page_offset;
 728		else
 729			clen = len;
 730
 731		if (clen > 0) {
 732			b_offset += bvl->bv_offset;
 733			bio_page = bvl->bv_page;
 734			if (frombio)
 735				tx = async_memcpy(page, bio_page, page_offset,
 736						  b_offset, clen, &submit);
 737			else
 738				tx = async_memcpy(bio_page, page, b_offset,
 739						  page_offset, clen, &submit);
 740		}
 741		/* chain the operations */
 742		submit.depend_tx = tx;
 743
 744		if (clen < len) /* hit end of page */
 745			break;
 746		page_offset +=  len;
 747	}
 748
 749	return tx;
 750}
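    /*
     * For example, a bio whose bi_sector is 2 sectors below the stripe
     * sector gives page_offset == -1024, so the first 1024 bytes of the
     * initial bio segment are skipped (b_offset) before copying into the
     * stripe page; a bio starting 2 sectors above it gives page_offset ==
     * 1024, i.e. the copy lands 1024 bytes into the STRIPE_SIZE page.
     */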
 751
 752static void ops_complete_biofill(void *stripe_head_ref)
 753{
 754	struct stripe_head *sh = stripe_head_ref;
 755	struct bio *return_bi = NULL;
 756	struct r5conf *conf = sh->raid_conf;
 757	int i;
 758
 759	pr_debug("%s: stripe %llu\n", __func__,
 760		(unsigned long long)sh->sector);
 761
 762	/* clear completed biofills */
 763	spin_lock_irq(&conf->device_lock);
 764	for (i = sh->disks; i--; ) {
 765		struct r5dev *dev = &sh->dev[i];
 766
 767		/* acknowledge completion of a biofill operation */
 768		/* and check if we need to reply to a read request,
 769		 * new R5_Wantfill requests are held off until
 770		 * !STRIPE_BIOFILL_RUN
 771		 */
 772		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 773			struct bio *rbi, *rbi2;
 774
 775			BUG_ON(!dev->read);
 776			rbi = dev->read;
 777			dev->read = NULL;
 778			while (rbi && rbi->bi_sector <
 779				dev->sector + STRIPE_SECTORS) {
 780				rbi2 = r5_next_bio(rbi, dev->sector);
 781				if (!raid5_dec_bi_phys_segments(rbi)) {
 782					rbi->bi_next = return_bi;
 783					return_bi = rbi;
 784				}
 785				rbi = rbi2;
 786			}
 787		}
 788	}
 789	spin_unlock_irq(&conf->device_lock);
 790	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 791
 792	return_io(return_bi);
 793
 794	set_bit(STRIPE_HANDLE, &sh->state);
 795	release_stripe(sh);
 796}
 797
 798static void ops_run_biofill(struct stripe_head *sh)
 799{
 800	struct dma_async_tx_descriptor *tx = NULL;
 801	struct r5conf *conf = sh->raid_conf;
 802	struct async_submit_ctl submit;
 803	int i;
 804
 805	pr_debug("%s: stripe %llu\n", __func__,
 806		(unsigned long long)sh->sector);
 807
 808	for (i = sh->disks; i--; ) {
 809		struct r5dev *dev = &sh->dev[i];
 810		if (test_bit(R5_Wantfill, &dev->flags)) {
 811			struct bio *rbi;
 812			spin_lock_irq(&conf->device_lock);
 813			dev->read = rbi = dev->toread;
 814			dev->toread = NULL;
 815			spin_unlock_irq(&conf->device_lock);
 816			while (rbi && rbi->bi_sector <
 817				dev->sector + STRIPE_SECTORS) {
 818				tx = async_copy_data(0, rbi, dev->page,
 819					dev->sector, tx);
 820				rbi = r5_next_bio(rbi, dev->sector);
 821			}
 822		}
 823	}
 824
 825	atomic_inc(&sh->count);
 826	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
 827	async_trigger_callback(&submit);
 828}
 829
 830static void mark_target_uptodate(struct stripe_head *sh, int target)
 831{
 832	struct r5dev *tgt;
 833
 834	if (target < 0)
 835		return;
 836
 837	tgt = &sh->dev[target];
 838	set_bit(R5_UPTODATE, &tgt->flags);
 839	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 840	clear_bit(R5_Wantcompute, &tgt->flags);
 841}
 842
 843static void ops_complete_compute(void *stripe_head_ref)
 844{
 845	struct stripe_head *sh = stripe_head_ref;
 846
 847	pr_debug("%s: stripe %llu\n", __func__,
 848		(unsigned long long)sh->sector);
 849
 850	/* mark the computed target(s) as uptodate */
 851	mark_target_uptodate(sh, sh->ops.target);
 852	mark_target_uptodate(sh, sh->ops.target2);
 853
 854	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
 855	if (sh->check_state == check_state_compute_run)
 856		sh->check_state = check_state_compute_result;
 857	set_bit(STRIPE_HANDLE, &sh->state);
 858	release_stripe(sh);
 859}
 860
 861/* return a pointer to the address conversion region of the scribble buffer */
 862static addr_conv_t *to_addr_conv(struct stripe_head *sh,
 863				 struct raid5_percpu *percpu)
 864{
 865	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
 866}
 867
 868static struct dma_async_tx_descriptor *
 869ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
 870{
 871	int disks = sh->disks;
 872	struct page **xor_srcs = percpu->scribble;
 873	int target = sh->ops.target;
 874	struct r5dev *tgt = &sh->dev[target];
 875	struct page *xor_dest = tgt->page;
 876	int count = 0;
 877	struct dma_async_tx_descriptor *tx;
 878	struct async_submit_ctl submit;
 879	int i;
 880
 881	pr_debug("%s: stripe %llu block: %d\n",
 882		__func__, (unsigned long long)sh->sector, target);
 883	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 884
 885	for (i = disks; i--; )
 886		if (i != target)
 887			xor_srcs[count++] = sh->dev[i].page;
 888
 889	atomic_inc(&sh->count);
 890
 891	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
 892			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
 893	if (unlikely(count == 1))
 894		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 895	else
 896		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 897
 898	return tx;
 899}
 900
 901/* set_syndrome_sources - populate source buffers for gen_syndrome
 902 * @srcs - (struct page *) array of size sh->disks
 903 * @sh - stripe_head to parse
 904 *
 905 * Populates srcs in proper layout order for the stripe and returns the
 906 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 907 * destination buffer is recorded in srcs[count] and the Q destination
 908 * is recorded in srcs[count+1].
 909 */
 910static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 911{
 912	int disks = sh->disks;
 913	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
 914	int d0_idx = raid6_d0(sh);
 915	int count;
 916	int i;
 917
 918	for (i = 0; i < disks; i++)
 919		srcs[i] = NULL;
 920
 921	count = 0;
 922	i = d0_idx;
 923	do {
 924		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 925
 926		srcs[slot] = sh->dev[i].page;
 927		i = raid6_next_disk(i, disks);
 928	} while (i != d0_idx);
 929
 930	return syndrome_disks;
 931}
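    /*
     * For example, on a 6-device md-layout array this fills srcs[0..3]
     * with the data pages in slot order, srcs[4] with the P page and
     * srcs[5] with the Q page, and returns 4, matching the srcs[count] /
     * srcs[count+1] destination convention described above.
     */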
 932
 933static struct dma_async_tx_descriptor *
 934ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
 935{
 936	int disks = sh->disks;
 937	struct page **blocks = percpu->scribble;
 938	int target;
 939	int qd_idx = sh->qd_idx;
 940	struct dma_async_tx_descriptor *tx;
 941	struct async_submit_ctl submit;
 942	struct r5dev *tgt;
 943	struct page *dest;
 944	int i;
 945	int count;
 946
 947	if (sh->ops.target < 0)
 948		target = sh->ops.target2;
 949	else if (sh->ops.target2 < 0)
 950		target = sh->ops.target;
 951	else
 952		/* we should only have one valid target */
 953		BUG();
 954	BUG_ON(target < 0);
 955	pr_debug("%s: stripe %llu block: %d\n",
 956		__func__, (unsigned long long)sh->sector, target);
 957
 958	tgt = &sh->dev[target];
 959	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 960	dest = tgt->page;
 961
 962	atomic_inc(&sh->count);
 963
 964	if (target == qd_idx) {
 965		count = set_syndrome_sources(blocks, sh);
 966		blocks[count] = NULL; /* regenerating p is not necessary */
 967		BUG_ON(blocks[count+1] != dest); /* q should already be set */
 968		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 969				  ops_complete_compute, sh,
 970				  to_addr_conv(sh, percpu));
 971		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 972	} else {
 973		/* Compute any data- or p-drive using XOR */
 974		count = 0;
 975		for (i = disks; i-- ; ) {
 976			if (i == target || i == qd_idx)
 977				continue;
 978			blocks[count++] = sh->dev[i].page;
 979		}
 980
 981		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 982				  NULL, ops_complete_compute, sh,
 983				  to_addr_conv(sh, percpu));
 984		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
 985	}
 986
 987	return tx;
 988}
 989
 990static struct dma_async_tx_descriptor *
 991ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 992{
 993	int i, count, disks = sh->disks;
 994	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
 995	int d0_idx = raid6_d0(sh);
 996	int faila = -1, failb = -1;
 997	int target = sh->ops.target;
 998	int target2 = sh->ops.target2;
 999	struct r5dev *tgt = &sh->dev[target];
1000	struct r5dev *tgt2 = &sh->dev[target2];
1001	struct dma_async_tx_descriptor *tx;
1002	struct page **blocks = percpu->scribble;
1003	struct async_submit_ctl submit;
1004
1005	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1006		 __func__, (unsigned long long)sh->sector, target, target2);
1007	BUG_ON(target < 0 || target2 < 0);
1008	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1009	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1010
1011	/* we need to open-code set_syndrome_sources to handle the
1012	 * slot number conversion for 'faila' and 'failb'
1013	 */
1014	for (i = 0; i < disks ; i++)
1015		blocks[i] = NULL;
1016	count = 0;
1017	i = d0_idx;
1018	do {
1019		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1020
1021		blocks[slot] = sh->dev[i].page;
1022
1023		if (i == target)
1024			faila = slot;
1025		if (i == target2)
1026			failb = slot;
1027		i = raid6_next_disk(i, disks);
1028	} while (i != d0_idx);
1029
1030	BUG_ON(faila == failb);
1031	if (failb < faila)
1032		swap(faila, failb);
1033	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1034		 __func__, (unsigned long long)sh->sector, faila, failb);
1035
1036	atomic_inc(&sh->count);
1037
1038	if (failb == syndrome_disks+1) {
1039		/* Q disk is one of the missing disks */
1040		if (faila == syndrome_disks) {
1041			/* Missing P+Q, just recompute */
1042			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1043					  ops_complete_compute, sh,
1044					  to_addr_conv(sh, percpu));
1045			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1046						  STRIPE_SIZE, &submit);
1047		} else {
1048			struct page *dest;
1049			int data_target;
1050			int qd_idx = sh->qd_idx;
1051
1052			/* Missing D+Q: recompute D from P, then recompute Q */
1053			if (target == qd_idx)
1054				data_target = target2;
1055			else
1056				data_target = target;
1057
1058			count = 0;
1059			for (i = disks; i-- ; ) {
1060				if (i == data_target || i == qd_idx)
1061					continue;
1062				blocks[count++] = sh->dev[i].page;
1063			}
1064			dest = sh->dev[data_target].page;
1065			init_async_submit(&submit,
1066					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1067					  NULL, NULL, NULL,
1068					  to_addr_conv(sh, percpu));
1069			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1070				       &submit);
1071
1072			count = set_syndrome_sources(blocks, sh);
1073			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1074					  ops_complete_compute, sh,
1075					  to_addr_conv(sh, percpu));
1076			return async_gen_syndrome(blocks, 0, count+2,
1077						  STRIPE_SIZE, &submit);
1078		}
1079	} else {
1080		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1081				  ops_complete_compute, sh,
1082				  to_addr_conv(sh, percpu));
1083		if (failb == syndrome_disks) {
1084			/* We're missing D+P. */
1085			return async_raid6_datap_recov(syndrome_disks+2,
1086						       STRIPE_SIZE, faila,
1087						       blocks, &submit);
1088		} else {
1089			/* We're missing D+D. */
1090			return async_raid6_2data_recov(syndrome_disks+2,
1091						       STRIPE_SIZE, faila, failb,
1092						       blocks, &submit);
1093		}
1094	}
1095}
1096
1097
1098static void ops_complete_prexor(void *stripe_head_ref)
1099{
1100	struct stripe_head *sh = stripe_head_ref;
1101
1102	pr_debug("%s: stripe %llu\n", __func__,
1103		(unsigned long long)sh->sector);
1104}
1105
1106static struct dma_async_tx_descriptor *
1107ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1108	       struct dma_async_tx_descriptor *tx)
1109{
1110	int disks = sh->disks;
1111	struct page **xor_srcs = percpu->scribble;
1112	int count = 0, pd_idx = sh->pd_idx, i;
1113	struct async_submit_ctl submit;
1114
1115	/* existing parity data subtracted */
1116	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1117
1118	pr_debug("%s: stripe %llu\n", __func__,
1119		(unsigned long long)sh->sector);
1120
1121	for (i = disks; i--; ) {
1122		struct r5dev *dev = &sh->dev[i];
1123		/* Only process blocks that are known to be uptodate */
1124		if (test_bit(R5_Wantdrain, &dev->flags))
1125			xor_srcs[count++] = dev->page;
1126	}
1127
1128	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1129			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1130	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1131
1132	return tx;
1133}
1134
1135static struct dma_async_tx_descriptor *
1136ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1137{
1138	int disks = sh->disks;
1139	int i;
1140
1141	pr_debug("%s: stripe %llu\n", __func__,
1142		(unsigned long long)sh->sector);
1143
1144	for (i = disks; i--; ) {
1145		struct r5dev *dev = &sh->dev[i];
1146		struct bio *chosen;
1147
1148		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1149			struct bio *wbi;
1150
1151			spin_lock_irq(&sh->raid_conf->device_lock);
1152			chosen = dev->towrite;
1153			dev->towrite = NULL;
1154			BUG_ON(dev->written);
1155			wbi = dev->written = chosen;
1156			spin_unlock_irq(&sh->raid_conf->device_lock);
1157
1158			while (wbi && wbi->bi_sector <
1159				dev->sector + STRIPE_SECTORS) {
1160				if (wbi->bi_rw & REQ_FUA)
1161					set_bit(R5_WantFUA, &dev->flags);
1162				if (wbi->bi_rw & REQ_SYNC)
1163					set_bit(R5_SyncIO, &dev->flags);
1164				tx = async_copy_data(1, wbi, dev->page,
1165					dev->sector, tx);
1166				wbi = r5_next_bio(wbi, dev->sector);
1167			}
1168		}
1169	}
1170
1171	return tx;
1172}
1173
1174static void ops_complete_reconstruct(void *stripe_head_ref)
1175{
1176	struct stripe_head *sh = stripe_head_ref;
1177	int disks = sh->disks;
1178	int pd_idx = sh->pd_idx;
1179	int qd_idx = sh->qd_idx;
1180	int i;
1181	bool fua = false, sync = false;
1182
1183	pr_debug("%s: stripe %llu\n", __func__,
1184		(unsigned long long)sh->sector);
1185
1186	for (i = disks; i--; ) {
1187		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1188		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1189	}
1190
1191	for (i = disks; i--; ) {
1192		struct r5dev *dev = &sh->dev[i];
1193
1194		if (dev->written || i == pd_idx || i == qd_idx) {
1195			set_bit(R5_UPTODATE, &dev->flags);
1196			if (fua)
1197				set_bit(R5_WantFUA, &dev->flags);
1198			if (sync)
1199				set_bit(R5_SyncIO, &dev->flags);
1200		}
1201	}
1202
1203	if (sh->reconstruct_state == reconstruct_state_drain_run)
1204		sh->reconstruct_state = reconstruct_state_drain_result;
1205	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1206		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1207	else {
1208		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1209		sh->reconstruct_state = reconstruct_state_result;
1210	}
1211
1212	set_bit(STRIPE_HANDLE, &sh->state);
1213	release_stripe(sh);
1214}
1215
1216static void
1217ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1218		     struct dma_async_tx_descriptor *tx)
1219{
1220	int disks = sh->disks;
1221	struct page **xor_srcs = percpu->scribble;
1222	struct async_submit_ctl submit;
1223	int count = 0, pd_idx = sh->pd_idx, i;
1224	struct page *xor_dest;
1225	int prexor = 0;
1226	unsigned long flags;
1227
1228	pr_debug("%s: stripe %llu\n", __func__,
1229		(unsigned long long)sh->sector);
1230
1231	/* check if prexor is active which means only process blocks
1232	 * that are part of a read-modify-write (written)
1233	 */
1234	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1235		prexor = 1;
1236		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1237		for (i = disks; i--; ) {
1238			struct r5dev *dev = &sh->dev[i];
1239			if (dev->written)
1240				xor_srcs[count++] = dev->page;
1241		}
1242	} else {
1243		xor_dest = sh->dev[pd_idx].page;
1244		for (i = disks; i--; ) {
1245			struct r5dev *dev = &sh->dev[i];
1246			if (i != pd_idx)
1247				xor_srcs[count++] = dev->page;
1248		}
1249	}
1250
1251	/* 1/ if we prexor'd then the dest is reused as a source
1252	 * 2/ if we did not prexor then we are redoing the parity
1253	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1254	 * for the synchronous xor case
1255	 */
1256	flags = ASYNC_TX_ACK |
1257		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1258
1259	atomic_inc(&sh->count);
1260
1261	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1262			  to_addr_conv(sh, percpu));
1263	if (unlikely(count == 1))
1264		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1265	else
1266		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1267}
1268
1269static void
1270ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1271		     struct dma_async_tx_descriptor *tx)
1272{
1273	struct async_submit_ctl submit;
1274	struct page **blocks = percpu->scribble;
1275	int count;
1276
1277	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1278
1279	count = set_syndrome_sources(blocks, sh);
1280
1281	atomic_inc(&sh->count);
1282
1283	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1284			  sh, to_addr_conv(sh, percpu));
1285	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1286}
1287
1288static void ops_complete_check(void *stripe_head_ref)
1289{
1290	struct stripe_head *sh = stripe_head_ref;
1291
1292	pr_debug("%s: stripe %llu\n", __func__,
1293		(unsigned long long)sh->sector);
1294
1295	sh->check_state = check_state_check_result;
1296	set_bit(STRIPE_HANDLE, &sh->state);
1297	release_stripe(sh);
1298}
1299
1300static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1301{
1302	int disks = sh->disks;
1303	int pd_idx = sh->pd_idx;
1304	int qd_idx = sh->qd_idx;
1305	struct page *xor_dest;
1306	struct page **xor_srcs = percpu->scribble;
1307	struct dma_async_tx_descriptor *tx;
1308	struct async_submit_ctl submit;
1309	int count;
1310	int i;
1311
1312	pr_debug("%s: stripe %llu\n", __func__,
1313		(unsigned long long)sh->sector);
1314
1315	count = 0;
1316	xor_dest = sh->dev[pd_idx].page;
1317	xor_srcs[count++] = xor_dest;
1318	for (i = disks; i--; ) {
1319		if (i == pd_idx || i == qd_idx)
1320			continue;
1321		xor_srcs[count++] = sh->dev[i].page;
1322	}
1323
1324	init_async_submit(&submit, 0, NULL, NULL, NULL,
1325			  to_addr_conv(sh, percpu));
1326	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1327			   &sh->ops.zero_sum_result, &submit);
1328
1329	atomic_inc(&sh->count);
1330	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1331	tx = async_trigger_callback(&submit);
1332}
1333
1334static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1335{
1336	struct page **srcs = percpu->scribble;
1337	struct async_submit_ctl submit;
1338	int count;
1339
1340	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1341		(unsigned long long)sh->sector, checkp);
1342
1343	count = set_syndrome_sources(srcs, sh);
1344	if (!checkp)
1345		srcs[count] = NULL;
1346
1347	atomic_inc(&sh->count);
1348	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1349			  sh, to_addr_conv(sh, percpu));
1350	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1351			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1352}
1353
1354static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1355{
1356	int overlap_clear = 0, i, disks = sh->disks;
1357	struct dma_async_tx_descriptor *tx = NULL;
1358	struct r5conf *conf = sh->raid_conf;
1359	int level = conf->level;
1360	struct raid5_percpu *percpu;
1361	unsigned long cpu;
1362
1363	cpu = get_cpu();
1364	percpu = per_cpu_ptr(conf->percpu, cpu);
1365	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1366		ops_run_biofill(sh);
1367		overlap_clear++;
1368	}
1369
1370	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1371		if (level < 6)
1372			tx = ops_run_compute5(sh, percpu);
1373		else {
1374			if (sh->ops.target2 < 0 || sh->ops.target < 0)
1375				tx = ops_run_compute6_1(sh, percpu);
1376			else
1377				tx = ops_run_compute6_2(sh, percpu);
1378		}
1379		/* terminate the chain if reconstruct is not set to be run */
1380		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1381			async_tx_ack(tx);
1382	}
1383
1384	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1385		tx = ops_run_prexor(sh, percpu, tx);
1386
1387	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1388		tx = ops_run_biodrain(sh, tx);
1389		overlap_clear++;
1390	}
1391
1392	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1393		if (level < 6)
1394			ops_run_reconstruct5(sh, percpu, tx);
1395		else
1396			ops_run_reconstruct6(sh, percpu, tx);
1397	}
1398
1399	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1400		if (sh->check_state == check_state_run)
1401			ops_run_check_p(sh, percpu);
1402		else if (sh->check_state == check_state_run_q)
1403			ops_run_check_pq(sh, percpu, 0);
1404		else if (sh->check_state == check_state_run_pq)
1405			ops_run_check_pq(sh, percpu, 1);
1406		else
1407			BUG();
1408	}
1409
1410	if (overlap_clear)
1411		for (i = disks; i--; ) {
1412			struct r5dev *dev = &sh->dev[i];
1413			if (test_and_clear_bit(R5_Overlap, &dev->flags))
1414				wake_up(&sh->raid_conf->wait_for_overlap);
1415		}
1416	put_cpu();
1417}
1418
1419#ifdef CONFIG_MULTICORE_RAID456
1420static void async_run_ops(void *param, async_cookie_t cookie)
1421{
1422	struct stripe_head *sh = param;
1423	unsigned long ops_request = sh->ops.request;
1424
1425	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1426	wake_up(&sh->ops.wait_for_ops);
1427
1428	__raid_run_ops(sh, ops_request);
1429	release_stripe(sh);
1430}
1431
1432static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1433{
1434	/* since handle_stripe can be called outside of raid5d context
1435	 * we need to ensure sh->ops.request is de-staged before another
1436	 * request arrives
1437	 */
1438	wait_event(sh->ops.wait_for_ops,
1439		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1440	sh->ops.request = ops_request;
1441
1442	atomic_inc(&sh->count);
1443	async_schedule(async_run_ops, sh);
1444}
1445#else
1446#define raid_run_ops __raid_run_ops
1447#endif
1448
1449static int grow_one_stripe(struct r5conf *conf)
1450{
1451	struct stripe_head *sh;
1452	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1453	if (!sh)
1454		return 0;
1455
1456	sh->raid_conf = conf;
1457	#ifdef CONFIG_MULTICORE_RAID456
1458	init_waitqueue_head(&sh->ops.wait_for_ops);
1459	#endif
1460
1461	if (grow_buffers(sh)) {
1462		shrink_buffers(sh);
1463		kmem_cache_free(conf->slab_cache, sh);
1464		return 0;
1465	}
1466	/* we just created an active stripe so... */
1467	atomic_set(&sh->count, 1);
1468	atomic_inc(&conf->active_stripes);
1469	INIT_LIST_HEAD(&sh->lru);
1470	release_stripe(sh);
1471	return 1;
1472}
1473
1474static int grow_stripes(struct r5conf *conf, int num)
1475{
1476	struct kmem_cache *sc;
1477	int devs = max(conf->raid_disks, conf->previous_raid_disks);
1478
1479	if (conf->mddev->gendisk)
1480		sprintf(conf->cache_name[0],
1481			"raid%d-%s", conf->level, mdname(conf->mddev));
1482	else
1483		sprintf(conf->cache_name[0],
1484			"raid%d-%p", conf->level, conf->mddev);
1485	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1486
1487	conf->active_name = 0;
1488	sc = kmem_cache_create(conf->cache_name[conf->active_name],
1489			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1490			       0, 0, NULL);
1491	if (!sc)
1492		return 1;
1493	conf->slab_cache = sc;
1494	conf->pool_size = devs;
1495	while (num--)
1496		if (!grow_one_stripe(conf))
1497			return 1;
1498	return 0;
1499}
1500
1501/**
1502 * scribble_len - return the required size of the scribble region
1503 * @num - total number of disks in the array
1504 *
1505 * The size must be enough to contain:
1506 * 1/ a struct page pointer for each device in the array +2
1507 * 2/ room to convert each entry in (1) to its corresponding dma
1508 *    (dma_map_page()) or page (page_address()) address.
1509 *
1510 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1511 * calculate over all devices (not just the data blocks), using zeros in place
1512 * of the P and Q blocks.
1513 */
1514static size_t scribble_len(int num)
1515{
1516	size_t len;
1517
1518	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1519
1520	return len;
1521}
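    /*
     * For example, scribble_len(8) reserves 10 page pointers plus 10
     * address-conversion slots; assuming pointer-sized addr_conv_t on a
     * 64-bit build that is 160 bytes, with to_addr_conv() above pointing
     * just past the page-pointer array.
     */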
1522
1523static int resize_stripes(struct r5conf *conf, int newsize)
1524{
1525	/* Make all the stripes able to hold 'newsize' devices.
1526	 * New slots in each stripe get 'page' set to a new page.
1527	 *
1528	 * This happens in stages:
1529	 * 1/ create a new kmem_cache and allocate the required number of
1530	 *    stripe_heads.
1531	 * 2/ gather all the old stripe_heads and transfer the pages across
1532	 *    to the new stripe_heads.  This will have the side effect of
1533	 *    freezing the array as once all stripe_heads have been collected,
1534	 *    no IO will be possible.  Old stripe heads are freed once their
1535	 *    pages have been transferred over, and the old kmem_cache is
1536	 *    freed when all stripes are done.
1537	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1538	 *    we simply return a failure status - no need to clean anything up.
1539	 * 4/ allocate new pages for the new slots in the new stripe_heads.
1540	 *    If this fails, we don't bother trying to shrink the
1541	 *    stripe_heads down again, we just leave them as they are.
1542	 *    As each stripe_head is processed the new one is released into
1543	 *    active service.
1544	 *
1545	 * Once step2 is started, we cannot afford to wait for a write,
1546	 * so we use GFP_NOIO allocations.
1547	 */
1548	struct stripe_head *osh, *nsh;
1549	LIST_HEAD(newstripes);
1550	struct disk_info *ndisks;
1551	unsigned long cpu;
1552	int err;
1553	struct kmem_cache *sc;
1554	int i;
1555
1556	if (newsize <= conf->pool_size)
1557		return 0; /* never bother to shrink */
1558
1559	err = md_allow_write(conf->mddev);
1560	if (err)
1561		return err;
1562
1563	/* Step 1 */
1564	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1565			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1566			       0, 0, NULL);
1567	if (!sc)
1568		return -ENOMEM;
1569
1570	for (i = conf->max_nr_stripes; i; i--) {
1571		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1572		if (!nsh)
1573			break;
1574
1575		nsh->raid_conf = conf;
1576		#ifdef CONFIG_MULTICORE_RAID456
1577		init_waitqueue_head(&nsh->ops.wait_for_ops);
1578		#endif
1579
1580		list_add(&nsh->lru, &newstripes);
1581	}
1582	if (i) {
1583		/* didn't get enough, give up */
1584		while (!list_empty(&newstripes)) {
1585			nsh = list_entry(newstripes.next, struct stripe_head, lru);
1586			list_del(&nsh->lru);
1587			kmem_cache_free(sc, nsh);
1588		}
1589		kmem_cache_destroy(sc);
1590		return -ENOMEM;
1591	}
1592	/* Step 2 - Must use GFP_NOIO now.
1593	 * OK, we have enough stripes, start collecting inactive
1594	 * stripes and copying them over
1595	 */
1596	list_for_each_entry(nsh, &newstripes, lru) {
1597		spin_lock_irq(&conf->device_lock);
1598		wait_event_lock_irq(conf->wait_for_stripe,
1599				    !list_empty(&conf->inactive_list),
1600				    conf->device_lock,
1601				    );
1602		osh = get_free_stripe(conf);
1603		spin_unlock_irq(&conf->device_lock);
1604		atomic_set(&nsh->count, 1);
1605		for(i=0; i<conf->pool_size; i++)
1606			nsh->dev[i].page = osh->dev[i].page;
1607		for( ; i<newsize; i++)
1608			nsh->dev[i].page = NULL;
1609		kmem_cache_free(conf->slab_cache, osh);
1610	}
1611	kmem_cache_destroy(conf->slab_cache);
1612
1613	/* Step 3.
1614	 * At this point, we are holding all the stripes so the array
1615	 * is completely stalled, so now is a good time to resize
1616	 * conf->disks and the scribble region
1617	 */
1618	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1619	if (ndisks) {
1620		for (i=0; i<conf->raid_disks; i++)
1621			ndisks[i] = conf->disks[i];
1622		kfree(conf->disks);
1623		conf->disks = ndisks;
1624	} else
1625		err = -ENOMEM;
1626
1627	get_online_cpus();
1628	conf->scribble_len = scribble_len(newsize);
1629	for_each_present_cpu(cpu) {
1630		struct raid5_percpu *percpu;
1631		void *scribble;
1632
1633		percpu = per_cpu_ptr(conf->percpu, cpu);
1634		scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1635
1636		if (scribble) {
1637			kfree(percpu->scribble);
1638			percpu->scribble = scribble;
1639		} else {
1640			err = -ENOMEM;
1641			break;
1642		}
1643	}
1644	put_online_cpus();
1645
1646	/* Step 4, return new stripes to service */
1647	while(!list_empty(&newstripes)) {
1648		nsh = list_entry(newstripes.next, struct stripe_head, lru);
1649		list_del_init(&nsh->lru);
1650
1651		for (i=conf->raid_disks; i < newsize; i++)
1652			if (nsh->dev[i].page == NULL) {
1653				struct page *p = alloc_page(GFP_NOIO);
1654				nsh->dev[i].page = p;
1655				if (!p)
1656					err = -ENOMEM;
1657			}
1658		release_stripe(nsh);
1659	}
1660	/* critical section passed, GFP_NOIO no longer needed */
1661
1662	conf->slab_cache = sc;
1663	conf->active_name = 1-conf->active_name;
1664	conf->pool_size = newsize;
1665	return err;
1666}
1667
1668static int drop_one_stripe(struct r5conf *conf)
1669{
1670	struct stripe_head *sh;
1671
1672	spin_lock_irq(&conf->device_lock);
1673	sh = get_free_stripe(conf);
1674	spin_unlock_irq(&conf->device_lock);
1675	if (!sh)
1676		return 0;
1677	BUG_ON(atomic_read(&sh->count));
1678	shrink_buffers(sh);
1679	kmem_cache_free(conf->slab_cache, sh);
1680	atomic_dec(&conf->active_stripes);
1681	return 1;
1682}
1683
1684static void shrink_stripes(struct r5conf *conf)
1685{
1686	while (drop_one_stripe(conf))
1687		;
1688
1689	if (conf->slab_cache)
1690		kmem_cache_destroy(conf->slab_cache);
1691	conf->slab_cache = NULL;
1692}
1693
1694static void raid5_end_read_request(struct bio * bi, int error)
1695{
1696	struct stripe_head *sh = bi->bi_private;
1697	struct r5conf *conf = sh->raid_conf;
1698	int disks = sh->disks, i;
1699	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1700	char b[BDEVNAME_SIZE];
1701	struct md_rdev *rdev = NULL;
1702	sector_t s;
1703
1704	for (i=0 ; i<disks; i++)
1705		if (bi == &sh->dev[i].req)
1706			break;
1707
1708	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1709		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
1710		uptodate);
1711	if (i == disks) {
1712		BUG();
1713		return;
1714	}
1715	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1716		/* If replacement finished while this request was outstanding,
1717		 * 'replacement' might be NULL already.
1718		 * In that case it moved down to 'rdev'.
1719		 * rdev is not removed until all requests are finished.
1720		 */
1721		rdev = conf->disks[i].replacement;
1722	if (!rdev)
1723		rdev = conf->disks[i].rdev;
1724
1725	if (use_new_offset(conf, sh))
1726		s = sh->sector + rdev->new_data_offset;
1727	else
1728		s = sh->sector + rdev->data_offset;
1729	if (uptodate) {
1730		set_bit(R5_UPTODATE, &sh->dev[i].flags);
1731		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1732			/* Note that this cannot happen on a
1733			 * replacement device.  We just fail those on
1734			 * any error
1735			 */
1736			printk_ratelimited(
1737				KERN_INFO
1738				"md/raid:%s: read error corrected"
1739				" (%lu sectors at %llu on %s)\n",
1740				mdname(conf->mddev), STRIPE_SECTORS,
1741				(unsigned long long)s,
1742				bdevname(rdev->bdev, b));
1743			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1744			clear_bit(R5_ReadError, &sh->dev[i].flags);
1745			clear_bit(R5_ReWrite, &sh->dev[i].flags);
1746		}
1747		if (atomic_read(&rdev->read_errors))
1748			atomic_set(&rdev->read_errors, 0);
1749	} else {
1750		const char *bdn = bdevname(rdev->bdev, b);
1751		int retry = 0;
1752		int set_bad = 0;
1753
1754		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1755		atomic_inc(&rdev->read_errors);
1756		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
1757			printk_ratelimited(
1758				KERN_WARNING
1759				"md/raid:%s: read error on replacement device "
1760				"(sector %llu on %s).\n",
1761				mdname(conf->mddev),
1762				(unsigned long long)s,
1763				bdn);
1764		else if (conf->mddev->degraded >= conf->max_degraded) {
1765			set_bad = 1;
1766			printk_ratelimited(
1767				KERN_WARNING
1768				"md/raid:%s: read error not correctable "
1769				"(sector %llu on %s).\n",
1770				mdname(conf->mddev),
1771				(unsigned long long)s,
1772				bdn);
1773		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
1774			/* Oh, no!!! */
1775			set_bad = 1;
1776			printk_ratelimited(
1777				KERN_WARNING
1778				"md/raid:%s: read error NOT corrected!! "
1779				"(sector %llu on %s).\n",
1780				mdname(conf->mddev),
1781				(unsigned long long)s,
1782				bdn);
1783		} else if (atomic_read(&rdev->read_errors)
1784			 > conf->max_nr_stripes)
1785			printk(KERN_WARNING
1786			       "md/raid:%s: Too many read errors, failing device %s.\n",
1787			       mdname(conf->mddev), bdn);
1788		else
1789			retry = 1;
1790		if (retry)
1791			set_bit(R5_ReadError, &sh->dev[i].flags);
1792		else {
1793			clear_bit(R5_ReadError, &sh->dev[i].flags);
1794			clear_bit(R5_ReWrite, &sh->dev[i].flags);
1795			if (!(set_bad
1796			      && test_bit(In_sync, &rdev->flags)
1797			      && rdev_set_badblocks(
1798				      rdev, sh->sector, STRIPE_SECTORS, 0)))
1799				md_error(conf->mddev, rdev);
1800		}
1801	}
1802	rdev_dec_pending(rdev, conf->mddev);
1803	clear_bit(R5_LOCKED, &sh->dev[i].flags);
1804	set_bit(STRIPE_HANDLE, &sh->state);
1805	release_stripe(sh);
1806}
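
/*
 * Editor's note (summary added for illustration, not in the original
 * driver): the failure path above is a small decision table.  A failed
 * read is retried (R5_ReadError set, so the block is later re-read or
 * computed from the other devices) unless one of the following holds,
 * in which case the block is marked bad or the device is failed via
 * md_error():
 *   - the error was on a replacement device (those are simply failed),
 *   - the array is already degraded to its limit (max_degraded),
 *   - a rewrite of this block had already been tried (R5_ReWrite),
 *   - the device has accumulated more read errors than max_nr_stripes.
 */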
1807
1808static void raid5_end_write_request(struct bio *bi, int error)
1809{
1810	struct stripe_head *sh = bi->bi_private;
1811	struct r5conf *conf = sh->raid_conf;
1812	int disks = sh->disks, i;
1813	struct md_rdev *uninitialized_var(rdev);
1814	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1815	sector_t first_bad;
1816	int bad_sectors;
1817	int replacement = 0;
1818
1819	for (i = 0 ; i < disks; i++) {
1820		if (bi == &sh->dev[i].req) {
1821			rdev = conf->disks[i].rdev;
1822			break;
1823		}
1824		if (bi == &sh->dev[i].rreq) {
1825			rdev = conf->disks[i].replacement;
1826			if (rdev)
1827				replacement = 1;
1828			else
1829				/* rdev was removed and 'replacement'
1830				 * replaced it.  rdev is not removed
1831				 * until all requests are finished.
1832				 */
1833				rdev = conf->disks[i].rdev;
1834			break;
1835		}
1836	}
1837	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1838		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
1839		uptodate);
1840	if (i == disks) {
1841		BUG();
1842		return;
1843	}
1844
1845	if (replacement) {
1846		if (!uptodate)
1847			md_error(conf->mddev, rdev);
1848		else if (is_badblock(rdev, sh->sector,
1849				     STRIPE_SECTORS,
1850				     &first_bad, &bad_sectors))
1851			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
1852	} else {
1853		if (!uptodate) {
1854			set_bit(WriteErrorSeen, &rdev->flags);
1855			set_bit(R5_WriteError, &sh->dev[i].flags);
1856			if (!test_and_set_bit(WantReplacement, &rdev->flags))
1857				set_bit(MD_RECOVERY_NEEDED,
1858					&rdev->mddev->recovery);
1859		} else if (is_badblock(rdev, sh->sector,
1860				       STRIPE_SECTORS,
1861				       &first_bad, &bad_sectors))
1862			set_bit(R5_MadeGood, &sh->dev[i].flags);
1863	}
1864	rdev_dec_pending(rdev, conf->mddev);
1865
1866	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
1867		clear_bit(R5_LOCKED, &sh->dev[i].flags);
1868	set_bit(STRIPE_HANDLE, &sh->state);
1869	release_stripe(sh);
1870}
1871
1872static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1873
1874static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1875{
1876	struct r5dev *dev = &sh->dev[i];
1877
1878	bio_init(&dev->req);
1879	dev->req.bi_io_vec = &dev->vec;
1880	dev->req.bi_vcnt++;
1881	dev->req.bi_max_vecs++;
1882	dev->req.bi_private = sh;
1883	dev->vec.bv_page = dev->page;
1884
1885	bio_init(&dev->rreq);
1886	dev->rreq.bi_io_vec = &dev->rvec;
1887	dev->rreq.bi_vcnt++;
1888	dev->rreq.bi_max_vecs++;
1889	dev->rreq.bi_private = sh;
1890	dev->rvec.bv_page = dev->page;
1891
1892	dev->flags = 0;
1893	dev->sector = compute_blocknr(sh, i, previous);
1894}
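
/*
 * Editor's note: raid5_build_block() leaves each r5dev with two embedded
 * single-page bios: 'req' (backed by 'vec') for I/O to the main rdev and
 * 'rreq' (backed by 'rvec') for the replacement device.  Both bio_vecs
 * point at the same dev->page, the one stripe-cache page for this device.
 */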
1895
1896static void error(struct mddev *mddev, struct md_rdev *rdev)
1897{
1898	char b[BDEVNAME_SIZE];
1899	struct r5conf *conf = mddev->private;
1900	unsigned long flags;
1901	pr_debug("raid456: error called\n");
1902
1903	spin_lock_irqsave(&conf->device_lock, flags);
1904	clear_bit(In_sync, &rdev->flags);
1905	mddev->degraded = calc_degraded(conf);
1906	spin_unlock_irqrestore(&conf->device_lock, flags);
1907	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1908
1909	set_bit(Blocked, &rdev->flags);
1910	set_bit(Faulty, &rdev->flags);
1911	set_bit(MD_CHANGE_DEVS, &mddev->flags);
1912	printk(KERN_ALERT
1913	       "md/raid:%s: Disk failure on %s, disabling device.\n"
1914	       "md/raid:%s: Operation continuing on %d devices.\n",
1915	       mdname(mddev),
1916	       bdevname(rdev->bdev, b),
1917	       mdname(mddev),
1918	       conf->raid_disks - mddev->degraded);
1919}
1920
1921/*
1922 * Input: a 'big' sector number,
1923 * Output: index of the data and parity disk, and the sector # in them.
1924 */
1925static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
1926				     int previous, int *dd_idx,
1927				     struct stripe_head *sh)
1928{
1929	sector_t stripe, stripe2;
1930	sector_t chunk_number;
1931	unsigned int chunk_offset;
1932	int pd_idx, qd_idx;
1933	int ddf_layout = 0;
1934	sector_t new_sector;
1935	int algorithm = previous ? conf->prev_algo
1936				 : conf->algorithm;
1937	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1938					 : conf->chunk_sectors;
1939	int raid_disks = previous ? conf->previous_raid_disks
1940				  : conf->raid_disks;
1941	int data_disks = raid_disks - conf->max_degraded;
1942
1943	/* First compute the information on this sector */
1944
1945	/*
1946	 * Compute the chunk number and the sector offset inside the chunk
1947	 */
1948	chunk_offset = sector_div(r_sector, sectors_per_chunk);
1949	chunk_number = r_sector;
1950
1951	/*
1952	 * Compute the stripe number
1953	 */
1954	stripe = chunk_number;
1955	*dd_idx = sector_div(stripe, data_disks);
1956	stripe2 = stripe;
1957	/*
1958	 * Select the parity disk based on the user selected algorithm.
1959	 */
1960	pd_idx = qd_idx = -1;
1961	switch(conf->level) {
1962	case 4:
1963		pd_idx = data_disks;
1964		break;
1965	case 5:
1966		switch (algorithm) {
1967		case ALGORITHM_LEFT_ASYMMETRIC:
1968			pd_idx = data_disks - sector_div(stripe2, raid_disks);
1969			if (*dd_idx >= pd_idx)
1970				(*dd_idx)++;
1971			break;
1972		case ALGORITHM_RIGHT_ASYMMETRIC:
1973			pd_idx = sector_div(stripe2, raid_disks);
1974			if (*dd_idx >= pd_idx)
1975				(*dd_idx)++;
1976			break;
1977		case ALGORITHM_LEFT_SYMMETRIC:
1978			pd_idx = data_disks - sector_div(stripe2, raid_disks);
1979			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1980			break;
1981		case ALGORITHM_RIGHT_SYMMETRIC:
1982			pd_idx = sector_div(stripe2, raid_disks);
1983			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1984			break;
1985		case ALGORITHM_PARITY_0:
1986			pd_idx = 0;
1987			(*dd_idx)++;
1988			break;
1989		case ALGORITHM_PARITY_N:
1990			pd_idx = data_disks;
1991			break;
1992		default:
1993			BUG();
1994		}
1995		break;
1996	case 6:
1997
1998		switch (algorithm) {
1999		case ALGORITHM_LEFT_ASYMMETRIC:
2000			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2001			qd_idx = pd_idx + 1;
2002			if (pd_idx == raid_disks-1) {
2003				(*dd_idx)++;	/* Q D D D P */
2004				qd_idx = 0;
2005			} else if (*dd_idx >= pd_idx)
2006				(*dd_idx) += 2; /* D D P Q D */
2007			break;
2008		case ALGORITHM_RIGHT_ASYMMETRIC:
2009			pd_idx = sector_div(stripe2, raid_disks);
2010			qd_idx = pd_idx + 1;
2011			if (pd_idx == raid_disks-1) {
2012				(*dd_idx)++;	/* Q D D D P */
2013				qd_idx = 0;
2014			} else if (*dd_idx >= pd_idx)
2015				(*dd_idx) += 2; /* D D P Q D */
2016			break;
2017		case ALGORITHM_LEFT_SYMMETRIC:
2018			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2019			qd_idx = (pd_idx + 1) % raid_disks;
2020			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2021			break;
2022		case ALGORITHM_RIGHT_SYMMETRIC:
2023			pd_idx = sector_div(stripe2, raid_disks);
2024			qd_idx = (pd_idx + 1) % raid_disks;
2025			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2026			break;
2027
2028		case ALGORITHM_PARITY_0:
2029			pd_idx = 0;
2030			qd_idx = 1;
2031			(*dd_idx) += 2;
2032			break;
2033		case ALGORITHM_PARITY_N:
2034			pd_idx = data_disks;
2035			qd_idx = data_disks + 1;
2036			break;
2037
2038		case ALGORITHM_ROTATING_ZERO_RESTART:
2039			/* Exactly the same as RIGHT_ASYMMETRIC, but the
2040			 * order of blocks for computing Q is different.
2041			 */
2042			pd_idx = sector_div(stripe2, raid_disks);
2043			qd_idx = pd_idx + 1;
2044			if (pd_idx == raid_disks-1) {
2045				(*dd_idx)++;	/* Q D D D P */
2046				qd_idx = 0;
2047			} else if (*dd_idx >= pd_idx)
2048				(*dd_idx) += 2; /* D D P Q D */
2049			ddf_layout = 1;
2050			break;
2051
2052		case ALGORITHM_ROTATING_N_RESTART:
2053			/* Same as left_asymmetric, but the first stripe is
2054			 * D D D P Q  rather than
2055			 * Q D D D P
2056			 */
2057			stripe2 += 1;
2058			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2059			qd_idx = pd_idx + 1;
2060			if (pd_idx == raid_disks-1) {
2061				(*dd_idx)++;	/* Q D D D P */
2062				qd_idx = 0;
2063			} else if (*dd_idx >= pd_idx)
2064				(*dd_idx) += 2; /* D D P Q D */
2065			ddf_layout = 1;
2066			break;
2067
2068		case ALGORITHM_ROTATING_N_CONTINUE:
2069			/* Same as left_symmetric but Q is before P */
2070			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2071			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2072			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2073			ddf_layout = 1;
2074			break;
2075
2076		case ALGORITHM_LEFT_ASYMMETRIC_6:
2077			/* RAID5 left_asymmetric, with Q on last device */
2078			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2079			if (*dd_idx >= pd_idx)
2080				(*dd_idx)++;
2081			qd_idx = raid_disks - 1;
2082			break;
2083
2084		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2085			pd_idx = sector_div(stripe2, raid_disks-1);
2086			if (*dd_idx >= pd_idx)
2087				(*dd_idx)++;
2088			qd_idx = raid_disks - 1;
2089			break;
2090
2091		case ALGORITHM_LEFT_SYMMETRIC_6:
2092			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2093			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2094			qd_idx = raid_disks - 1;
2095			break;
2096
2097		case ALGORITHM_RIGHT_SYMMETRIC_6:
2098			pd_idx = sector_div(stripe2, raid_disks-1);
2099			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2100			qd_idx = raid_disks - 1;
2101			break;
2102
2103		case ALGORITHM_PARITY_0_6:
2104			pd_idx = 0;
2105			(*dd_idx)++;
2106			qd_idx = raid_disks - 1;
2107			break;
2108
2109		default:
2110			BUG();
2111		}
2112		break;
2113	}
2114
2115	if (sh) {
2116		sh->pd_idx = pd_idx;
2117		sh->qd_idx = qd_idx;
2118		sh->ddf_layout = ddf_layout;
2119	}
2120	/*
2121	 * Finally, compute the new sector number
2122	 */
2123	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2124	return new_sector;
2125}
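
/*
 * Worked example (editor's illustration, made-up parameters): a RAID5
 * array with raid_disks = 4 (data_disks = 3), 64KiB chunks
 * (sectors_per_chunk = 128) and ALGORITHM_LEFT_SYMMETRIC.  For
 * r_sector = 1000:
 *   chunk_offset = 1000 % 128 = 104,  chunk_number = 1000 / 128 = 7
 *   *dd_idx      = 7 % 3 = 1,         stripe       = 7 / 3 = 2
 *   pd_idx       = 3 - (2 % 4) = 1
 *   *dd_idx      = (1 + 1 + 1) % 4 = 3
 *   new_sector   = 2 * 128 + 104 = 360
 * so logical sector 1000 lives on device 3 at device sector offset 360
 * (before data_offset is added), with the stripe's parity on device 1.
 */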
2126
2127
2128static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
2129{
2130	struct r5conf *conf = sh->raid_conf;
2131	int raid_disks = sh->disks;
2132	int data_disks = raid_disks - conf->max_degraded;
2133	sector_t new_sector = sh->sector, check;
2134	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2135					 : conf->chunk_sectors;
2136	int algorithm = previous ? conf->prev_algo
2137				 : conf->algorithm;
2138	sector_t stripe;
2139	int chunk_offset;
2140	sector_t chunk_number;
2141	int dummy1, dd_idx = i;
2142	sector_t r_sector;
2143	struct stripe_head sh2;
2144
2145
2146	chunk_offset = sector_div(new_sector, sectors_per_chunk);
2147	stripe = new_sector;
2148
2149	if (i == sh->pd_idx)
2150		return 0;
2151	switch(conf->level) {
2152	case 4: break;
2153	case 5:
2154		switch (algorithm) {
2155		case ALGORITHM_LEFT_ASYMMETRIC:
2156		case ALGORITHM_RIGHT_ASYMMETRIC:
2157			if (i > sh->pd_idx)
2158				i--;
2159			break;
2160		case ALGORITHM_LEFT_SYMMETRIC:
2161		case ALGORITHM_RIGHT_SYMMETRIC:
2162			if (i < sh->pd_idx)
2163				i += raid_disks;
2164			i -= (sh->pd_idx + 1);
2165			break;
2166		case ALGORITHM_PARITY_0:
2167			i -= 1;
2168			break;
2169		case ALGORITHM_PARITY_N:
2170			break;
2171		default:
2172			BUG();
2173		}
2174		break;
2175	case 6:
2176		if (i == sh->qd_idx)
2177			return 0; /* It is the Q disk */
2178		switch (algorithm) {
2179		case ALGORITHM_LEFT_ASYMMETRIC:
2180		case ALGORITHM_RIGHT_ASYMMETRIC:
2181		case ALGORITHM_ROTATING_ZERO_RESTART:
2182		case ALGORITHM_ROTATING_N_RESTART:
2183			if (sh->pd_idx == raid_disks-1)
2184				i--;	/* Q D D D P */
2185			else if (i > sh->pd_idx)
2186				i -= 2; /* D D P Q D */
2187			break;
2188		case ALGORITHM_LEFT_SYMMETRIC:
2189		case ALGORITHM_RIGHT_SYMMETRIC:
2190			if (sh->pd_idx == raid_disks-1)
2191				i--; /* Q D D D P */
2192			else {
2193				/* D D P Q D */
2194				if (i < sh->pd_idx)
2195					i += raid_disks;
2196				i -= (sh->pd_idx + 2);
2197			}
2198			break;
2199		case ALGORITHM_PARITY_0:
2200			i -= 2;
2201			break;
2202		case ALGORITHM_PARITY_N:
2203			break;
2204		case ALGORITHM_ROTATING_N_CONTINUE:
2205			/* Like left_symmetric, but P is before Q */
2206			if (sh->pd_idx == 0)
2207				i--;	/* P D D D Q */
2208			else {
2209				/* D D Q P D */
2210				if (i < sh->pd_idx)
2211					i += raid_disks;
2212				i -= (sh->pd_idx + 1);
2213			}
2214			break;
2215		case ALGORITHM_LEFT_ASYMMETRIC_6:
2216		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2217			if (i > sh->pd_idx)
2218				i--;
2219			break;
2220		case ALGORITHM_LEFT_SYMMETRIC_6:
2221		case ALGORITHM_RIGHT_SYMMETRIC_6:
2222			if (i < sh->pd_idx)
2223				i += data_disks + 1;
2224			i -= (sh->pd_idx + 1);
2225			break;
2226		case ALGORITHM_PARITY_0_6:
2227			i -= 1;
2228			break;
2229		default:
2230			BUG();
2231		}
2232		break;
2233	}
2234
2235	chunk_number = stripe * data_disks + i;
2236	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2237
2238	check = raid5_compute_sector(conf, r_sector,
2239				     previous, &dummy1, &sh2);
2240	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2241		|| sh2.qd_idx != sh->qd_idx) {
2242		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2243		       mdname(conf->mddev));
2244		return 0;
2245	}
2246	return r_sector;
2247}
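
/*
 * Editor's note: compute_blocknr() is the inverse of raid5_compute_sector().
 * Continuing the example above (4-disk RAID5, 128-sector chunks,
 * ALGORITHM_LEFT_SYMMETRIC), a stripe with sh->sector = 360, pd_idx = 1
 * and i = 3 maps back as:
 *   chunk_offset = 360 % 128 = 104,  stripe = 360 / 128 = 2
 *   i            = 3 - (pd_idx + 1) = 1
 *   chunk_number = 2 * 3 + 1 = 7,    r_sector = 7 * 128 + 104 = 1000
 * which is the logical sector we started from; the check at the end of
 * the function re-runs raid5_compute_sector() to catch any mismatch.
 */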
2248
2249
2250static void
2251schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2252			 int rcw, int expand)
2253{
2254	int i, pd_idx = sh->pd_idx, disks = sh->disks;
2255	struct r5conf *conf = sh->raid_conf;
2256	int level = conf->level;
2257
2258	if (rcw) {
2259		/* if we are not expanding this is a proper write request, and
2260		 * there will be bios with new data to be drained into the
2261		 * stripe cache
2262		 */
2263		if (!expand) {
2264			sh->reconstruct_state = reconstruct_state_drain_run;
2265			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2266		} else
2267			sh->reconstruct_state = reconstruct_state_run;
2268
2269		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2270
2271		for (i = disks; i--; ) {
2272			struct r5dev *dev = &sh->dev[i];
2273
2274			if (dev->towrite) {
2275				set_bit(R5_LOCKED, &dev->flags);
2276				set_bit(R5_Wantdrain, &dev->flags);
2277				if (!expand)
2278					clear_bit(R5_UPTODATE, &dev->flags);
2279				s->locked++;
2280			}
2281		}
2282		if (s->locked + conf->max_degraded == disks)
2283			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2284				atomic_inc(&conf->pending_full_writes);
2285	} else {
2286		BUG_ON(level == 6);
2287		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2288			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2289
2290		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2291		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2292		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2293		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2294
2295		for (i = disks; i--; ) {
2296			struct r5dev *dev = &sh->dev[i];
2297			if (i == pd_idx)
2298				continue;
2299
2300			if (dev->towrite &&
2301			    (test_bit(R5_UPTODATE, &dev->flags) ||
2302			     test_bit(R5_Wantcompute, &dev->flags))) {
2303				set_bit(R5_Wantdrain, &dev->flags);
2304				set_bit(R5_LOCKED, &dev->flags);
2305				clear_bit(R5_UPTODATE, &dev->flags);
2306				s->locked++;
2307			}
2308		}
2309	}
2310
2311	/* keep the parity disk(s) locked while asynchronous operations
2312	 * are in flight
2313	 */
2314	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2315	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2316	s->locked++;
2317
2318	if (level == 6) {
2319		int qd_idx = sh->qd_idx;
2320		struct r5dev *dev = &sh->dev[qd_idx];
2321
2322		set_bit(R5_LOCKED, &dev->flags);
2323		clear_bit(R5_UPTODATE, &dev->flags);
2324		s->locked++;
2325	}
2326
2327	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2328		__func__, (unsigned long long)sh->sector,
2329		s->locked, s->ops_request);
2330}
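
/*
 * Editor's illustration: on a 4-disk RAID5 (3 data + P) where all three
 * data blocks have new data queued in 'towrite', the first (rcw) branch
 * above locks the three data blocks (s->locked = 3), sees that
 * locked + max_degraded == disks (3 + 1 == 4) and sets STRIPE_FULL_WRITE,
 * then locks the parity block as well: the whole stripe is reconstructed
 * from the new data and written out without any pre-reads.
 */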
2331
2332/*
2333 * Each stripe/dev can have one or more bios attached.
2334 * toread/towrite point to the first in a chain.
2335 * The bi_next chain must be in order.
2336 */
2337static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2338{
2339	struct bio **bip;
2340	struct r5conf *conf = sh->raid_conf;
2341	int firstwrite=0;
2342
2343	pr_debug("adding bi b#%llu to stripe s#%llu\n",
2344		(unsigned long long)bi->bi_sector,
2345		(unsigned long long)sh->sector);
2346
2347
2348	spin_lock_irq(&conf->device_lock);
2349	if (forwrite) {
2350		bip = &sh->dev[dd_idx].towrite;
2351		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2352			firstwrite = 1;
2353	} else
2354		bip = &sh->dev[dd_idx].toread;
2355	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2356		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2357			goto overlap;
2358		bip = & (*bip)->bi_next;
2359	}
2360	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2361		goto overlap;
2362
2363	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2364	if (*bip)
2365		bi->bi_next = *bip;
2366	*bip = bi;
2367	bi->bi_phys_segments++;
2368
2369	if (forwrite) {
2370		/* check if page is covered */
2371		sector_t sector = sh->dev[dd_idx].sector;
2372		for (bi=sh->dev[dd_idx].towrite;
2373		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2374			     bi && bi->bi_sector <= sector;
2375		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2376			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2377				sector = bi->bi_sector + (bi->bi_size>>9);
2378		}
2379		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2380			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2381	}
2382	spin_unlock_irq(&conf->device_lock);
2383
2384	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2385		(unsigned long long)(*bip)->bi_sector,
2386		(unsigned long long)sh->sector, dd_idx);
2387
2388	if (conf->mddev->bitmap && firstwrite) {
2389		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2390				  STRIPE_SECTORS, 0);
2391		sh->bm_seq = conf->seq_flush+1;
2392		set_bit(STRIPE_BIT_DELAY, &sh->state);
2393	}
2394	return 1;
2395
2396 overlap:
2397	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2398	spin_unlock_irq(&conf->device_lock);
2399	return 0;
2400}
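
/*
 * Editor's illustration of the "page is covered" walk above, assuming
 * 4KiB pages (STRIPE_SECTORS = 8): with dev->sector = 360 and two queued
 * write bios of 4 sectors each starting at sectors 360 and 364, 'sector'
 * advances from 360 to 364 and then to 368, which reaches
 * dev->sector + STRIPE_SECTORS, so R5_OVERWRITE is set and the old page
 * contents never need to be read for this write.
 */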
2401
2402static void end_reshape(struct r5conf *conf);
2403
2404static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
2405			    struct stripe_head *sh)
2406{
2407	int sectors_per_chunk =
2408		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2409	int dd_idx;
2410	int chunk_offset = sector_div(stripe, sectors_per_chunk);
2411	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2412
2413	raid5_compute_sector(conf,
2414			     stripe * (disks - conf->max_degraded)
2415			     *sectors_per_chunk + chunk_offset,
2416			     previous,
2417			     &dd_idx, sh);
2418}
2419
2420static void
2421handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2422				struct stripe_head_state *s, int disks,
2423				struct bio **return_bi)
2424{
2425	int i;
2426	for (i = disks; i--; ) {
2427		struct bio *bi;
2428		int bitmap_end = 0;
2429
2430		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2431			struct md_rdev *rdev;
2432			rcu_read_lock();
2433			rdev = rcu_dereference(conf->disks[i].rdev);
2434			if (rdev && test_bit(In_sync, &rdev->flags))
2435				atomic_inc(&rdev->nr_pending);
2436			else
2437				rdev = NULL;
2438			rcu_read_unlock();
2439			if (rdev) {
2440				if (!rdev_set_badblocks(
2441					    rdev,
2442					    sh->sector,
2443					    STRIPE_SECTORS, 0))
2444					md_error(conf->mddev, rdev);
2445				rdev_dec_pending(rdev, conf->mddev);
2446			}
2447		}
2448		spin_lock_irq(&conf->device_lock);
2449		/* fail all writes first */
2450		bi = sh->dev[i].towrite;
2451		sh->dev[i].towrite = NULL;
2452		if (bi) {
2453			s->to_write--;
2454			bitmap_end = 1;
2455		}
2456
2457		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2458			wake_up(&conf->wait_for_overlap);
2459
2460		while (bi && bi->bi_sector <
2461			sh->dev[i].sector + STRIPE_SECTORS) {
2462			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2463			clear_bit(BIO_UPTODATE, &bi->bi_flags);
2464			if (!raid5_dec_bi_phys_segments(bi)) {
2465				md_write_end(conf->mddev);
2466				bi->bi_next = *return_bi;
2467				*return_bi = bi;
2468			}
2469			bi = nextbi;
2470		}
2471		/* and fail all 'written' */
2472		bi = sh->dev[i].written;
2473		sh->dev[i].written = NULL;
2474		if (bi) bitmap_end = 1;
2475		while (bi && bi->bi_sector <
2476		       sh->dev[i].sector + STRIPE_SECTORS) {
2477			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2478			clear_bit(BIO_UPTODATE, &bi->bi_flags);
2479			if (!raid5_dec_bi_phys_segments(bi)) {
2480				md_write_end(conf->mddev);
2481				bi->bi_next = *return_bi;
2482				*return_bi = bi;
2483			}
2484			bi = bi2;
2485		}
2486
2487		/* fail any reads if this device is non-operational and
2488		 * the data has not reached the cache yet.
2489		 */
2490		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2491		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2492		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
2493			bi = sh->dev[i].toread;
2494			sh->dev[i].toread = NULL;
2495			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2496				wake_up(&conf->wait_for_overlap);
2497			if (bi) s->to_read--;
2498			while (bi && bi->bi_sector <
2499			       sh->dev[i].sector + STRIPE_SECTORS) {
2500				struct bio *nextbi =
2501					r5_next_bio(bi, sh->dev[i].sector);
2502				clear_bit(BIO_UPTODATE, &bi->bi_flags);
2503				if (!raid5_dec_bi_phys_segments(bi)) {
2504					bi->bi_next = *return_bi;
2505					*return_bi = bi;
2506				}
2507				bi = nextbi;
2508			}
2509		}
2510		spin_unlock_irq(&conf->device_lock);
2511		if (bitmap_end)
2512			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2513					STRIPE_SECTORS, 0, 0);
2514		/* If we were in the middle of a write the parity block might
2515		 * still be locked - so just clear all R5_LOCKED flags
2516		 */
2517		clear_bit(R5_LOCKED, &sh->dev[i].flags);
2518	}
2519
2520	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2521		if (atomic_dec_and_test(&conf->pending_full_writes))
2522			md_wakeup_thread(conf->mddev->thread);
2523}
2524
2525static void
2526handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
2527		   struct stripe_head_state *s)
2528{
2529	int abort = 0;
2530	int i;
2531
2532	clear_bit(STRIPE_SYNCING, &sh->state);
2533	s->syncing = 0;
2534	s->replacing = 0;
2535	/* There is nothing more to do for sync/check/repair.
2536	 * Don't even need to abort as that is handled elsewhere
2537	 * if needed, and not always wanted e.g. if there is a known
2538	 * bad block here.
2539	 * For recover/replace we need to record a bad block on all
2540	 * non-sync devices, or abort the recovery
2541	 */
2542	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
2543		/* During recovery devices cannot be removed, so
2544		 * locking and refcounting of rdevs is not needed
2545		 */
2546		for (i = 0; i < conf->raid_disks; i++) {
2547			struct md_rdev *rdev = conf->disks[i].rdev;
2548			if (rdev
2549			    && !test_bit(Faulty, &rdev->flags)
2550			    && !test_bit(In_sync, &rdev->flags)
2551			    && !rdev_set_badblocks(rdev, sh->sector,
2552						   STRIPE_SECTORS, 0))
2553				abort = 1;
2554			rdev = conf->disks[i].replacement;
2555			if (rdev
2556			    && !test_bit(Faulty, &rdev->flags)
2557			    && !test_bit(In_sync, &rdev->flags)
2558			    && !rdev_set_badblocks(rdev, sh->sector,
2559						   STRIPE_SECTORS, 0))
2560				abort = 1;
2561		}
2562		if (abort)
2563			conf->recovery_disabled =
2564				conf->mddev->recovery_disabled;
2565	}
2566	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
2567}
2568
2569static int want_replace(struct stripe_head *sh, int disk_idx)
2570{
2571	struct md_rdev *rdev;
2572	int rv = 0;
2573	/* Doing recovery so rcu locking not required */
2574	rdev = sh->raid_conf->disks[disk_idx].replacement;
2575	if (rdev
2576	    && !test_bit(Faulty, &rdev->flags)
2577	    && !test_bit(In_sync, &rdev->flags)
2578	    && (rdev->recovery_offset <= sh->sector
2579		|| rdev->mddev->recovery_cp <= sh->sector))
2580		rv = 1;
2581
2582	return rv;
2583}
2584
2585/* fetch_block - checks the given member device to see if its data needs
2586 * to be read or computed to satisfy a request.
2587 *
2588 * Returns 1 when no more member devices need to be checked, otherwise returns
2589 * 0 to tell the loop in handle_stripe_fill to continue
2590 */
2591static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2592		       int disk_idx, int disks)
2593{
2594	struct r5dev *dev = &sh->dev[disk_idx];
2595	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2596				  &sh->dev[s->failed_num[1]] };
2597
2598	/* is the data in this block needed, and can we get it? */
2599	if (!test_bit(R5_LOCKED, &dev->flags) &&
2600	    !test_bit(R5_UPTODATE, &dev->flags) &&
2601	    (dev->toread ||
2602	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2603	     s->syncing || s->expanding ||
2604	     (s->replacing && want_replace(sh, disk_idx)) ||
2605	     (s->failed >= 1 && fdev[0]->toread) ||
2606	     (s->failed >= 2 && fdev[1]->toread) ||
2607	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2608	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2609	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2610		/* we would like to get this block, possibly by computing it,
2611		 * otherwise read it if the backing disk is insync
2612		 */
2613		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2614		BUG_ON(test_bit(R5_Wantread, &dev->flags));
2615		if ((s->uptodate == disks - 1) &&
2616		    (s->failed && (disk_idx == s->failed_num[0] ||
2617				   disk_idx == s->failed_num[1]))) {
2618			/* the disk has failed and we're requested to fetch
2619			 * its block, so compute it instead
2620			 */
2621			pr_debug("Computing stripe %llu block %d\n",
2622			       (unsigned long long)sh->sector, disk_idx);
2623			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2624			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2625			set_bit(R5_Wantcompute, &dev->flags);
2626			sh->ops.target = disk_idx;
2627			sh->ops.target2 = -1; /* no 2nd target */
2628			s->req_compute = 1;
2629			/* Careful: from this point on 'uptodate' is in the eye
2630			 * of raid_run_ops which services 'compute' operations
2631			 * before writes. R5_Wantcompute flags a block that will
2632			 * be R5_UPTODATE by the time it is needed for a
2633			 * subsequent operation.
2634			 */
2635			s->uptodate++;
2636			return 1;
2637		} else if (s->uptodate == disks-2 && s->failed >= 2) {
2638			/* Computing 2-failure is *very* expensive; only
2639			 * do it if failed >= 2
2640			 */
2641			int other;
2642			for (other = disks; other--; ) {
2643				if (other == disk_idx)
2644					continue;
2645				if (!test_bit(R5_UPTODATE,
2646				      &sh->dev[other].flags))
2647					break;
2648			}
2649			BUG_ON(other < 0);
2650			pr_debug("Computing stripe %llu blocks %d,%d\n",
2651			       (unsigned long long)sh->sector,
2652			       disk_idx, other);
2653			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2654			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2655			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2656			set_bit(R5_Wantcompute, &sh->dev[other].flags);
2657			sh->ops.target = disk_idx;
2658			sh->ops.target2 = other;
2659			s->uptodate += 2;
2660			s->req_compute = 1;
2661			return 1;
2662		} else if (test_bit(R5_Insync, &dev->flags)) {
2663			set_bit(R5_LOCKED, &dev->flags);
2664			set_bit(R5_Wantread, &dev->flags);
2665			s->locked++;
2666			pr_debug("Reading block %d (sync=%d)\n",
2667				disk_idx, s->syncing);
2668		}
2669	}
2670
2671	return 0;
2672}
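
/*
 * Editor's illustration: on a RAID5 stripe with one failed device, a read
 * aimed at the failed device's block takes the first branch above once
 * every other block is up to date (s->uptodate == disks - 1): rather than
 * issuing a disk read, R5_Wantcompute is set and the missing block is
 * rebuilt by XOR-ing the surviving data and parity blocks.
 */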
2673
2674/**
2675 * handle_stripe_fill - read or compute data to satisfy pending requests.
2676 */
2677static void handle_stripe_fill(struct stripe_head *sh,
2678			       struct stripe_head_state *s,
2679			       int disks)
2680{
2681	int i;
2682
2683	/* look for blocks to read/compute, skip this if a compute
2684	 * is already in flight, or if the stripe contents are in the
2685	 * midst of changing due to a write
2686	 */
2687	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2688	    !sh->reconstruct_state)
2689		for (i = disks; i--; )
2690			if (fetch_block(sh, s, i, disks))
2691				break;
2692	set_bit(STRIPE_HANDLE, &sh->state);
2693}
2694
2695
2696/* handle_stripe_clean_event
2697 * any written block on an uptodate or failed drive can be returned.
2698 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2699 * never LOCKED, so we don't need to test 'failed' directly.
2700 */
2701static void handle_stripe_clean_event(struct r5conf *conf,
2702	struct stripe_head *sh, int disks, struct bio **return_bi)
2703{
2704	int i;
2705	struct r5dev *dev;
2706
2707	for (i = disks; i--; )
2708		if (sh->dev[i].written) {
2709			dev = &sh->dev[i];
2710			if (!test_bit(R5_LOCKED, &dev->flags) &&
2711				test_bit(R5_UPTODATE, &dev->flags)) {
2712				/* We can return any write requests */
2713				struct bio *wbi, *wbi2;
2714				int bitmap_end = 0;
2715				pr_debug("Return write for disc %d\n", i);
2716				spin_lock_irq(&conf->device_lock);
2717				wbi = dev->written;
2718				dev->written = NULL;
2719				while (wbi && wbi->bi_sector <
2720					dev->sector + STRIPE_SECTORS) {
2721					wbi2 = r5_next_bio(wbi, dev->sector);
2722					if (!raid5_dec_bi_phys_segments(wbi)) {
2723						md_write_end(conf->mddev);
2724						wbi->bi_next = *return_bi;
2725						*return_bi = wbi;
2726					}
2727					wbi = wbi2;
2728				}
2729				if (dev->towrite == NULL)
2730					bitmap_end = 1;
2731				spin_unlock_irq(&conf->device_lock);
2732				if (bitmap_end)
2733					bitmap_endwrite(conf->mddev->bitmap,
2734							sh->sector,
2735							STRIPE_SECTORS,
2736					 !test_bit(STRIPE_DEGRADED, &sh->state),
2737							0);
2738			}
2739		}
2740
2741	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2742		if (atomic_dec_and_test(&conf->pending_full_writes))
2743			md_wakeup_thread(conf->mddev->thread);
2744}
2745
2746static void handle_stripe_dirtying(struct r5conf *conf,
2747				   struct stripe_head *sh,
2748				   struct stripe_head_state *s,
2749				   int disks)
2750{
2751	int rmw = 0, rcw = 0, i;
2752	if (conf->max_degraded == 2) {
2753		/* RAID6 requires 'rcw' in current implementation
2754		 * Calculate the real rcw later - for now fake it to
2755		 * look like rcw is cheaper
2756		 */
2757		rcw = 1; rmw = 2;
2758	} else for (i = disks; i--; ) {
2759		/* would I have to read this buffer for read_modify_write */
2760		struct r5dev *dev = &sh->dev[i];
2761		if ((dev->towrite || i == sh->pd_idx) &&
2762		    !test_bit(R5_LOCKED, &dev->flags) &&
2763		    !(test_bit(R5_UPTODATE, &dev->flags) ||
2764		      test_bit(R5_Wantcompute, &dev->flags))) {
2765			if (test_bit(R5_Insync, &dev->flags))
2766				rmw++;
2767			else
2768				rmw += 2*disks;  /* cannot read it */
2769		}
2770		/* Would I have to read this buffer for reconstruct_write */
2771		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2772		    !test_bit(R5_LOCKED, &dev->flags) &&
2773		    !(test_bit(R5_UPTODATE, &dev->flags) ||
2774		    test_bit(R5_Wantcompute, &dev->flags))) {
2775			if (test_bit(R5_Insync, &dev->flags)) rcw++;
2776			else
2777				rcw += 2*disks;
2778		}
2779	}
2780	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2781		(unsigned long long)sh->sector, rmw, rcw);
2782	set_bit(STRIPE_HANDLE, &sh->state);
2783	if (rmw < rcw && rmw > 0)
2784		/* prefer read-modify-write, but need to get some data */
2785		for (i = disks; i--; ) {
2786			struct r5dev *dev = &sh->dev[i];
2787			if ((dev->towrite || i == sh->pd_idx) &&
2788			    !test_bit(R5_LOCKED, &dev->flags) &&
2789			    !(test_bit(R5_UPTODATE, &dev->flags) ||
2790			    test_bit(R5_Wantcompute, &dev->flags)) &&
2791			    test_bit(R5_Insync, &dev->flags)) {
2792				if (
2793				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2794					pr_debug("Read_old block "
2795						"%d for r-m-w\n", i);
2796					set_bit(R5_LOCKED, &dev->flags);
2797					set_bit(R5_Wantread, &dev->flags);
2798					s->locked++;
2799				} else {
2800					set_bit(STRIPE_DELAYED, &sh->state);
2801					set_bit(STRIPE_HANDLE, &sh->state);
2802				}
2803			}
2804		}
2805	if (rcw <= rmw && rcw > 0) {
2806		/* want reconstruct write, but need to get some data */
2807		rcw = 0;
2808		for (i = disks; i--; ) {
2809			struct r5dev *dev = &sh->dev[i];
2810			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2811			    i != sh->pd_idx && i != sh->qd_idx &&
2812			    !test_bit(R5_LOCKED, &dev->flags) &&
2813			    !(test_bit(R5_UPTODATE, &dev->flags) ||
2814			      test_bit(R5_Wantcompute, &dev->flags))) {
2815				rcw++;
2816				if (!test_bit(R5_Insync, &dev->flags))
2817					continue; /* it's a failed drive */
2818				if (
2819				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2820					pr_debug("Read_old block "
2821						"%d for Reconstruct\n", i);
2822					set_bit(R5_LOCKED, &dev->flags);
2823					set_bit(R5_Wantread, &dev->flags);
2824					s->locked++;
2825				} else {
2826					set_bit(STRIPE_DELAYED, &sh->state);
2827					set_bit(STRIPE_HANDLE, &sh->state);
2828				}
2829			}
2830		}
2831	}
2832	/* now if nothing is locked, and if we have enough data,
2833	 * we can start a write request
2834	 */
2835	/* since handle_stripe can be called at any time we need to handle the
2836	 * case where a compute block operation has been submitted and then a
2837	 * subsequent call wants to start a write request.  raid_run_ops only
2838	 * handles the case where compute block and reconstruct are requested
2839	 * simultaneously.  If this is not the case then new writes need to be
2840	 * held off until the compute completes.
2841	 */
2842	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2843	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2844	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2845		schedule_reconstruction(sh, s, rcw == 0, 0);
2846}
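
/*
 * Editor's illustration of the rmw/rcw costing above (healthy RAID5 with
 * 5 disks = 4 data + P, nothing cached): a write fully overwriting one
 * data block gives rmw = 2 (old data block plus old parity must be read)
 * and rcw = 3 (the three untouched data blocks must be read), so
 * read-modify-write wins; a write covering three of the four data blocks
 * gives rmw = 4 but rcw = 1, so reconstruct-write wins.
 */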
2847
2848static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
2849				struct stripe_head_state *s, int disks)
2850{
2851	struct r5dev *dev = NULL;
2852
2853	set_bit(STRIPE_HANDLE, &sh->state);
2854
2855	switch (sh->check_state) {
2856	case check_state_idle:
2857		/* start a new check operation if there are no failures */
2858		if (s->failed == 0) {
2859			BUG_ON(s->uptodate != disks);
2860			sh->check_state = check_state_run;
2861			set_bit(STRIPE_OP_CHECK, &s->ops_request);
2862			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2863			s->uptodate--;
2864			break;
2865		}
2866		dev = &sh->dev[s->failed_num[0]];
2867		/* fall through */
2868	case check_state_compute_result:
2869		sh->check_state = check_state_idle;
2870		if (!dev)
2871			dev = &sh->dev[sh->pd_idx];
2872
2873		/* check that a write has not made the stripe insync */
2874		if (test_bit(STRIPE_INSYNC, &sh->state))
2875			break;
2876
2877		/* either failed parity check, or recovery is happening */
2878		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2879		BUG_ON(s->uptodate != disks);
2880
2881		set_bit(R5_LOCKED, &dev->flags);
2882		s->locked++;
2883		set_bit(R5_Wantwrite, &dev->flags);
2884
2885		clear_bit(STRIPE_DEGRADED, &sh->state);
2886		set_bit(STRIPE_INSYNC, &sh->state);
2887		break;
2888	case check_state_run:
2889		break; /* we will be called again upon completion */
2890	case check_state_check_result:
2891		sh->check_state = check_state_idle;
2892
2893		/* if a failure occurred during the check operation, leave
2894		 * STRIPE_INSYNC not set and let the stripe be handled again
2895		 */
2896		if (s->failed)
2897			break;
2898
2899		/* handle a successful check operation, if parity is correct
2900		 * we are done.  Otherwise update the mismatch count and repair
2901		 * parity if !MD_RECOVERY_CHECK
2902		 */
2903		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2904			/* parity is correct (on disc,
2905			 * not in buffer any more)
2906			 */
2907			set_bit(STRIPE_INSYNC, &sh->state);
2908		else {
2909			conf->mddev->resync_mismatches += STRIPE_SECTORS;
2910			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2911				/* don't try to repair!! */
2912				set_bit(STRIPE_INSYNC, &sh->state);
2913			else {
2914				sh->check_state = check_state_compute_run;
2915				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2916				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2917				set_bit(R5_Wantcompute,
2918					&sh->dev[sh->pd_idx].flags);
2919				sh->ops.target = sh->pd_idx;
2920				sh->ops.target2 = -1;
2921				s->uptodate++;
2922			}
2923		}
2924		break;
2925	case check_state_compute_run:
2926		break;
2927	default:
2928		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2929		       __func__, sh->check_state,
2930		       (unsigned long long) sh->sector);
2931		BUG();
2932	}
2933}
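
/*
 * Editor's note: for RAID5 the check logic above is a small state machine:
 * check_state_idle starts an asynchronous zero-sum check of P
 * (check_state_run); when the result comes back
 * (check_state_check_result), a clean sum marks the stripe in sync, while
 * a mismatch bumps resync_mismatches and, unless this is a read-only
 * "check" pass (MD_RECOVERY_CHECK), recomputes the parity block
 * (check_state_compute_run/_result) and schedules it to be written out.
 */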
2934
2935
2936static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
2937				  struct stripe_head_state *s,
2938				  int disks)
2939{
2940	int pd_idx = sh->pd_idx;
2941	int qd_idx = sh->qd_idx;
2942	struct r5dev *dev;
2943
2944	set_bit(STRIPE_HANDLE, &sh->state);
2945
2946	BUG_ON(s->failed > 2);
2947
2948	/* Want to check and possibly repair P and Q.
2949	 * However there could be one 'failed' device, in which
2950	 * case we can only check one of them, possibly using the
2951	 * other to generate missing data
2952	 */
2953
2954	switch (sh->check_state) {
2955	case check_state_idle:
2956		/* start a new check operation if there are < 2 failures */
2957		if (s->failed == s->q_failed) {
2958			/* The only possible failed device holds Q, so it
2959			 * makes sense to check P (If anything else were failed,
2960			 * we would have used P to recreate it).
2961			 */
2962			sh->check_state = check_state_run;
2963		}
2964		if (!s->q_failed && s->failed < 2) {
2965			/* Q is not failed, and we didn't use it to generate
2966			 * anything, so it makes sense to check it
2967			 */
2968			if (sh->check_state == check_state_run)
2969				sh->check_state = check_state_run_pq;
2970			else
2971				sh->check_state = check_state_run_q;
2972		}
2973
2974		/* discard potentially stale zero_sum_result */
2975		sh->ops.zero_sum_result = 0;
2976
2977		if (sh->check_state == check_state_run) {
2978			/* async_xor_zero_sum destroys the contents of P */
2979			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2980			s->uptodate--;
2981		}
2982		if (sh->check_state >= check_state_run &&
2983		    sh->check_state <= check_state_run_pq) {
2984			/* async_syndrome_zero_sum preserves P and Q, so
2985			 * no need to mark them !uptodate here
2986			 */
2987			set_bit(STRIPE_OP_CHECK, &s->ops_request);
2988			break;
2989		}
2990
2991		/* we have 2-disk failure */
2992		BUG_ON(s->failed != 2);
2993		/* fall through */
2994	case check_state_compute_result:
2995		sh->check_state = check_state_idle;
2996
2997		/* check that a write has not made the stripe insync */
2998		if (test_bit(STRIPE_INSYNC, &sh->state))
2999			break;
3000
3001		/* now write out any block on a failed drive,
3002		 * or P or Q if they were recomputed
3003		 */
3004		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3005		if (s->failed == 2) {
3006			dev = &sh->dev[s->failed_num[1]];
3007			s->locked++;
3008			set_bit(R5_LOCKED, &dev->flags);
3009			set_bit(R5_Wantwrite, &dev->flags);
3010		}
3011		if (s->failed >= 1) {
3012			dev = &sh->dev[s->failed_num[0]];
3013			s->locked++;
3014			set_bit(R5_LOCKED, &dev->flags);
3015			set_bit(R5_Wantwrite, &dev->flags);
3016		}
3017		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3018			dev = &sh->dev[pd_idx];
3019			s->locked++;
3020			set_bit(R5_LOCKED, &dev->flags);
3021			set_bit(R5_Wantwrite, &dev->flags);
3022		}
3023		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3024			dev = &sh->dev[qd_idx];
3025			s->locked++;
3026			set_bit(R5_LOCKED, &dev->flags);
3027			set_bit(R5_Wantwrite, &dev->flags);
3028		}
3029		clear_bit(STRIPE_DEGRADED, &sh->state);
3030
3031		set_bit(STRIPE_INSYNC, &sh->state);
3032		break;
3033	case check_state_run:
3034	case check_state_run_q:
3035	case check_state_run_pq:
3036		break; /* we will be called again upon completion */
3037	case check_state_check_result:
3038		sh->check_state = check_state_idle;
3039
3040		/* handle a successful check operation, if parity is correct
3041		 * we are done.  Otherwise update the mismatch count and repair
3042		 * parity if !MD_RECOVERY_CHECK
3043		 */
3044		if (sh->ops.zero_sum_result == 0) {
3045			/* both parities are correct */
3046			if (!s->failed)
3047				set_bit(STRIPE_INSYNC, &sh->state);
3048			else {
3049				/* in contrast to the raid5 case we can validate
3050				 * parity, but still have a failure to write
3051				 * back
3052				 */
3053				sh->check_state = check_state_compute_result;
3054				/* Returning at this point means that we may go
3055				 * off and bring p and/or q uptodate again so
3056				 * we make sure to check zero_sum_result again
3057				 * to verify if p or q need writeback
3058				 */
3059			}
3060		} else {
3061			conf->mddev->resync_mismatches += STRIPE_SECTORS;
3062			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3063				/* don't try to repair!! */
3064				set_bit(STRIPE_INSYNC, &sh->state);
3065			else {
3066				int *target = &sh->ops.target;
3067
3068				sh->ops.target = -1;
3069				sh->ops.target2 = -1;
3070				sh->check_state = check_state_compute_run;
3071				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3072				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3073				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3074					set_bit(R5_Wantcompute,
3075						&sh->dev[pd_idx].flags);
3076					*target = pd_idx;
3077					target = &sh->ops.target2;
3078					s->uptodate++;
3079				}
3080				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3081					set_bit(R5_Wantcompute,
3082						&sh->dev[qd_idx].flags);
3083					*target = qd_idx;
3084					s->uptodate++;
3085				}
3086			}
3087		}
3088		break;
3089	case check_state_compute_run:
3090		break;
3091	default:
3092		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3093		       __func__, sh->check_state,
3094		       (unsigned long long) sh->sector);
3095		BUG();
3096	}
3097}
3098
3099static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3100{
3101	int i;
3102
3103	/* We have read all the blocks in this stripe and now we need to
3104	 * copy some of them into a target stripe for expand.
3105	 */
3106	struct dma_async_tx_descriptor *tx = NULL;
3107	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3108	for (i = 0; i < sh->disks; i++)
3109		if (i != sh->pd_idx && i != sh->qd_idx) {
3110			int dd_idx, j;
3111			struct stripe_head *sh2;
3112			struct async_submit_ctl submit;
3113
3114			sector_t bn = compute_blocknr(sh, i, 1);
3115			sector_t s = raid5_compute_sector(conf, bn, 0,
3116							  &dd_idx, NULL);
3117			sh2 = get_active_stripe(conf, s, 0, 1, 1);
3118			if (sh2 == NULL)
3119				/* so far only the early blocks of this stripe
3120				 * have been requested.  When later blocks
3121				 * get requested, we will try again
3122				 */
3123				continue;
3124			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3125			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3126				/* must have already done this block */
3127				release_stripe(sh2);
3128				continue;
3129			}
3130
3131			/* place all the copies on one channel */
3132			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3133			tx = async_memcpy(sh2->dev[dd_idx].page,
3134					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
3135					  &submit);
3136
3137			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3138			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3139			for (j = 0; j < conf->raid_disks; j++)
3140				if (j != sh2->pd_idx &&
3141				    j != sh2->qd_idx &&
3142				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
3143					break;
3144			if (j == conf->raid_disks) {
3145				set_bit(STRIPE_EXPAND_READY, &sh2->state);
3146				set_bit(STRIPE_HANDLE, &sh2->state);
3147			}
3148			release_stripe(sh2);
3149
3150		}
3151	/* done submitting copies, wait for them to complete */
3152	if (tx) {
3153		async_tx_ack(tx);
3154		dma_wait_for_async_tx(tx);
3155	}
3156}
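
/*
 * Editor's note: the expansion copy above maps each data block of this
 * (old-geometry) stripe to its new home with compute_blocknr(..., 1)
 * followed by raid5_compute_sector(..., 0, ...), copies the page into the
 * destination stripe with async_memcpy(), and marks a destination stripe
 * STRIPE_EXPAND_READY once all of its data blocks have been filled in.
 */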
3157
3158/*
3159 * handle_stripe - do things to a stripe.
3160 *
3161 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3162 * state of various bits to see what needs to be done.
3163 * Possible results:
3164 *    return some read requests which now have data
3165 *    return some write requests which are safely on storage
3166 *    schedule a read on some buffers
3167 *    schedule a write of some buffers
3168 *    return confirmation of parity correctness
3169 *
3170 */
3171
3172static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3173{
3174	struct r5conf *conf = sh->raid_conf;
3175	int disks = sh->disks;
3176	struct r5dev *dev;
3177	int i;
3178	int do_recovery = 0;
3179
3180	memset(s, 0, sizeof(*s));
3181
3182	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3183	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
3184	s->failed_num[0] = -1;
3185	s->failed_num[1] = -1;
3186
3187	/* Now to look around and see what can be done */
3188	rcu_read_lock();
3189	spin_lock_irq(&conf->device_lock);
3190	for (i=disks; i--; ) {
3191		struct md_rdev *rdev;
3192		sector_t first_bad;
3193		int bad_sectors;
3194		int is_bad = 0;
3195
3196		dev = &sh->dev[i];
3197
3198		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3199			 i, dev->flags,
3200			 dev->toread, dev->towrite, dev->written);
3201		/* maybe we can reply to a read
3202		 *
3203		 * new wantfill requests are only permitted while
3204		 * ops_complete_biofill is guaranteed to be inactive
3205		 */
3206		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3207		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3208			set_bit(R5_Wantfill, &dev->flags);
3209
3210		/* now count some things */
3211		if (test_bit(R5_LOCKED, &dev->flags))
3212			s->locked++;
3213		if (test_bit(R5_UPTODATE, &dev->flags))
3214			s->uptodate++;
3215		if (test_bit(R5_Wantcompute, &dev->flags)) {
3216			s->compute++;
3217			BUG_ON(s->compute > 2);
3218		}
3219
3220		if (test_bit(R5_Wantfill, &dev->flags))
3221			s->to_fill++;
3222		else if (dev->toread)
3223			s->to_read++;
3224		if (dev->towrite) {
3225			s->to_write++;
3226			if (!test_bit(R5_OVERWRITE, &dev->flags))
3227				s->non_overwrite++;
3228		}
3229		if (dev->written)
3230			s->written++;
3231		/* Prefer to use the replacement for reads, but only
3232		 * if it is recovered enough and has no bad blocks.
3233		 */
3234		rdev = rcu_dereference(conf->disks[i].replacement);
3235		if (rdev && !test_bit(Faulty, &rdev->flags) &&
3236		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
3237		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3238				 &first_bad, &bad_sectors))
3239			set_bit(R5_ReadRepl, &dev->flags);
3240		else {
3241			if (rdev)
3242				set_bit(R5_NeedReplace, &dev->flags);
3243			rdev = rcu_dereference(conf->disks[i].rdev);
3244			clear_bit(R5_ReadRepl, &dev->flags);
3245		}
3246		if (rdev && test_bit(Faulty, &rdev->flags))
3247			rdev = NULL;
3248		if (rdev) {
3249			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3250					     &first_bad, &bad_sectors);
3251			if (s->blocked_rdev == NULL
3252			    && (test_bit(Blocked, &rdev->flags)
3253				|| is_bad < 0)) {
3254				if (is_bad < 0)
3255					set_bit(BlockedBadBlocks,
3256						&rdev->flags);
3257				s->blocked_rdev = rdev;
3258				atomic_inc(&rdev->nr_pending);
3259			}
3260		}
3261		clear_bit(R5_Insync, &dev->flags);
3262		if (!rdev)
3263			/* Not in-sync */;
3264		else if (is_bad) {
3265			/* also not in-sync */
3266			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
3267			    test_bit(R5_UPTODATE, &dev->flags)) {
3268				/* treat as in-sync, but with a read error
3269				 * which we can now try to correct
3270				 */
3271				set_bit(R5_Insync, &dev->flags);
3272				set_bit(R5_ReadError, &dev->flags);
3273			}
3274		} else if (test_bit(In_sync, &rdev->flags))
3275			set_bit(R5_Insync, &dev->flags);
3276		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3277			/* in sync if before recovery_offset */
3278			set_bit(R5_Insync, &dev->flags);
3279		else if (test_bit(R5_UPTODATE, &dev->flags) &&
3280			 test_bit(R5_Expanded, &dev->flags))
3281			/* If we've reshaped into here, we assume it is Insync.
3282			 * We will shortly update recovery_offset to make
3283			 * it official.
3284			 */
3285			set_bit(R5_Insync, &dev->flags);
3286
3287		if (rdev && test_bit(R5_WriteError, &dev->flags)) {
3288			/* This flag does not apply to '.replacement',
3289			 * only to .rdev, so make sure to check that */
3290			struct md_rdev *rdev2 = rcu_dereference(
3291				conf->disks[i].rdev);
3292			if (rdev2 == rdev)
3293				clear_bit(R5_Insync, &dev->flags);
3294			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3295				s->handle_bad_blocks = 1;
3296				atomic_inc(&rdev2->nr_pending);
3297			} else
3298				clear_bit(R5_WriteError, &dev->flags);
3299		}
3300		if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
3301			/* This flag does not apply to '.replacement',
3302			 * only to .rdev, so make sure to check that */
3303			struct md_rdev *rdev2 = rcu_dereference(
3304				conf->disks[i].rdev);
3305			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3306				s->handle_bad_blocks = 1;
3307				atomic_inc(&rdev2->nr_pending);
3308			} else
3309				clear_bit(R5_MadeGood, &dev->flags);
3310		}
3311		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
3312			struct md_rdev *rdev2 = rcu_dereference(
3313				conf->disks[i].replacement);
3314			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
3315				s->handle_bad_blocks = 1;
3316				atomic_inc(&rdev2->nr_pending);
3317			} else
3318				clear_bit(R5_MadeGoodRepl, &dev->flags);
3319		}
3320		if (!test_bit(R5_Insync, &dev->flags)) {
3321			/* The ReadError flag will just be confusing now */
3322			clear_bit(R5_ReadError, &dev->flags);
3323			clear_bit(R5_ReWrite, &dev->flags);
3324		}
3325		if (test_bit(R5_ReadError, &dev->flags))
3326			clear_bit(R5_Insync, &dev->flags);
3327		if (!test_bit(R5_Insync, &dev->flags)) {
3328			if (s->failed < 2)
3329				s->failed_num[s->failed] = i;
3330			s->failed++;
3331			if (rdev && !test_bit(Faulty, &rdev->flags))
3332				do_recovery = 1;
3333		}
3334	}
3335	spin_unlock_irq(&conf->device_lock);
3336	if (test_bit(STRIPE_SYNCING, &sh->state)) {
3337		/* If there is a failed device being replaced,
3338		 *     we must be recovering.
3339		 * else if we are after recovery_cp, we must be syncing
3340		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
3341		 * else we can only be replacing
3342		 * sync and recovery both need to read all devices, and so
3343		 * use the same flag.
3344		 */
3345		if (do_recovery ||
3346		    sh->sector >= conf->mddev->recovery_cp ||
3347		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
3348			s->syncing = 1;
3349		else
3350			s->replacing = 1;
3351	}
3352	rcu_read_unlock();
3353}
3354
3355static void handle_stripe(struct stripe_head *sh)
3356{
3357	struct stripe_head_state s;
3358	struct r5conf *conf = sh->raid_conf;
3359	int i;
3360	int prexor;
3361	int disks = sh->disks;
3362	struct r5dev *pdev, *qdev;
3363
3364	clear_bit(STRIPE_HANDLE, &sh->state);
3365	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3366		/* already being handled, ensure it gets handled
3367		 * again when current action finishes */
3368		set_bit(STRIPE_HANDLE, &sh->state);
3369		return;
3370	}
3371
3372	if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3373		set_bit(STRIPE_SYNCING, &sh->state);
3374		clear_bit(STRIPE_INSYNC, &sh->state);
3375	}
3376	clear_bit(STRIPE_DELAYED, &sh->state);
3377
3378	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3379		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3380	       (unsigned long long)sh->sector, sh->state,
3381	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3382	       sh->check_state, sh->reconstruct_state);
3383
3384	analyse_stripe(sh, &s);
3385
3386	if (s.handle_bad_blocks) {
3387		set_bit(STRIPE_HANDLE, &sh->state);
3388		goto finish;
3389	}
3390
3391	if (unlikely(s.blocked_rdev)) {
3392		if (s.syncing || s.expanding || s.expanded ||
3393		    s.replacing || s.to_write || s.written) {
3394			set_bit(STRIPE_HANDLE, &sh->state);
3395			goto finish;
3396		}
3397		/* There is nothing for the blocked_rdev to block */
3398		rdev_dec_pending(s.blocked_rdev, conf->mddev);
3399		s.blocked_rdev = NULL;
3400	}
3401
3402	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3403		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3404		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3405	}
3406
3407	pr_debug("locked=%d uptodate=%d to_read=%d"
3408	       " to_write=%d failed=%d failed_num=%d,%d\n",
3409	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3410	       s.failed_num[0], s.failed_num[1]);
3411	/* check if the array has lost more than max_degraded devices and,
3412	 * if so, some requests might need to be failed.
3413	 */
3414	if (s.failed > conf->max_degraded) {
3415		sh->check_state = 0;
3416		sh->reconstruct_state = 0;
3417		if (s.to_read+s.to_write+s.written)
3418			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3419		if (s.syncing + s.replacing)
3420			handle_failed_sync(conf, sh, &s);
3421	}
3422
3423	/*
3424	 * might be able to return some write requests if the parity blocks
3425	 * are safe, or on a failed drive
3426	 */
3427	pdev = &sh->dev[sh->pd_idx];
3428	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3429		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3430	qdev = &sh->dev[sh->qd_idx];
3431	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3432		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3433		|| conf->level < 6;
3434
3435	if (s.written &&
3436	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3437			     && !test_bit(R5_LOCKED, &pdev->flags)
3438			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3439	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3440			     && !test_bit(R5_LOCKED, &qdev->flags)
3441			     && test_bit(R5_UPTODATE, &qdev->flags)))))
3442		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3443
3444	/* Now we might consider reading some blocks, either to check/generate
3445	 * parity, or to satisfy requests
3446	 * or to load a block that is being partially written.
3447	 */
3448	if (s.to_read || s.non_overwrite
3449	    || (conf->level == 6 && s.to_write && s.failed)
3450	    || (s.syncing && (s.uptodate + s.compute < disks))
3451	    || s.replacing
3452	    || s.expanding)
3453		handle_stripe_fill(sh, &s, disks);
3454
3455	/* Now we check to see if any write operations have recently
3456	 * completed
3457	 */
3458	prexor = 0;
3459	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3460		prexor = 1;
3461	if (sh->reconstruct_state == reconstruct_state_drain_result ||
3462	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3463		sh->reconstruct_state = reconstruct_state_idle;
3464
3465		/* All the 'written' buffers and the parity block are ready to
3466		 * be written back to disk
3467		 */
3468		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3469		BUG_ON(sh->qd_idx >= 0 &&
3470		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3471		for (i = disks; i--; ) {
3472			struct r5dev *dev = &sh->dev[i];
3473			if (test_bit(R5_LOCKED, &dev->flags) &&
3474				(i == sh->pd_idx || i == sh->qd_idx ||
3475				 dev->written)) {
3476				pr_debug("Writing block %d\n", i);
3477				set_bit(R5_Wantwrite, &dev->flags);
3478				if (prexor)
3479					continue;
3480				if (!test_bit(R5_Insync, &dev->flags) ||
3481				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
3482				     s.failed == 0))
3483					set_bit(STRIPE_INSYNC, &sh->state);
3484			}
3485		}
3486		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3487			s.dec_preread_active = 1;
3488	}
3489
3490	/* Now to consider new write requests and what else, if anything
3491	 * should be read.  We do not handle new writes when:
3492	 * 1/ A 'write' operation (copy+xor) is already in flight.
3493	 * 2/ A 'check' operation is in flight, as it may clobber the parity
3494	 *    block.
3495	 */
3496	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3497		handle_stripe_dirtying(conf, sh, &s, disks);
3498
3499	/* maybe we need to check and possibly fix the parity for this stripe
3500	 * Any reads will already have been scheduled, so we just see if enough
3501	 * data is available.  The parity check is held off while parity
3502	 * dependent operations are in flight.
3503	 */
3504	if (sh->check_state ||
3505	    (s.syncing && s.locked == 0 &&
3506	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3507	     !test_bit(STRIPE_INSYNC, &sh->state))) {
3508		if (conf->level == 6)
3509			handle_parity_checks6(conf, sh, &s, disks);
3510		else
3511			handle_parity_checks5(conf, sh, &s, disks);
3512	}
3513
3514	if (s.replacing && s.locked == 0
3515	    && !test_bit(STRIPE_INSYNC, &sh->state)) {
3516		/* Write out to replacement devices where possible */
3517		for (i = 0; i < conf->raid_disks; i++)
3518			if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
3519			    test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
3520				set_bit(R5_WantReplace, &sh->dev[i].flags);
3521				set_bit(R5_LOCKED, &sh->dev[i].flags);
3522				s.locked++;
3523			}
3524		set_bit(STRIPE_INSYNC, &sh->state);
3525	}
3526	if ((s.syncing || s.replacing) && s.locked == 0 &&
3527	    test_bit(STRIPE_INSYNC, &sh->state)) {
3528		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3529		clear_bit(STRIPE_SYNCING, &sh->state);
3530	}
3531
3532	/* If the failed drives are just a ReadError, then we might need
3533	 * to progress the repair/check process
3534	 */
3535	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3536		for (i = 0; i < s.failed; i++) {
3537			struct r5dev *dev = &sh->dev[s.failed_num[i]];
3538			if (test_bit(R5_ReadError, &dev->flags)
3539			    && !test_bit(R5_LOCKED, &dev->flags)
3540			    && test_bit(R5_UPTODATE, &dev->flags)
3541				) {
3542				if (!test_bit(R5_ReWrite, &dev->flags)) {
3543					set_bit(R5_Wantwrite, &dev->flags);
3544					set_bit(R5_ReWrite, &dev->flags);
3545					set_bit(R5_LOCKED, &dev->flags);
3546					s.locked++;
3547				} else {
3548					/* let's read it back */
3549					set_bit(R5_Wantread, &dev->flags);
3550					set_bit(R5_LOCKED, &dev->flags);
3551					s.locked++;
3552				}
3553			}
3554		}
3555
3556
3557	/* Finish reconstruct operations initiated by the expansion process */
3558	if (sh->reconstruct_state == reconstruct_state_result) {
3559		struct stripe_head *sh_src
3560			= get_active_stripe(conf, sh->sector, 1, 1, 1);
3561		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3562			/* sh cannot be written until sh_src has been read.
3563			 * so arrange for sh to be delayed a little
3564			 */
3565			set_bit(STRIPE_DELAYED, &sh->state);
3566			set_bit(STRIPE_HANDLE, &sh->state);
3567			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3568					      &sh_src->state))
3569				atomic_inc(&conf->preread_active_stripes);
3570			release_stripe(sh_src);
3571			goto finish;
3572		}
3573		if (sh_src)
3574			release_stripe(sh_src);
3575
3576		sh->reconstruct_state = reconstruct_state_idle;
3577		clear_bit(STRIPE_EXPANDING, &sh->state);
3578		for (i = conf->raid_disks; i--; ) {
3579			set_bit(R5_Wantwrite, &sh->dev[i].flags);
3580			set_bit(R5_LOCKED, &sh->dev[i].flags);
3581			s.locked++;
3582		}
3583	}
3584
3585	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3586	    !sh->reconstruct_state) {
3587		/* Need to write out all blocks after computing parity */
3588		sh->disks = conf->raid_disks;
3589		stripe_set_idx(sh->sector, conf, 0, sh);
3590		schedule_reconstruction(sh, &s, 1, 1);
3591	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3592		clear_bit(STRIPE_EXPAND_READY, &sh->state);
3593		atomic_dec(&conf->reshape_stripes);
3594		wake_up(&conf->wait_for_overlap);
3595		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3596	}
3597
3598	if (s.expanding && s.locked == 0 &&
3599	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3600		handle_stripe_expansion(conf, sh);
3601
3602finish:
3603	/* wait for this device to become unblocked */
3604	if (unlikely(s.blocked_rdev)) {
3605		if (conf->mddev->external)
3606			md_wait_for_blocked_rdev(s.blocked_rdev,
3607						 conf->mddev);
3608		else
3609			/* Internal metadata will immediately
3610			 * be written by raid5d, so we don't
3611			 * need to wait here.
3612			 */
3613			rdev_dec_pending(s.blocked_rdev,
3614					 conf->mddev);
3615	}
3616
3617	if (s.handle_bad_blocks)
3618		for (i = disks; i--; ) {
3619			struct md_rdev *rdev;
3620			struct r5dev *dev = &sh->dev[i];
3621			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3622				/* We own a safe reference to the rdev */
3623				rdev = conf->disks[i].rdev;
3624				if (!rdev_set_badblocks(rdev, sh->sector,
3625							STRIPE_SECTORS, 0))
3626					md_error(conf->mddev, rdev);
3627				rdev_dec_pending(rdev, conf->mddev);
3628			}
3629			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3630				rdev = conf->disks[i].rdev;
3631				rdev_clear_badblocks(rdev, sh->sector,
3632						     STRIPE_SECTORS, 0);
3633				rdev_dec_pending(rdev, conf->mddev);
3634			}
3635			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
3636				rdev = conf->disks[i].replacement;
3637				if (!rdev)
3638					/* rdev has been moved down */
3639					rdev = conf->disks[i].rdev;
3640				rdev_clear_badblocks(rdev, sh->sector,
3641						     STRIPE_SECTORS, 0);
3642				rdev_dec_pending(rdev, conf->mddev);
3643			}
3644		}
3645
3646	if (s.ops_request)
3647		raid_run_ops(sh, s.ops_request);
3648
3649	ops_run_io(sh, &s);
3650
3651	if (s.dec_preread_active) {
3652		/* We delay this until after ops_run_io so that if make_request
3653		 * is waiting on a flush, it won't continue until the writes
3654		 * have actually been submitted.
3655		 */
3656		atomic_dec(&conf->preread_active_stripes);
3657		if (atomic_read(&conf->preread_active_stripes) <
3658		    IO_THRESHOLD)
3659			md_wakeup_thread(conf->mddev->thread);
3660	}
3661
3662	return_io(s.return_bi);
3663
3664	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3665}
3666
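/* Move stripes that were delayed (STRIPE_DELAYED) onto the hold_list once
 * the number of preread-active stripes has dropped below IO_THRESHOLD,
 * marking each one STRIPE_PREREAD_ACTIVE so raid5d can process it.
 */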
3667static void raid5_activate_delayed(struct r5conf *conf)
3668{
3669	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3670		while (!list_empty(&conf->delayed_list)) {
3671			struct list_head *l = conf->delayed_list.next;
3672			struct stripe_head *sh;
3673			sh = list_entry(l, struct stripe_head, lru);
3674			list_del_init(l);
3675			clear_bit(STRIPE_DELAYED, &sh->state);
3676			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3677				atomic_inc(&conf->preread_active_stripes);
3678			list_add_tail(&sh->lru, &conf->hold_list);
3679		}
3680	}
3681}
3682
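/* Re-queue stripes that were parked on conf->bitmap_list while waiting for
 * a bitmap batch to be written; each is handed back to __release_stripe()
 * now that the bitmap has been flushed.
 */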
3683static void activate_bit_delay(struct r5conf *conf)
3684{
3685	/* device_lock is held */
3686	struct list_head head;
3687	list_add(&head, &conf->bitmap_list);
3688	list_del_init(&conf->bitmap_list);
3689	while (!list_empty(&head)) {
3690		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3691		list_del_init(&sh->lru);
3692		atomic_inc(&sh->count);
3693		__release_stripe(conf, sh);
3694	}
3695}
3696
3697int md_raid5_congested(struct mddev *mddev, int bits)
3698{
3699	struct r5conf *conf = mddev->private;
3700
3701	/* No difference between reads and writes.  Just check
3702	 * how busy the stripe_cache is
3703	 */
3704
3705	if (conf->inactive_blocked)
3706		return 1;
3707	if (conf->quiesce)
3708		return 1;
3709	if (list_empty_careful(&conf->inactive_list))
3710		return 1;
3711
3712	return 0;
3713}
3714EXPORT_SYMBOL_GPL(md_raid5_congested);
3715
3716static int raid5_congested(void *data, int bits)
3717{
3718	struct mddev *mddev = data;
3719
3720	return mddev_congested(mddev, bits) ||
3721		md_raid5_congested(mddev, bits);
3722}
3723
3724/* We want read requests to align with chunks where possible,
3725 * but write requests don't need to.
3726 */
3727static int raid5_mergeable_bvec(struct request_queue *q,
3728				struct bvec_merge_data *bvm,
3729				struct bio_vec *biovec)
3730{
3731	struct mddev *mddev = q->queuedata;
3732	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3733	int max;
3734	unsigned int chunk_sectors = mddev->chunk_sectors;
3735	unsigned int bio_sectors = bvm->bi_size >> 9;
3736
3737	if ((bvm->bi_rw & 1) == WRITE)
3738		return biovec->bv_len; /* always allow writes to be mergeable */
3739
3740	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3741		chunk_sectors = mddev->new_chunk_sectors;
3742	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3743	if (max < 0) max = 0;
3744	if (max <= biovec->bv_len && bio_sectors == 0)
3745		return biovec->bv_len;
3746	else
3747		return max;
3748}
3749
3750
3751static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
3752{
3753	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3754	unsigned int chunk_sectors = mddev->chunk_sectors;
3755	unsigned int bio_sectors = bio->bi_size >> 9;
3756
3757	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3758		chunk_sectors = mddev->new_chunk_sectors;
3759	return  chunk_sectors >=
3760		((sector & (chunk_sectors - 1)) + bio_sectors);
3761}
3762
3763/*
3764 *  add a bio to the retry LIFO (O(1), as we may be called from interrupt
3765 *  context); it is later sampled by raid5d.
3766 */
3767static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
3768{
3769	unsigned long flags;
3770
3771	spin_lock_irqsave(&conf->device_lock, flags);
3772
3773	bi->bi_next = conf->retry_read_aligned_list;
3774	conf->retry_read_aligned_list = bi;
3775
3776	spin_unlock_irqrestore(&conf->device_lock, flags);
3777	md_wakeup_thread(conf->mddev->thread);
3778}
3779
3780
3781static struct bio *remove_bio_from_retry(struct r5conf *conf)
3782{
3783	struct bio *bi;
3784
3785	bi = conf->retry_read_aligned;
3786	if (bi) {
3787		conf->retry_read_aligned = NULL;
3788		return bi;
3789	}
3790	bi = conf->retry_read_aligned_list;
3791	if (bi) {
3792		conf->retry_read_aligned_list = bi->bi_next;
3793		bi->bi_next = NULL;
3794		/*
3795		 * this sets the active stripe count to 1 and the processed
3796		 * stripe count to zero (upper 8 bits)
3797		 */
3798		bi->bi_phys_segments = 1; /* biased count of active stripes */
3799	}
3800
3801	return bi;
3802}
3803
3804
3805/*
3806 *  The "raid5_align_endio" should check if the read succeeded and if it
3807 *  did, call bio_endio on the original bio (having bio_put the new bio
3808 *  first).
3809 *  If the read failed, queue the original bio for a retry via raid5d.
3810 */
3811static void raid5_align_endio(struct bio *bi, int error)
3812{
3813	struct bio* raid_bi  = bi->bi_private;
3814	struct mddev *mddev;
3815	struct r5conf *conf;
3816	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3817	struct md_rdev *rdev;
3818
3819	bio_put(bi);
3820
3821	rdev = (void*)raid_bi->bi_next;
3822	raid_bi->bi_next = NULL;
3823	mddev = rdev->mddev;
3824	conf = mddev->private;
3825
3826	rdev_dec_pending(rdev, conf->mddev);
3827
3828	if (!error && uptodate) {
3829		bio_endio(raid_bi, 0);
3830		if (atomic_dec_and_test(&conf->active_aligned_reads))
3831			wake_up(&conf->wait_for_stripe);
3832		return;
3833	}
3834
3835
3836	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3837
3838	add_bio_to_retry(raid_bi, conf);
3839}
3840
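/* Check whether the cloned bio can be submitted to the target device's
 * queue as-is: it must not exceed the queue's max_sectors or max_segments,
 * and the queue must not have a merge_bvec_fn we would need to honour.
 */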
3841static int bio_fits_rdev(struct bio *bi)
3842{
3843	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3844
3845	if ((bi->bi_size>>9) > queue_max_sectors(q))
3846		return 0;
3847	blk_recount_segments(q, bi);
3848	if (bi->bi_phys_segments > queue_max_segments(q))
3849		return 0;
3850
3851	if (q->merge_bvec_fn)
3852		/* it's too hard to apply the merge_bvec_fn at this stage,
3853		 * so just give up
3854		 */
3855		return 0;
3856
3857	return 1;
3858}
3859
3860
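/* Try to service a read that lies entirely within one chunk by cloning the
 * bio and sending it straight to the member device, bypassing the stripe
 * cache.  Returns 1 if the read was dispatched, 0 if the caller must fall
 * back to the normal stripe-handling path.
 */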
3861static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
3862{
3863	struct r5conf *conf = mddev->private;
3864	int dd_idx;
3865	struct bio* align_bi;
3866	struct md_rdev *rdev;
3867	sector_t end_sector;
3868
3869	if (!in_chunk_boundary(mddev, raid_bio)) {
3870		pr_debug("chunk_aligned_read : non aligned\n");
3871		return 0;
3872	}
3873	/*
3874	 * use bio_clone_mddev to make a copy of the bio
3875	 */
3876	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3877	if (!align_bi)
3878		return 0;
3879	/*
3880	 *   set bi_end_io to a new function, and set bi_private to the
3881	 *     original bio.
3882	 */
3883	align_bi->bi_end_io  = raid5_align_endio;
3884	align_bi->bi_private = raid_bio;
3885	/*
3886	 *	compute position
3887	 */
3888	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3889						    0,
3890						    &dd_idx, NULL);
3891
3892	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
3893	rcu_read_lock();
3894	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
3895	if (!rdev || test_bit(Faulty, &rdev->flags) ||
3896	    rdev->recovery_offset < end_sector) {
3897		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3898		if (rdev &&
3899		    (test_bit(Faulty, &rdev->flags) ||
3900		    !(test_bit(In_sync, &rdev->flags) ||
3901		      rdev->recovery_offset >= end_sector)))
3902			rdev = NULL;
3903	}
3904	if (rdev) {
3905		sector_t first_bad;
3906		int bad_sectors;
3907
3908		atomic_inc(&rdev->nr_pending);
3909		rcu_read_unlock();
3910		raid_bio->bi_next = (void*)rdev;
3911		align_bi->bi_bdev =  rdev->bdev;
3912		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3913
3914		if (!bio_fits_rdev(align_bi) ||
3915		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3916				&first_bad, &bad_sectors)) {
3917			/* too big in some way, or has a known bad block */
3918			bio_put(align_bi);
3919			rdev_dec_pending(rdev, mddev);
3920			return 0;
3921		}
3922
3923		/* No reshape active, so we can trust rdev->data_offset */
3924		align_bi->bi_sector += rdev->data_offset;
3925
3926		spin_lock_irq(&conf->device_lock);
3927		wait_event_lock_irq(conf->wait_for_stripe,
3928				    conf->quiesce == 0,
3929				    conf->device_lock, /* nothing */);
3930		atomic_inc(&conf->active_aligned_reads);
3931		spin_unlock_irq(&conf->device_lock);
3932
3933		generic_make_request(align_bi);
3934		return 1;
3935	} else {
3936		rcu_read_unlock();
3937		bio_put(align_bi);
3938		return 0;
3939	}
3940}
3941
3942/* __get_priority_stripe - get the next stripe to process
3943 *
3944 * Full stripe writes are allowed to pass preread active stripes up until
3945 * the bypass_threshold is exceeded.  In general the bypass_count
3946 * increments when the handle_list is handled before the hold_list; however, it
3947 * will not be incremented when STRIPE_IO_STARTED is found set, signifying a
3948 * stripe with I/O already in flight.  The bypass_count will be reset when the
3949 * head of the hold_list has changed, i.e. the head was promoted to the
3950 * handle_list.
3951 */
3952static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
3953{
3954	struct stripe_head *sh;
3955
3956	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3957		  __func__,
3958		  list_empty(&conf->handle_list) ? "empty" : "busy",
3959		  list_empty(&conf->hold_list) ? "empty" : "busy",
3960		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
3961
3962	if (!list_empty(&conf->handle_list)) {
3963		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3964
3965		if (list_empty(&conf->hold_list))
3966			conf->bypass_count = 0;
3967		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3968			if (conf->hold_list.next == conf->last_hold)
3969				conf->bypass_count++;
3970			else {
3971				conf->last_hold = conf->hold_list.next;
3972				conf->bypass_count -= conf->bypass_threshold;
3973				if (conf->bypass_count < 0)
3974					conf->bypass_count = 0;
3975			}
3976		}
3977	} else if (!list_empty(&conf->hold_list) &&
3978		   ((conf->bypass_threshold &&
3979		     conf->bypass_count > conf->bypass_threshold) ||
3980		    atomic_read(&conf->pending_full_writes) == 0)) {
3981		sh = list_entry(conf->hold_list.next,
3982				typeof(*sh), lru);
3983		conf->bypass_count -= conf->bypass_threshold;
3984		if (conf->bypass_count < 0)
3985			conf->bypass_count = 0;
3986	} else
3987		return NULL;
3988
3989	list_del_init(&sh->lru);
3990	atomic_inc(&sh->count);
3991	BUG_ON(atomic_read(&sh->count) != 1);
3992	return sh;
3993}
3994
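/* Main entry point for I/O to the array.  Chunk-aligned reads may be passed
 * straight to a member device; everything else is split into
 * STRIPE_SECTORS-sized pieces, attached to the relevant stripe_heads and
 * queued for handling.
 */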
3995static void make_request(struct mddev *mddev, struct bio * bi)
3996{
3997	struct r5conf *conf = mddev->private;
3998	int dd_idx;
3999	sector_t new_sector;
4000	sector_t logical_sector, last_sector;
4001	struct stripe_head *sh;
4002	const int rw = bio_data_dir(bi);
4003	int remaining;
4004
4005	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
4006		md_flush_request(mddev, bi);
4007		return;
4008	}
4009
4010	md_write_start(mddev, bi);
4011
4012	if (rw == READ &&
4013	     mddev->reshape_position == MaxSector &&
4014	     chunk_aligned_read(mddev, bi))
4015		return;
4016
4017	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4018	last_sector = bi->bi_sector + (bi->bi_size>>9);
4019	bi->bi_next = NULL;
4020	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
4021
4022	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
4023		DEFINE_WAIT(w);
4024		int previous;
4025
4026	retry:
4027		previous = 0;
4028		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
4029		if (unlikely(conf->reshape_progress != MaxSector)) {
4030			/* spinlock is needed as reshape_progress may be
4031			 * 64bit on a 32bit platform, and so it might be
4032			 * possible to see a half-updated value.
4033			 * Of course reshape_progress could change after
4034			 * the lock is dropped, so once we get a reference
4035			 * to the stripe that we think it is, we will have
4036			 * to check again.
4037			 */
4038			spin_lock_irq(&conf->device_lock);
4039			if (mddev->reshape_backwards
4040			    ? logical_sector < conf->reshape_progress
4041			    : logical_sector >= conf->reshape_progress) {
4042				previous = 1;
4043			} else {
4044				if (mddev->reshape_backwards
4045				    ? logical_sector < conf->reshape_safe
4046				    : logical_sector >= conf->reshape_safe) {
4047					spin_unlock_irq(&conf->device_lock);
4048					schedule();
4049					goto retry;
4050				}
4051			}
4052			spin_unlock_irq(&conf->device_lock);
4053		}
4054
4055		new_sector = raid5_compute_sector(conf, logical_sector,
4056						  previous,
4057						  &dd_idx, NULL);
4058		pr_debug("raid456: make_request, sector %llu logical %llu\n",
4059			(unsigned long long)new_sector,
4060			(unsigned long long)logical_sector);
4061
4062		sh = get_active_stripe(conf, new_sector, previous,
4063				       (bi->bi_rw&RWA_MASK), 0);
4064		if (sh) {
4065			if (unlikely(previous)) {
4066				/* expansion might have moved on while waiting for a
4067				 * stripe, so we must do the range check again.
4068				 * Expansion could still move past after this
4069				 * test, but as we are holding a reference to
4070				 * 'sh', we know that if that happens,
4071				 *  STRIPE_EXPANDING will get set and the expansion
4072				 * won't proceed until we finish with the stripe.
4073				 */
4074				int must_retry = 0;
4075				spin_lock_irq(&conf->device_lock);
4076				if (mddev->reshape_backwards
4077				    ? logical_sector >= conf->reshape_progress
4078				    : logical_sector < conf->reshape_progress)
4079					/* mismatch, need to try again */
4080					must_retry = 1;
4081				spin_unlock_irq(&conf->device_lock);
4082				if (must_retry) {
4083					release_stripe(sh);
4084					schedule();
4085					goto retry;
4086				}
4087			}
4088
4089			if (rw == WRITE &&
4090			    logical_sector >= mddev->suspend_lo &&
4091			    logical_sector < mddev->suspend_hi) {
4092				release_stripe(sh);
4093				/* As the suspend_* range is controlled by
4094				 * userspace, we want an interruptible
4095				 * wait.
4096				 */
4097				flush_signals(current);
4098				prepare_to_wait(&conf->wait_for_overlap,
4099						&w, TASK_INTERRUPTIBLE);
4100				if (logical_sector >= mddev->suspend_lo &&
4101				    logical_sector < mddev->suspend_hi)
4102					schedule();
4103				goto retry;
4104			}
4105
4106			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
4107			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
4108				/* Stripe is busy expanding or
4109				 * add failed due to overlap.  Flush everything
4110				 * and wait a while
4111				 */
4112				md_wakeup_thread(mddev->thread);
4113				release_stripe(sh);
4114				schedule();
4115				goto retry;
4116			}
4117			finish_wait(&conf->wait_for_overlap, &w);
4118			set_bit(STRIPE_HANDLE, &sh->state);
4119			clear_bit(STRIPE_DELAYED, &sh->state);
4120			if ((bi->bi_rw & REQ_SYNC) &&
4121			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4122				atomic_inc(&conf->preread_active_stripes);
4123			mddev_check_plugged(mddev);
4124			release_stripe(sh);
4125		} else {
4126			/* cannot get stripe for read-ahead, just give up */
4127			clear_bit(BIO_UPTODATE, &bi->bi_flags);
4128			finish_wait(&conf->wait_for_overlap, &w);
4129			break;
4130		}
4131	}
4132
4133	spin_lock_irq(&conf->device_lock);
4134	remaining = raid5_dec_bi_phys_segments(bi);
4135	spin_unlock_irq(&conf->device_lock);
4136	if (remaining == 0) {
4137
4138		if (rw == WRITE)
4139			md_write_end(mddev);
4140
4141		bio_endio(bi, 0);
4142	}
4143}
4144
4145static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
4146
4147static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
4148{
4149	/* reshaping is quite different to recovery/resync so it is
4150	 * handled quite separately ... here.
4151	 *
4152	 * On each call to sync_request, we gather one chunk worth of
4153	 * destination stripes and flag them as expanding.
4154	 * Then we find all the source stripes and request reads.
4155	 * As the reads complete, handle_stripe will copy the data
4156	 * into the destination stripe and release that stripe.
4157	 */
4158	struct r5conf *conf = mddev->private;
4159	struct stripe_head *sh;
4160	sector_t first_sector, last_sector;
4161	int raid_disks = conf->previous_raid_disks;
4162	int data_disks = raid_disks - conf->max_degraded;
4163	int new_data_disks = conf->raid_disks - conf->max_degraded;
4164	int i;
4165	int dd_idx;
4166	sector_t writepos, readpos, safepos;
4167	sector_t stripe_addr;
4168	int reshape_sectors;
4169	struct list_head stripes;
4170
4171	if (sector_nr == 0) {
4172		/* If restarting in the middle, skip the initial sectors */
4173		if (mddev->reshape_backwards &&
4174		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
4175			sector_nr = raid5_size(mddev, 0, 0)
4176				- conf->reshape_progress;
4177		} else if (!mddev->reshape_backwards &&
4178			   conf->reshape_progress > 0)
4179			sector_nr = conf->reshape_progress;
4180		sector_div(sector_nr, new_data_disks);
4181		if (sector_nr) {
4182			mddev->curr_resync_completed = sector_nr;
4183			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4184			*skipped = 1;
4185			return sector_nr;
4186		}
4187	}
4188
4189	/* We need to process a full chunk at a time.
4190	 * If old and new chunk sizes differ, we need to process the
4191	 * largest of these
4192	 */
4193	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
4194		reshape_sectors = mddev->new_chunk_sectors;
4195	else
4196		reshape_sectors = mddev->chunk_sectors;
4197
4198	/* We update the metadata at least every 10 seconds, or when
4199	 * the data about to be copied would over-write the source of
4200	 * the data at the front of the range, i.e. when the stripe one
4201	 * new_stripe along from reshape_progress maps, in the new layout, to
4202	 * after where reshape_safe maps in the old layout.
4203	 */
4204	writepos = conf->reshape_progress;
4205	sector_div(writepos, new_data_disks);
4206	readpos = conf->reshape_progress;
4207	sector_div(readpos, data_disks);
4208	safepos = conf->reshape_safe;
4209	sector_div(safepos, data_disks);
4210	if (mddev->reshape_backwards) {
4211		writepos -= min_t(sector_t, reshape_sectors, writepos);
4212		readpos += reshape_sectors;
4213		safepos += reshape_sectors;
4214	} else {
4215		writepos += reshape_sectors;
4216		readpos -= min_t(sector_t, reshape_sectors, readpos);
4217		safepos -= min_t(sector_t, reshape_sectors, safepos);
4218	}
4219
4220	/* Having calculated the 'writepos' possibly use it
4221	 * to set 'stripe_addr' which is where we will write to.
4222	 */
4223	if (mddev->reshape_backwards) {
4224		BUG_ON(conf->reshape_progress == 0);
4225		stripe_addr = writepos;
4226		BUG_ON((mddev->dev_sectors &
4227			~((sector_t)reshape_sectors - 1))
4228		       - reshape_sectors - stripe_addr
4229		       != sector_nr);
4230	} else {
4231		BUG_ON(writepos != sector_nr + reshape_sectors);
4232		stripe_addr = sector_nr;
4233	}
4234
4235	/* 'writepos' is the most advanced device address we might write.
4236	 * 'readpos' is the least advanced device address we might read.
4237	 * 'safepos' is the least address recorded in the metadata as having
4238	 *     been reshaped.
4239	 * If there is a min_offset_diff, these are adjusted either by
4240	 * increasing the safepos/readpos if diff is negative, or
4241	 * increasing writepos if diff is positive.
4242	 * If 'readpos' is then behind 'writepos', there is no way that we can
4243	 * ensure safety in the face of a crash - that must be done by userspace
4244	 * making a backup of the data.  So in that case there is no particular
4245	 * rush to update metadata.
4246	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4247	 * update the metadata to advance 'safepos' to match 'readpos' so that
4248	 * we can be safe in the event of a crash.
4249	 * So we insist on updating metadata if safepos is behind writepos and
4250	 * readpos is beyond writepos.
4251	 * In any case, update the metadata every 10 seconds.
4252	 * Maybe that number should be configurable, but I'm not sure it is
4253	 * worth it.... maybe it could be a multiple of safemode_delay???
4254	 */
4255	if (conf->min_offset_diff < 0) {
4256		safepos += -conf->min_offset_diff;
4257		readpos += -conf->min_offset_diff;
4258	} else
4259		writepos += conf->min_offset_diff;
4260
4261	if ((mddev->reshape_backwards
4262	     ? (safepos > writepos && readpos < writepos)
4263	     : (safepos < writepos && readpos > writepos)) ||
4264	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4265		/* Cannot proceed until we've updated the superblock... */
4266		wait_event(conf->wait_for_overlap,
4267			   atomic_read(&conf->reshape_stripes)==0);
4268		mddev->reshape_position = conf->reshape_progress;
4269		mddev->curr_resync_completed = sector_nr;
4270		conf->reshape_checkpoint = jiffies;
4271		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4272		md_wakeup_thread(mddev->thread);
4273		wait_event(mddev->sb_wait, mddev->flags == 0 ||
4274			   kthread_should_stop());
4275		spin_lock_irq(&conf->device_lock);
4276		conf->reshape_safe = mddev->reshape_position;
4277		spin_unlock_irq(&conf->device_lock);
4278		wake_up(&conf->wait_for_overlap);
4279		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4280	}
4281
4282	INIT_LIST_HEAD(&stripes);
4283	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
4284		int j;
4285		int skipped_disk = 0;
4286		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
4287		set_bit(STRIPE_EXPANDING, &sh->state);
4288		atomic_inc(&conf->reshape_stripes);
4289		/* If any of this stripe is beyond the end of the old
4290		 * array, then we need to zero those blocks
4291		 */
4292		for (j=sh->disks; j--;) {
4293			sector_t s;
4294			if (j == sh->pd_idx)
4295				continue;
4296			if (conf->level == 6 &&
4297			    j == sh->qd_idx)
4298				continue;
4299			s = compute_blocknr(sh, j, 0);
4300			if (s < raid5_size(mddev, 0, 0)) {
4301				skipped_disk = 1;
4302				continue;
4303			}
4304			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4305			set_bit(R5_Expanded, &sh->dev[j].flags);
4306			set_bit(R5_UPTODATE, &sh->dev[j].flags);
4307		}
4308		if (!skipped_disk) {
4309			set_bit(STRIPE_EXPAND_READY, &sh->state);
4310			set_bit(STRIPE_HANDLE, &sh->state);
4311		}
4312		list_add(&sh->lru, &stripes);
4313	}
4314	spin_lock_irq(&conf->device_lock);
4315	if (mddev->reshape_backwards)
4316		conf->reshape_progress -= reshape_sectors * new_data_disks;
4317	else
4318		conf->reshape_progress += reshape_sectors * new_data_disks;
4319	spin_unlock_irq(&conf->device_lock);
4320	/* Ok, those stripes are ready. We can start scheduling
4321	 * reads on the source stripes.
4322	 * The source stripes are determined by mapping the first and last
4323	 * block on the destination stripes.
4324	 */
4325	first_sector =
4326		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4327				     1, &dd_idx, NULL);
4328	last_sector =
4329		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4330					    * new_data_disks - 1),
4331				     1, &dd_idx, NULL);
4332	if (last_sector >= mddev->dev_sectors)
4333		last_sector = mddev->dev_sectors - 1;
4334	while (first_sector <= last_sector) {
4335		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4336		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4337		set_bit(STRIPE_HANDLE, &sh->state);
4338		release_stripe(sh);
4339		first_sector += STRIPE_SECTORS;
4340	}
4341	/* Now that the sources are clearly marked, we can release
4342	 * the destination stripes
4343	 */
4344	while (!list_empty(&stripes)) {
4345		sh = list_entry(stripes.next, struct stripe_head, lru);
4346		list_del_init(&sh->lru);
4347		release_stripe(sh);
4348	}
4349	/* If this takes us to the resync_max point where we have to pause,
4350	 * then we need to write out the superblock.
4351	 */
4352	sector_nr += reshape_sectors;
4353	if ((sector_nr - mddev->curr_resync_completed) * 2
4354	    >= mddev->resync_max - mddev->curr_resync_completed) {
4355		/* Cannot proceed until we've updated the superblock... */
4356		wait_event(conf->wait_for_overlap,
4357			   atomic_read(&conf->reshape_stripes) == 0);
4358		mddev->reshape_position = conf->reshape_progress;
4359		mddev->curr_resync_completed = sector_nr;
4360		conf->reshape_checkpoint = jiffies;
4361		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4362		md_wakeup_thread(mddev->thread);
4363		wait_event(mddev->sb_wait,
4364			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4365			   || kthread_should_stop());
4366		spin_lock_irq(&conf->device_lock);
4367		conf->reshape_safe = mddev->reshape_position;
4368		spin_unlock_irq(&conf->device_lock);
4369		wake_up(&conf->wait_for_overlap);
4370		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4371	}
4372	return reshape_sectors;
4373}
4374
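/* Resync/recovery worker: called repeatedly by md_do_sync with an advancing
 * sector_nr.  Reshape is handed off to reshape_request(); otherwise one
 * stripe is scheduled for synchronisation per call.
 */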
4375/* FIXME go_faster isn't used */
4376static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
4377{
4378	struct r5conf *conf = mddev->private;
4379	struct stripe_head *sh;
4380	sector_t max_sector = mddev->dev_sectors;
4381	sector_t sync_blocks;
4382	int still_degraded = 0;
4383	int i;
4384
4385	if (sector_nr >= max_sector) {
4386		/* just being told to finish up .. nothing much to do */
4387
4388		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4389			end_reshape(conf);
4390			return 0;
4391		}
4392
4393		if (mddev->curr_resync < max_sector) /* aborted */
4394			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4395					&sync_blocks, 1);
4396		else /* completed sync */
4397			conf->fullsync = 0;
4398		bitmap_close_sync(mddev->bitmap);
4399
4400		return 0;
4401	}
4402
4403	/* Allow raid5_quiesce to complete */
4404	wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4405
4406	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4407		return reshape_request(mddev, sector_nr, skipped);
4408
4409	/* No need to check resync_max as we never do more than one
4410	 * stripe, and as resync_max will always be on a chunk boundary,
4411	 * if the check in md_do_sync didn't fire, there is no chance
4412	 * of overstepping resync_max here
4413	 */
4414
4415	/* if there are too many failed drives and we are trying
4416	 * to resync, then assert that we are finished, because there is
4417	 * nothing we can do.
4418	 */
4419	if (mddev->degraded >= conf->max_degraded &&
4420	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4421		sector_t rv = mddev->dev_sectors - sector_nr;
4422		*skipped = 1;
4423		return rv;
4424	}
4425	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4426	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4427	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4428		/* we can skip this block, and probably more */
4429		sync_blocks /= STRIPE_SECTORS;
4430		*skipped = 1;
4431		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4432	}
4433
4434	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4435
4436	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4437	if (sh == NULL) {
4438		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4439		/* make sure we don't swamp the stripe cache if someone else
4440		 * is trying to get access
4441		 */
4442		schedule_timeout_uninterruptible(1);
4443	}
4444	/* Need to check if the array will still be degraded after recovery/resync.
4445	 * We don't need to check the 'failed' flag as when that gets set,
4446	 * recovery aborts.
4447	 */
4448	for (i = 0; i < conf->raid_disks; i++)
4449		if (conf->disks[i].rdev == NULL)
4450			still_degraded = 1;
4451
4452	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4453
4454	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4455
4456	handle_stripe(sh);
4457	release_stripe(sh);
4458
4459	return STRIPE_SECTORS;
4460}
4461
4462static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
4463{
4464	/* We may not be able to submit a whole bio at once as there
4465	 * may not be enough stripe_heads available.
4466	 * We cannot pre-allocate enough stripe_heads as we may need
4467	 * more than exist in the cache (if we allow ever larger chunks).
4468	 * So we do one stripe head at a time and record in
4469	 * ->bi_hw_segments how many have been done.
4470	 *
4471	 * We *know* that this entire raid_bio is in one chunk, so
4472	 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
4473	 */
4474	struct stripe_head *sh;
4475	int dd_idx;
4476	sector_t sector, logical_sector, last_sector;
4477	int scnt = 0;
4478	int remaining;
4479	int handled = 0;
4480
4481	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4482	sector = raid5_compute_sector(conf, logical_sector,
4483				      0, &dd_idx, NULL);
4484	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4485
4486	for (; logical_sector < last_sector;
4487	     logical_sector += STRIPE_SECTORS,
4488		     sector += STRIPE_SECTORS,
4489		     scnt++) {
4490
4491		if (scnt < raid5_bi_hw_segments(raid_bio))
4492			/* already done this stripe */
4493			continue;
4494
4495		sh = get_active_stripe(conf, sector, 0, 1, 0);
4496
4497		if (!sh) {
4498			/* failed to get a stripe - must wait */
4499			raid5_set_bi_hw_segments(raid_bio, scnt);
4500			conf->retry_read_aligned = raid_bio;
4501			return handled;
4502		}
4503
4504		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4505			release_stripe(sh);
4506			raid5_set_bi_hw_segments(raid_bio, scnt);
4507			conf->retry_read_aligned = raid_bio;
4508			return handled;
4509		}
4510
4511		handle_stripe(sh);
4512		release_stripe(sh);
4513		handled++;
4514	}
4515	spin_lock_irq(&conf->device_lock);
4516	remaining = raid5_dec_bi_phys_segments(raid_bio);
4517	spin_unlock_irq(&conf->device_lock);
4518	if (remaining == 0)
4519		bio_endio(raid_bio, 0);
4520	if (atomic_dec_and_test(&conf->active_aligned_reads))
4521		wake_up(&conf->wait_for_stripe);
4522	return handled;
4523}
4524
4525
4526/*
4527 * This is our raid5 kernel thread.
4528 *
4529 * We scan the hash table for stripes which can be handled now.
4530 * During the scan, completed stripes are saved for us by the interrupt
4531 * handler, so that they will not have to wait for our next wakeup.
4532 */
4533static void raid5d(struct mddev *mddev)
4534{
4535	struct stripe_head *sh;
4536	struct r5conf *conf = mddev->private;
4537	int handled;
4538	struct blk_plug plug;
4539
4540	pr_debug("+++ raid5d active\n");
4541
4542	md_check_recovery(mddev);
4543
4544	blk_start_plug(&plug);
4545	handled = 0;
4546	spin_lock_irq(&conf->device_lock);
4547	while (1) {
4548		struct bio *bio;
4549
4550		if (atomic_read(&mddev->plug_cnt) == 0 &&
4551		    !list_empty(&conf->bitmap_list)) {
4552			/* Now is a good time to flush some bitmap updates */
4553			conf->seq_flush++;
4554			spin_unlock_irq(&conf->device_lock);
4555			bitmap_unplug(mddev->bitmap);
4556			spin_lock_irq(&conf->device_lock);
4557			conf->seq_write = conf->seq_flush;
4558			activate_bit_delay(conf);
4559		}
4560		if (atomic_read(&mddev->plug_cnt) == 0)
4561			raid5_activate_delayed(conf);
4562
4563		while ((bio = remove_bio_from_retry(conf))) {
4564			int ok;
4565			spin_unlock_irq(&conf->device_lock);
4566			ok = retry_aligned_read(conf, bio);
4567			spin_lock_irq(&conf->device_lock);
4568			if (!ok)
4569				break;
4570			handled++;
4571		}
4572
4573		sh = __get_priority_stripe(conf);
4574
4575		if (!sh)
4576			break;
4577		spin_unlock_irq(&conf->device_lock);
4578
4579		handled++;
4580		handle_stripe(sh);
4581		release_stripe(sh);
4582		cond_resched();
4583
4584		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4585			md_check_recovery(mddev);
4586
4587		spin_lock_irq(&conf->device_lock);
4588	}
4589	pr_debug("%d stripes handled\n", handled);
4590
4591	spin_unlock_irq(&conf->device_lock);
4592
4593	async_tx_issue_pending_all();
4594	blk_finish_plug(&plug);
4595
4596	pr_debug("--- raid5d inactive\n");
4597}
4598
4599static ssize_t
4600raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
4601{
4602	struct r5conf *conf = mddev->private;
4603	if (conf)
4604		return sprintf(page, "%d\n", conf->max_nr_stripes);
4605	else
4606		return 0;
4607}
4608
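/* Resize the stripe cache towards 'size' (valid range 17..32768), dropping
 * or growing one stripe_head at a time.
 */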
4609int
4610raid5_set_cache_size(struct mddev *mddev, int size)
4611{
4612	struct r5conf *conf = mddev->private;
4613	int err;
4614
4615	if (size <= 16 || size > 32768)
4616		return -EINVAL;
4617	while (size < conf->max_nr_stripes) {
4618		if (drop_one_stripe(conf))
4619			conf->max_nr_stripes--;
4620		else
4621			break;
4622	}
4623	err = md_allow_write(mddev);
4624	if (err)
4625		return err;
4626	while (size > conf->max_nr_stripes) {
4627		if (grow_one_stripe(conf))
4628			conf->max_nr_stripes++;
4629		else break;
4630	}
4631	return 0;
4632}
4633EXPORT_SYMBOL(raid5_set_cache_size);
4634
4635static ssize_t
4636raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
4637{
4638	struct r5conf *conf = mddev->private;
4639	unsigned long new;
4640	int err;
4641
4642	if (len >= PAGE_SIZE)
4643		return -EINVAL;
4644	if (!conf)
4645		return -ENODEV;
4646
4647	if (strict_strtoul(page, 10, &new))
4648		return -EINVAL;
4649	err = raid5_set_cache_size(mddev, new);
4650	if (err)
4651		return err;
4652	return len;
4653}
4654
4655static struct md_sysfs_entry
4656raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4657				raid5_show_stripe_cache_size,
4658				raid5_store_stripe_cache_size);
4659
4660static ssize_t
4661raid5_show_preread_threshold(struct mddev *mddev, char *page)
4662{
4663	struct r5conf *conf = mddev->private;
4664	if (conf)
4665		return sprintf(page, "%d\n", conf->bypass_threshold);
4666	else
4667		return 0;
4668}
4669
4670static ssize_t
4671raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
4672{
4673	struct r5conf *conf = mddev->private;
4674	unsigned long new;
4675	if (len >= PAGE_SIZE)
4676		return -EINVAL;
4677	if (!conf)
4678		return -ENODEV;
4679
4680	if (strict_strtoul(page, 10, &new))
4681		return -EINVAL;
4682	if (new > conf->max_nr_stripes)
4683		return -EINVAL;
4684	conf->bypass_threshold = new;
4685	return len;
4686}
4687
4688static struct md_sysfs_entry
4689raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4690					S_IRUGO | S_IWUSR,
4691					raid5_show_preread_threshold,
4692					raid5_store_preread_threshold);
4693
4694static ssize_t
4695stripe_cache_active_show(struct mddev *mddev, char *page)
4696{
4697	struct r5conf *conf = mddev->private;
4698	if (conf)
4699		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4700	else
4701		return 0;
4702}
4703
4704static struct md_sysfs_entry
4705raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4706
4707static struct attribute *raid5_attrs[] =  {
4708	&raid5_stripecache_size.attr,
4709	&raid5_stripecache_active.attr,
4710	&raid5_preread_bypass_threshold.attr,
4711	NULL,
4712};
4713static struct attribute_group raid5_attrs_group = {
4714	.name = NULL,
4715	.attrs = raid5_attrs,
4716};
4717
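/* Return the array capacity in sectors for the given per-device size and
 * disk count (0 means "use the current values"); the parity devices are
 * excluded from the result.
 */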
4718static sector_t
4719raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
4720{
4721	struct r5conf *conf = mddev->private;
4722
4723	if (!sectors)
4724		sectors = mddev->dev_sectors;
4725	if (!raid_disks)
4726		/* size is defined by the smallest of previous and new size */
4727		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4728
4729	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4730	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4731	return sectors * (raid_disks - conf->max_degraded);
4732}
4733
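/* Free the per-CPU spare page and scribble buffer used by the stripe
 * operations, and unregister the CPU hotplug notifier.
 */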
4734static void raid5_free_percpu(struct r5conf *conf)
4735{
4736	struct raid5_percpu *percpu;
4737	unsigned long cpu;
4738
4739	if (!conf->percpu)
4740		return;
4741
4742	get_online_cpus();
4743	for_each_possible_cpu(cpu) {
4744		percpu = per_cpu_ptr(conf->percpu, cpu);
4745		safe_put_page(percpu->spare_page);
4746		kfree(percpu->scribble);
4747	}
4748#ifdef CONFIG_HOTPLUG_CPU
4749	unregister_cpu_notifier(&conf->cpu_notify);
4750#endif
4751	put_online_cpus();
4752
4753	free_percpu(conf->percpu);
4754}
4755
4756static void free_conf(struct r5conf *conf)
4757{
4758	shrink_stripes(conf);
4759	raid5_free_percpu(conf);
4760	kfree(conf->disks);
4761	kfree(conf->stripe_hashtbl);
4762	kfree(conf);
4763}
4764
4765#ifdef CONFIG_HOTPLUG_CPU
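/* CPU hotplug callback: allocate the per-CPU scribble buffer (and, for
 * RAID6, the spare page) when a CPU comes up, and free them when it dies.
 */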
4766static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4767			      void *hcpu)
4768{
4769	struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
4770	long cpu = (long)hcpu;
4771	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4772
4773	switch (action) {
4774	case CPU_UP_PREPARE:
4775	case CPU_UP_PREPARE_FROZEN:
4776		if (conf->level == 6 && !percpu->spare_page)
4777			percpu->spare_page = alloc_page(GFP_KERNEL);
4778		if (!percpu->scribble)
4779			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4780
4781		if (!percpu->scribble ||
4782		    (conf->level == 6 && !percpu->spare_page)) {
4783			safe_put_page(percpu->spare_page);
4784			kfree(percpu->scribble);
4785			pr_err("%s: failed memory allocation for cpu%ld\n",
4786			       __func__, cpu);
4787			return notifier_from_errno(-ENOMEM);
4788		}
4789		break;
4790	case CPU_DEAD:
4791	case CPU_DEAD_FROZEN:
4792		safe_put_page(percpu->spare_page);
4793		kfree(percpu->scribble);
4794		percpu->spare_page = NULL;
4795		percpu->scribble = NULL;
4796		break;
4797	default:
4798		break;
4799	}
4800	return NOTIFY_OK;
4801}
4802#endif
4803
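/* Allocate the per-CPU resources for every present CPU and register the
 * hotplug notifier so that late-arriving CPUs get theirs too.
 */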
4804static int raid5_alloc_percpu(struct r5conf *conf)
4805{
4806	unsigned long cpu;
4807	struct page *spare_page;
4808	struct raid5_percpu __percpu *allcpus;
4809	void *scribble;
4810	int err;
4811
4812	allcpus = alloc_percpu(struct raid5_percpu);
4813	if (!allcpus)
4814		return -ENOMEM;
4815	conf->percpu = allcpus;
4816
4817	get_online_cpus();
4818	err = 0;
4819	for_each_present_cpu(cpu) {
4820		if (conf->level == 6) {
4821			spare_page = alloc_page(GFP_KERNEL);
4822			if (!spare_page) {
4823				err = -ENOMEM;
4824				break;
4825			}
4826			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4827		}
4828		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4829		if (!scribble) {
4830			err = -ENOMEM;
4831			break;
4832		}
4833		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4834	}
4835#ifdef CONFIG_HOTPLUG_CPU
4836	conf->cpu_notify.notifier_call = raid456_cpu_notify;
4837	conf->cpu_notify.priority = 0;
4838	if (err == 0)
4839		err = register_cpu_notifier(&conf->cpu_notify);
4840#endif
4841	put_online_cpus();
4842
4843	return err;
4844}
4845
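/* Allocate and initialise the r5conf for this array: validate level, layout
 * and chunk size, set up the stripe hash table, per-CPU buffers and stripe
 * cache, bind the member devices and start the raid5d thread.
 */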
4846static struct r5conf *setup_conf(struct mddev *mddev)
4847{
4848	struct r5conf *conf;
4849	int raid_disk, memory, max_disks;
4850	struct md_rdev *rdev;
4851	struct disk_info *disk;
4852	char pers_name[6];
4853
4854	if (mddev->new_level != 5
4855	    && mddev->new_level != 4
4856	    && mddev->new_level != 6) {
4857		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4858		       mdname(mddev), mddev->new_level);
4859		return ERR_PTR(-EIO);
4860	}
4861	if ((mddev->new_level == 5
4862	     && !algorithm_valid_raid5(mddev->new_layout)) ||
4863	    (mddev->new_level == 6
4864	     && !algorithm_valid_raid6(mddev->new_layout))) {
4865		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4866		       mdname(mddev), mddev->new_layout);
4867		return ERR_PTR(-EIO);
4868	}
4869	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4870		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4871		       mdname(mddev), mddev->raid_disks);
4872		return ERR_PTR(-EINVAL);
4873	}
4874
4875	if (!mddev->new_chunk_sectors ||
4876	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4877	    !is_power_of_2(mddev->new_chunk_sectors)) {
4878		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4879		       mdname(mddev), mddev->new_chunk_sectors << 9);
4880		return ERR_PTR(-EINVAL);
4881	}
4882
4883	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
4884	if (conf == NULL)
4885		goto abort;
4886	spin_lock_init(&conf->device_lock);
4887	init_waitqueue_head(&conf->wait_for_stripe);
4888	init_waitqueue_head(&conf->wait_for_overlap);
4889	INIT_LIST_HEAD(&conf->handle_list);
4890	INIT_LIST_HEAD(&conf->hold_list);
4891	INIT_LIST_HEAD(&conf->delayed_list);
4892	INIT_LIST_HEAD(&conf->bitmap_list);
4893	INIT_LIST_HEAD(&conf->inactive_list);
4894	atomic_set(&conf->active_stripes, 0);
4895	atomic_set(&conf->preread_active_stripes, 0);
4896	atomic_set(&conf->active_aligned_reads, 0);
4897	conf->bypass_threshold = BYPASS_THRESHOLD;
4898	conf->recovery_disabled = mddev->recovery_disabled - 1;
4899
4900	conf->raid_disks = mddev->raid_disks;
4901	if (mddev->reshape_position == MaxSector)
4902		conf->previous_raid_disks = mddev->raid_disks;
4903	else
4904		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4905	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4906	conf->scribble_len = scribble_len(max_disks);
4907
4908	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4909			      GFP_KERNEL);
4910	if (!conf->disks)
4911		goto abort;
4912
4913	conf->mddev = mddev;
4914
4915	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4916		goto abort;
4917
4918	conf->level = mddev->new_level;
4919	if (raid5_alloc_percpu(conf) != 0)
4920		goto abort;
4921
4922	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4923
4924	rdev_for_each(rdev, mddev) {
4925		raid_disk = rdev->raid_disk;
4926		if (raid_disk >= max_disks
4927		    || raid_disk < 0)
4928			continue;
4929		disk = conf->disks + raid_disk;
4930
4931		if (test_bit(Replacement, &rdev->flags)) {
4932			if (disk->replacement)
4933				goto abort;
4934			disk->replacement = rdev;
4935		} else {
4936			if (disk->rdev)
4937				goto abort;
4938			disk->rdev = rdev;
4939		}
4940
4941		if (test_bit(In_sync, &rdev->flags)) {
4942			char b[BDEVNAME_SIZE];
4943			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4944			       " disk %d\n",
4945			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4946		} else if (rdev->saved_raid_disk != raid_disk)
4947			/* Cannot rely on bitmap to complete recovery */
4948			conf->fullsync = 1;
4949	}
4950
4951	conf->chunk_sectors = mddev->new_chunk_sectors;
4952	conf->level = mddev->new_level;
4953	if (conf->level == 6)
4954		conf->max_degraded = 2;
4955	else
4956		conf->max_degraded = 1;
4957	conf->algorithm = mddev->new_layout;
4958	conf->max_nr_stripes = NR_STRIPES;
4959	conf->reshape_progress = mddev->reshape_position;
4960	if (conf->reshape_progress != MaxSector) {
4961		conf->prev_chunk_sectors = mddev->chunk_sectors;
4962		conf->prev_algo = mddev->layout;
4963	}
4964
4965	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4966		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4967	if (grow_stripes(conf, conf->max_nr_stripes)) {
4968		printk(KERN_ERR
4969		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
4970		       mdname(mddev), memory);
4971		goto abort;
4972	} else
4973		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4974		       mdname(mddev), memory);
4975
4976	sprintf(pers_name, "raid%d", mddev->new_level);
4977	conf->thread = md_register_thread(raid5d, mddev, pers_name);
4978	if (!conf->thread) {
4979		printk(KERN_ERR
4980		       "md/raid:%s: couldn't allocate thread.\n",
4981		       mdname(mddev));
4982		goto abort;
4983	}
4984
4985	return conf;
4986
4987 abort:
4988	if (conf) {
4989		free_conf(conf);
4990		return ERR_PTR(-EIO);
4991	} else
4992		return ERR_PTR(-ENOMEM);
4993}
4994
4995
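/* Return 1 if 'raid_disk' holds nothing but parity blocks in the given
 * layout, so a device in that slot that is not in-sync does not endanger
 * data.
 */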
4996static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4997{
4998	switch (algo) {
4999	case ALGORITHM_PARITY_0:
5000		if (raid_disk < max_degraded)
5001			return 1;
5002		break;
5003	case ALGORITHM_PARITY_N:
5004		if (raid_disk >= raid_disks - max_degraded)
5005			return 1;
5006		break;
5007	case ALGORITHM_PARITY_0_6:
5008		if (raid_disk == 0 ||
5009		    raid_disk == raid_disks - 1)
5010			return 1;
5011		break;
5012	case ALGORITHM_LEFT_ASYMMETRIC_6:
5013	case ALGORITHM_RIGHT_ASYMMETRIC_6:
5014	case ALGORITHM_LEFT_SYMMETRIC_6:
5015	case ALGORITHM_RIGHT_SYMMETRIC_6:
5016		if (raid_disk == raid_disks - 1)
5017			return 1;
5018	}
5019	return 0;
5020}
5021
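/* md 'run' method: check that any in-progress reshape can safely continue,
 * set up (or adopt) the configuration, compute the initial degraded count
 * and configure the request queue limits.
 */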
5022static int run(struct mddev *mddev)
5023{
5024	struct r5conf *conf;
5025	int working_disks = 0;
5026	int dirty_parity_disks = 0;
5027	struct md_rdev *rdev;
5028	sector_t reshape_offset = 0;
5029	int i;
5030	long long min_offset_diff = 0;
5031	int first = 1;
5032
5033	if (mddev->recovery_cp != MaxSector)
5034		printk(KERN_NOTICE "md/raid:%s: not clean"
5035		       " -- starting background reconstruction\n",
5036		       mdname(mddev));
5037
5038	rdev_for_each(rdev, mddev) {
5039		long long diff;
5040		if (rdev->raid_disk < 0)
5041			continue;
5042		diff = (rdev->new_data_offset - rdev->data_offset);
5043		if (first) {
5044			min_offset_diff = diff;
5045			first = 0;
5046		} else if (mddev->reshape_backwards &&
5047			 diff < min_offset_diff)
5048			min_offset_diff = diff;
5049		else if (!mddev->reshape_backwards &&
5050			 diff > min_offset_diff)
5051			min_offset_diff = diff;
5052	}
5053
5054	if (mddev->reshape_position != MaxSector) {
5055		/* Check that we can continue the reshape.
5056		 * Difficulties arise if the stripe we would write to
5057		 * next is at or after the stripe we would read from next.
5058		 * For a reshape that changes the number of devices, this
5059		 * is only possible for a very short time, and mdadm makes
5060		 * sure that time appears to have passed before assembling
5061		 * the array.  So we fail if that time hasn't passed.
5062		 * For a reshape that keeps the number of devices the same
5063		 * mdadm must be monitoring the reshape and keeping the
5064		 * critical areas read-only and backed up.  It will start
5065		 * the array in read-only mode, so we check for that.
5066		 */
5067		sector_t here_new, here_old;
5068		int old_disks;
5069		int max_degraded = (mddev->level == 6 ? 2 : 1);
5070
5071		if (mddev->new_level != mddev->level) {
5072			printk(KERN_ERR "md/raid:%s: unsupported reshape "
5073			       "required - aborting.\n",
5074			       mdname(mddev));
5075			return -EINVAL;
5076		}
5077		old_disks = mddev->raid_disks - mddev->delta_disks;
5078		/* reshape_position must be on a new-stripe boundary, and one
5079		 * further up in new geometry must map after here in old
5080		 * geometry.
5081		 */
5082		here_new = mddev->reshape_position;
5083		if (sector_div(here_new, mddev->new_chunk_sectors *
5084			       (mddev->raid_disks - max_degraded))) {
5085			printk(KERN_ERR "md/raid:%s: reshape_position not "
5086			       "on a stripe boundary\n", mdname(mddev));
5087			return -EINVAL;
5088		}
5089		reshape_offset = here_new * mddev->new_chunk_sectors;
5090		/* here_new is the stripe we will write to */
5091		here_old = mddev->reshape_position;
5092		sector_div(here_old, mddev->chunk_sectors *
5093			   (old_disks-max_degraded));
5094		/* here_old is the first stripe that we might need to read
5095		 * from */
5096		if (mddev->delta_disks == 0) {
5097			if ((here_new * mddev->new_chunk_sectors !=
5098			     here_old * mddev->chunk_sectors)) {
5099				printk(KERN_ERR "md/raid:%s: reshape position is"
5100				       " confused - aborting\n", mdname(mddev));
5101				return -EINVAL;
5102			}
5103			/* We cannot be sure it is safe to start an in-place
5104			 * reshape.  It is only safe if user-space is monitoring
5105			 * and taking constant backups.
5106			 * mdadm always starts a situation like this in
5107			 * readonly mode so it can take control before
5108			 * allowing any writes.  So just check for that.
5109			 */
5110			if (abs(min_offset_diff) >= mddev->chunk_sectors &&
5111			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
5112				/* not really in-place - so OK */;
5113			else if (mddev->ro == 0) {
5114				printk(KERN_ERR "md/raid:%s: in-place reshape "
5115				       "must be started in read-only mode "
5116				       "- aborting\n",
5117				       mdname(mddev));
5118				return -EINVAL;
5119			}
5120		} else if (mddev->reshape_backwards
5121		    ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
5122		       here_old * mddev->chunk_sectors)
5123		    : (here_new * mddev->new_chunk_sectors >=
5124		       here_old * mddev->chunk_sectors + (-min_offset_diff))) {
5125			/* Reading from the same stripe as writing to - bad */
5126			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
5127			       "auto-recovery - aborting.\n",
5128			       mdname(mddev));
5129			return -EINVAL;
5130		}
5131		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
5132		       mdname(mddev));
5133		/* OK, we should be able to continue; */
5134	} else {
5135		BUG_ON(mddev->level != mddev->new_level);
5136		BUG_ON(mddev->layout != mddev->new_layout);
5137		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
5138		BUG_ON(mddev->delta_disks != 0);
5139	}
5140
5141	if (mddev->private == NULL)
5142		conf = setup_conf(mddev);
5143	else
5144		conf = mddev->private;
5145
5146	if (IS_ERR(conf))
5147		return PTR_ERR(conf);
5148
5149	conf->min_offset_diff = min_offset_diff;
5150	mddev->thread = conf->thread;
5151	conf->thread = NULL;
5152	mddev->private = conf;
5153
5154	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
5155	     i++) {
5156		rdev = conf->disks[i].rdev;
5157		if (!rdev && conf->disks[i].replacement) {
5158			/* The replacement is all we have yet */
5159			rdev = conf->disks[i].replacement;
5160			conf->disks[i].replacement = NULL;
5161			clear_bit(Replacement, &rdev->flags);
5162			conf->disks[i].rdev = rdev;
5163		}
5164		if (!rdev)
5165			continue;
5166		if (conf->disks[i].replacement &&
5167		    conf->reshape_progress != MaxSector) {
5168			/* replacements and reshape simply do not mix. */
5169			printk(KERN_ERR "md: cannot handle concurrent "
5170			       "replacement and reshape.\n");
5171			goto abort;
5172		}
5173		if (test_bit(In_sync, &rdev->flags)) {
5174			working_disks++;
5175			continue;
5176		}
5177		/* This disk is not fully in-sync.  However if it
5178		 * just stored parity (beyond the recovery_offset),
5179		 * then we don't need to be concerned about the
5180		 * array being dirty.
5181		 * When reshape goes 'backwards', we never have
5182		 * partially completed devices, so we only need
5183		 * to worry about reshape going forwards.
5184		 */
5185		/* Hack because v0.91 doesn't store recovery_offset properly. */
5186		if (mddev->major_version == 0 &&
5187		    mddev->minor_version > 90)
5188			rdev->recovery_offset = reshape_offset;
5189
5190		if (rdev->recovery_offset < reshape_offset) {
5191			/* We need to check old and new layout */
5192			if (!only_parity(rdev->raid_disk,
5193					 conf->algorithm,
5194					 conf->raid_disks,
5195					 conf->max_degraded))
5196				continue;
5197		}
5198		if (!only_parity(rdev->raid_disk,
5199				 conf->prev_algo,
5200				 conf->previous_raid_disks,
5201				 conf->max_degraded))
5202			continue;
5203		dirty_parity_disks++;
5204	}
5205
5206	/*
5207	 * 0 for a fully functional array, 1 or 2 for a degraded array.
5208	 */
5209	mddev->degraded = calc_degraded(conf);
5210
5211	if (has_failed(conf)) {
5212		printk(KERN_ERR "md/raid:%s: not enough operational devices"
5213			" (%d/%d failed)\n",
5214			mdname(mddev), mddev->degraded, conf->raid_disks);
5215		goto abort;
5216	}
5217
5218	/* device size must be a multiple of chunk size */
5219	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
5220	mddev->resync_max_sectors = mddev->dev_sectors;
5221
5222	if (mddev->degraded > dirty_parity_disks &&
5223	    mddev->recovery_cp != MaxSector) {
5224		if (mddev->ok_start_degraded)
5225			printk(KERN_WARNING
5226			       "md/raid:%s: starting dirty degraded array"
5227			       " - data corruption possible.\n",
5228			       mdname(mddev));
5229		else {
5230			printk(KERN_ERR
5231			       "md/raid:%s: cannot start dirty degraded array.\n",
5232			       mdname(mddev));
5233			goto abort;
5234		}
5235	}
5236
5237	if (mddev->degraded == 0)
5238		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
5239		       " devices, algorithm %d\n", mdname(mddev), conf->level,
5240		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
5241		       mddev->new_layout);
5242	else
5243		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
5244		       " out of %d devices, algorithm %d\n",
5245		       mdname(mddev), conf->level,
5246		       mddev->raid_disks - mddev->degraded,
5247		       mddev->raid_disks, mddev->new_layout);
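	/*
	 * e.g. a four-drive set missing one member logs something like
	 * "md/raid:md0: raid level 5 active with 3 out of 4 devices,
	 * algorithm 2" (algorithm 2 being left-symmetric, the default).
	 */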
5248
5249	print_raid5_conf(conf);
5250
5251	if (conf->reshape_progress != MaxSector) {
5252		conf->reshape_safe = conf->reshape_progress;
5253		atomic_set(&conf->reshape_stripes, 0);
5254		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5255		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5256		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5257		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5258		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5259							"reshape");
5260	}
5261
5262
5263	/* Ok, everything is just fine now */
5264	if (mddev->to_remove == &raid5_attrs_group)
5265		mddev->to_remove = NULL;
5266	else if (mddev->kobj.sd &&
5267	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
5268		printk(KERN_WARNING
5269		       "raid5: failed to create sysfs attributes for %s\n",
5270		       mdname(mddev));
5271	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5272
5273	if (mddev->queue) {
5274		int chunk_size;
5275		/* read-ahead size must cover two whole stripes, which
5276		 * is 2 * (datadisks) * chunksize, where 'datadisks' is
5277		 * the number of raid devices minus the parity devices
5278		 */
5279		int data_disks = conf->previous_raid_disks - conf->max_degraded;
5280		int stripe = data_disks *
5281			((mddev->chunk_sectors << 9) / PAGE_SIZE);
5282		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5283			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
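		/*
		 * e.g. a six-drive raid5 (five data drives) with 512K
		 * chunks: each chunk is 128 pages, one full stripe is
		 * 5 * 128 = 640 pages, so read-ahead is raised to at
		 * least 1280 pages (5MiB) if it was smaller.
		 */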
5284
5285		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
5286
5287		mddev->queue->backing_dev_info.congested_data = mddev;
5288		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5289
5290		chunk_size = mddev->chunk_sectors << 9;
5291		blk_queue_io_min(mddev->queue, chunk_size);
5292		blk_queue_io_opt(mddev->queue, chunk_size *
5293				 (conf->raid_disks - conf->max_degraded));
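		/*
		 * For the same example array this advertises a 512K
		 * minimum I/O size and a 5 * 512K = 2560K optimal I/O
		 * size, i.e. one full stripe of data.
		 */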
5294
5295		rdev_for_each(rdev, mddev) {
5296			disk_stack_limits(mddev->gendisk, rdev->bdev,
5297					  rdev->data_offset << 9);
5298			disk_stack_limits(mddev->gendisk, rdev->bdev,
5299					  rdev->new_data_offset << 9);
5300		}
5301	}
5302
5303	return 0;
5304abort:
5305	md_unregister_thread(&mddev->thread);
5306	print_raid5_conf(conf);
5307	free_conf(conf);
5308	mddev->private = NULL;
5309	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
5310	return -EIO;
5311}
5312
5313static int stop(struct mddev *mddev)
5314{
5315	struct r5conf *conf = mddev->private;
5316
5317	md_unregister_thread(&mddev->thread);
5318	if (mddev->queue)
5319		mddev->queue->backing_dev_info.congested_fn = NULL;
5320	free_conf(conf);
5321	mddev->private = NULL;
5322	mddev->to_remove = &raid5_attrs_group;
5323	return 0;
5324}
5325
5326static void status(struct seq_file *seq, struct mddev *mddev)
5327{
5328	struct r5conf *conf = mddev->private;
5329	int i;
5330
5331	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5332		mddev->chunk_sectors / 2, mddev->layout);
5333	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5334	for (i = 0; i < conf->raid_disks; i++)
5335		seq_printf (seq, "%s",
5336			       conf->disks[i].rdev &&
5337			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5338	seq_printf (seq, "]");
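	/*
	 * e.g. a healthy four-drive raid5 with 512K chunks shows up in
	 * /proc/mdstat as " level 5, 512k chunk, algorithm 2 [4/4] [UUUU]".
	 */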
5339}
5340
5341static void print_raid5_conf (struct r5conf *conf)
5342{
5343	int i;
5344	struct disk_info *tmp;
5345
5346	printk(KERN_DEBUG "RAID conf printout:\n");
5347	if (!conf) {
5348		printk("(conf==NULL)\n");
5349		return;
5350	}
5351	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5352	       conf->raid_disks,
5353	       conf->raid_disks - conf->mddev->degraded);
5354
5355	for (i = 0; i < conf->raid_disks; i++) {
5356		char b[BDEVNAME_SIZE];
5357		tmp = conf->disks + i;
5358		if (tmp->rdev)
5359			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5360			       i, !test_bit(Faulty, &tmp->rdev->flags),
5361			       bdevname(tmp->rdev->bdev, b));
5362	}
5363}
5364
5365static int raid5_spare_active(struct mddev *mddev)
5366{
5367	int i;
5368	struct r5conf *conf = mddev->private;
5369	struct disk_info *tmp;
5370	int count = 0;
5371	unsigned long flags;
5372
5373	for (i = 0; i < conf->raid_disks; i++) {
5374		tmp = conf->disks + i;
5375		if (tmp->replacement
5376		    && tmp->replacement->recovery_offset == MaxSector
5377		    && !test_bit(Faulty, &tmp->replacement->flags)
5378		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
5379			/* Replacement has just become active. */
5380			if (!tmp->rdev
5381			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
5382				count++;
5383			if (tmp->rdev) {
5384				/* Replaced device not technically faulty,
5385				 * but we need to be sure it gets removed
5386				 * and never re-added.
5387				 */
5388				set_bit(Faulty, &tmp->rdev->flags);
5389				sysfs_notify_dirent_safe(
5390					tmp->rdev->sysfs_state);
5391			}
5392			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
5393		} else if (tmp->rdev
5394		    && tmp->rdev->recovery_offset == MaxSector
5395		    && !test_bit(Faulty, &tmp->rdev->flags)
5396		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5397			count++;
5398			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5399		}
5400	}
5401	spin_lock_irqsave(&conf->device_lock, flags);
5402	mddev->degraded = calc_degraded(conf);
5403	spin_unlock_irqrestore(&conf->device_lock, flags);
5404	print_raid5_conf(conf);
5405	return count;
5406}
5407
5408static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
5409{
5410	struct r5conf *conf = mddev->private;
5411	int err = 0;
5412	int number = rdev->raid_disk;
5413	struct md_rdev **rdevp;
5414	struct disk_info *p = conf->disks + number;
5415
5416	print_raid5_conf(conf);
5417	if (rdev == p->rdev)
5418		rdevp = &p->rdev;
5419	else if (rdev == p->replacement)
5420		rdevp = &p->replacement;
5421	else
5422		return 0;
5423
5424	if (number >= conf->raid_disks &&
5425	    conf->reshape_progress == MaxSector)
5426		clear_bit(In_sync, &rdev->flags);
5427
5428	if (test_bit(In_sync, &rdev->flags) ||
5429	    atomic_read(&rdev->nr_pending)) {
5430		err = -EBUSY;
5431		goto abort;
5432	}
5433	/* Only remove non-faulty devices if recovery
5434	 * isn't possible.
5435	 */
5436	if (!test_bit(Faulty, &rdev->flags) &&
5437	    mddev->recovery_disabled != conf->recovery_disabled &&
5438	    !has_failed(conf) &&
5439	    (!p->replacement || p->replacement == rdev) &&
5440	    number < conf->raid_disks) {
5441		err = -EBUSY;
5442		goto abort;
5443	}
5444	*rdevp = NULL;
5445	synchronize_rcu();
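	/*
	 * A reader may have taken a reference to the old rdev pointer
	 * under rcu_read_lock() and raised nr_pending before we cleared
	 * it; after the grace period, recheck and back out if so.
	 */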
5446	if (atomic_read(&rdev->nr_pending)) {
5447		/* lost the race, try later */
5448		err = -EBUSY;
5449		*rdevp = rdev;
5450	} else if (p->replacement) {
5451		/* We must have just cleared 'rdev' */
5452		p->rdev = p->replacement;
5453		clear_bit(Replacement, &p->replacement->flags);
5454		smp_mb(); /* Make sure other CPUs may see both as identical
5455			   * but will never see neither - if they are careful
5456			   */
5457		p->replacement = NULL;
5458		clear_bit(WantReplacement, &rdev->flags);
5459	} else
5460		/* We might have just removed the Replacement as faulty;
5461		 * clear the bit just in case
5462		 */
5463		clear_bit(WantReplacement, &rdev->flags);
5464abort:
5465
5466	print_raid5_conf(conf);
5467	return err;
5468}
5469
5470static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
5471{
5472	struct r5conf *conf = mddev->private;
5473	int err = -EEXIST;
5474	int disk;
5475	struct disk_info *p;
5476	int first = 0;
5477	int last = conf->raid_disks - 1;
5478
5479	if (mddev->recovery_disabled == conf->recovery_disabled)
5480		return -EBUSY;
5481
5482	if (rdev->saved_raid_disk < 0 && has_failed(conf))
5483		/* no point adding a device */
5484		return -EINVAL;
5485
5486	if (rdev->raid_disk >= 0)
5487		first = last = rdev->raid_disk;
5488
5489	/*
5490	 * find the disk ... but prefer rdev->saved_raid_disk
5491	 * if possible.
5492	 */
5493	if (rdev->saved_raid_disk >= 0 &&
5494	    rdev->saved_raid_disk >= first &&
5495	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
5496		first = rdev->saved_raid_disk;
5497
5498	for (disk = first; disk <= last; disk++) {
5499		p = conf->disks + disk;
5500		if (p->rdev == NULL) {
5501			clear_bit(In_sync, &rdev->flags);
5502			rdev->raid_disk = disk;
5503			err = 0;
5504			if (rdev->saved_raid_disk != disk)
5505				conf->fullsync = 1;
5506			rcu_assign_pointer(p->rdev, rdev);
5507			goto out;
5508		}
5509	}
5510	for (disk = first; disk <= last; disk++) {
5511		p = conf->disks + disk;
5512		if (test_bit(WantReplacement, &p->rdev->flags) &&
5513		    p->replacement == NULL) {
5514			clear_bit(In_sync, &rdev->flags);
5515			set_bit(Replacement, &rdev->flags);
5516			rdev->raid_disk = disk;
5517			err = 0;
5518			conf->fullsync = 1;
5519			rcu_assign_pointer(p->replacement, rdev);
5520			break;
5521		}
5522	}
5523out:
5524	print_raid5_conf(conf);
5525	return err;
5526}
5527
5528static int raid5_resize(struct mddev *mddev, sector_t sectors)
5529{
5530	/* no resync is happening, and there is enough space
5531	 * on all devices, so we can resize.
5532	 * We need to make sure resync covers any new space.
5533	 * If the array is shrinking we should possibly wait until
5534	 * any io in the removed space completes, but it hardly seems
5535	 * worth it.
5536	 */
5537	sector_t newsize;
5538	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5539	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
5540	if (mddev->external_size &&
5541	    mddev->array_sectors > newsize)
5542		return -EINVAL;
5543	if (mddev->bitmap) {
5544		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
5545		if (ret)
5546			return ret;
5547	}
5548	md_set_array_sectors(mddev, newsize);
5549	set_capacity(mddev->gendisk, mddev->array_sectors);
5550	revalidate_disk(mddev->gendisk);
5551	if (sectors > mddev->dev_sectors &&
5552	    mddev->recovery_cp > mddev->dev_sectors) {
5553		mddev->recovery_cp = mddev->dev_sectors;
5554		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5555	}
5556	mddev->dev_sectors = sectors;
5557	mddev->resync_max_sectors = sectors;
5558	return 0;
5559}
5560
5561static int check_stripe_cache(struct mddev *mddev)
5562{
5563	/* Can only proceed if there are plenty of stripe_heads.
5564	 * We need a minimum of one full stripe, and for sensible progress
5565	 * it is best to have about 4 times that.
5566	 * If we require 4 times, then the default 256 4K stripe_heads will
5567	 * allow for chunk sizes up to 256K, which is probably OK.
5568	 * If the chunk size is greater, user-space should request more
5569	 * stripe_heads first.
5570	 */
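	/*
	 * e.g. with 256K chunks one chunk spans 256K / 4K = 64
	 * stripe_heads and 4 * 64 = 256 is exactly the default
	 * NR_STRIPES, so larger chunks need stripe_cache_size to be
	 * raised through sysfs before a reshape will be accepted.
	 */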
5571	struct r5conf *conf = mddev->private;
5572	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5573	    > conf->max_nr_stripes ||
5574	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5575	    > conf->max_nr_stripes) {
5576		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
5577		       mdname(mddev),
5578		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5579			/ STRIPE_SIZE)*4);
5580		return 0;
5581	}
5582	return 1;
5583}
5584
5585static int check_reshape(struct mddev *mddev)
5586{
5587	struct r5conf *conf = mddev->private;
5588
5589	if (mddev->delta_disks == 0 &&
5590	    mddev->new_layout == mddev->layout &&
5591	    mddev->new_chunk_sectors == mddev->chunk_sectors)
5592		return 0; /* nothing to do */
5593	if (has_failed(conf))
5594		return -EINVAL;
5595	if (mddev->delta_disks < 0) {
5596		/* We might be able to shrink, but the devices must
5597		 * be made bigger first.
5598		 * For raid6, 4 is the minimum size.
5599		 * Otherwise 2 is the minimum
5600		 */
5601		int min = 2;
5602		if (mddev->level == 6)
5603			min = 4;
5604		if (mddev->raid_disks + mddev->delta_disks < min)
5605			return -EINVAL;
5606	}
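	/*
	 * e.g. shrinking a five-drive raid6 by two would leave only
	 * three drives, below the raid6 minimum of four, so such a
	 * request is rejected here.
	 */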
5607
5608	if (!check_stripe_cache(mddev))
5609		return -ENOSPC;
5610
5611	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5612}
5613
5614static int raid5_start_reshape(struct mddev *mddev)
5615{
5616	struct r5conf *conf = mddev->private;
5617	struct md_rdev *rdev;
5618	int spares = 0;
5619	unsigned long flags;
5620
5621	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5622		return -EBUSY;
5623
5624	if (!check_stripe_cache(mddev))
5625		return -ENOSPC;
5626
5627	if (has_failed(conf))
5628		return -EINVAL;
5629
5630	rdev_for_each(rdev, mddev) {
5631		if (!test_bit(In_sync, &rdev->flags)
5632		    && !test_bit(Faulty, &rdev->flags))
5633			spares++;
5634	}
5635
5636	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5637		/* Not enough devices even to make a degraded array
5638		 * of that size
5639		 */
5640		return -EINVAL;
5641
5642	/* Refuse to reduce size of the array.  Any reductions in
5643	 * array size must be through explicit setting of array_size
5644	 * attribute.
5645	 */
5646	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5647	    < mddev->array_sectors) {
5648		printk(KERN_ERR "md/raid:%s: array size must be reduced "
5649		       "before number of disks\n", mdname(mddev));
5650		return -EINVAL;
5651	}
5652
5653	atomic_set(&conf->reshape_stripes, 0);
5654	spin_lock_irq(&conf->device_lock);
5655	conf->previous_raid_disks = conf->raid_disks;
5656	conf->raid_disks += mddev->delta_disks;
5657	conf->prev_chunk_sectors = conf->chunk_sectors;
5658	conf->chunk_sectors = mddev->new_chunk_sectors;
5659	conf->prev_algo = conf->algorithm;
5660	conf->algorithm = mddev->new_layout;
5661	conf->generation++;
5662	/* Code that selects data_offset needs to see the generation update
5663	 * if reshape_progress has been set - so a memory barrier is needed.
5664	 */
5665	smp_mb();
5666	if (mddev->reshape_backwards)
5667		conf->reshape_progress = raid5_size(mddev, 0, 0);
5668	else
5669		conf->reshape_progress = 0;
5670	conf->reshape_safe = conf->reshape_progress;
5671	spin_unlock_irq(&conf->device_lock);
5672
5673	/* Add some new drives, as many as will fit.
5674	 * We know there are enough to make the newly sized array work.
5675	 * Don't add devices if we are reducing the number of
5676	 * devices in the array.  This is because it is not possible
5677	 * to correctly record the "partially reconstructed" state of
5678	 * such devices during the reshape and confusion could result.
5679	 */
5680	if (mddev->delta_disks >= 0) {
5681		rdev_for_each(rdev, mddev)
5682			if (rdev->raid_disk < 0 &&
5683			    !test_bit(Faulty, &rdev->flags)) {
5684				if (raid5_add_disk(mddev, rdev) == 0) {
5685					if (rdev->raid_disk
5686					    >= conf->previous_raid_disks)
5687						set_bit(In_sync, &rdev->flags);
5688					else
5689						rdev->recovery_offset = 0;
5690
5691					if (sysfs_link_rdev(mddev, rdev))
5692						/* Failure here is OK */;
5693				}
5694			} else if (rdev->raid_disk >= conf->previous_raid_disks
5695				   && !test_bit(Faulty, &rdev->flags)) {
5696				/* This is a spare that was manually added */
5697				set_bit(In_sync, &rdev->flags);
5698			}
5699
5700		/* When a reshape changes the number of devices,
5701		 * ->degraded is measured against the larger of the
5702		 * pre and post number of devices.
5703		 */
5704		spin_lock_irqsave(&conf->device_lock, flags);
5705		mddev->degraded = calc_degraded(conf);
5706		spin_unlock_irqrestore(&conf->device_lock, flags);
5707	}
5708	mddev->raid_disks = conf->raid_disks;
5709	mddev->reshape_position = conf->reshape_progress;
5710	set_bit(MD_CHANGE_DEVS, &mddev->flags);
5711
5712	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5713	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5714	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5715	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5716	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5717						"reshape");
5718	if (!mddev->sync_thread) {
5719		mddev->recovery = 0;
5720		spin_lock_irq(&conf->device_lock);
5721		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5722		rdev_for_each(rdev, mddev)
5723			rdev->new_data_offset = rdev->data_offset;
5724		smp_wmb();
5725		conf->reshape_progress = MaxSector;
5726		mddev->reshape_position = MaxSector;
5727		spin_unlock_irq(&conf->device_lock);
5728		return -EAGAIN;
5729	}
5730	conf->reshape_checkpoint = jiffies;
5731	md_wakeup_thread(mddev->sync_thread);
5732	md_new_event(mddev);
5733	return 0;
5734}
5735
5736/* This is called from the reshape thread and should make any
5737 * changes needed in 'conf'
5738 */
5739static void end_reshape(struct r5conf *conf)
5740{
5741
5742	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5743		struct md_rdev *rdev;
5744
5745		spin_lock_irq(&conf->device_lock);
5746		conf->previous_raid_disks = conf->raid_disks;
5747		rdev_for_each(rdev, conf->mddev)
5748			rdev->data_offset = rdev->new_data_offset;
5749		smp_wmb();
5750		conf->reshape_progress = MaxSector;
5751		spin_unlock_irq(&conf->device_lock);
5752		wake_up(&conf->wait_for_overlap);
5753
5754		/* read-ahead size must cover two whole stripes, which is
5755		 * 2 * (datadisks) * chunksize, 'datadisks' being the raid devices minus parity
5756		 */
5757		if (conf->mddev->queue) {
5758			int data_disks = conf->raid_disks - conf->max_degraded;
5759			int stripe = data_disks * ((conf->chunk_sectors << 9)
5760						   / PAGE_SIZE);
5761			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5762				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5763		}
5764	}
5765}
5766
5767/* This is called from the raid5d thread with mddev_lock held.
5768 * It makes config changes to the device.
5769 */
5770static void raid5_finish_reshape(struct mddev *mddev)
5771{
5772	struct r5conf *conf = mddev->private;
5773
5774	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5775
5776		if (mddev->delta_disks > 0) {
5777			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5778			set_capacity(mddev->gendisk, mddev->array_sectors);
5779			revalidate_disk(mddev->gendisk);
5780		} else {
5781			int d;
5782			spin_lock_irq(&conf->device_lock);
5783			mddev->degraded = calc_degraded(conf);
5784			spin_unlock_irq(&conf->device_lock);
5785			for (d = conf->raid_disks ;
5786			     d < conf->raid_disks - mddev->delta_disks;
5787			     d++) {
5788				struct md_rdev *rdev = conf->disks[d].rdev;
5789				if (rdev)
5790					clear_bit(In_sync, &rdev->flags);
5791				rdev = conf->disks[d].replacement;
5792				if (rdev)
5793					clear_bit(In_sync, &rdev->flags);
5794			}
5795		}
5796		mddev->layout = conf->algorithm;
5797		mddev->chunk_sectors = conf->chunk_sectors;
5798		mddev->reshape_position = MaxSector;
5799		mddev->delta_disks = 0;
5800		mddev->reshape_backwards = 0;
5801	}
5802}
5803
5804static void raid5_quiesce(struct mddev *mddev, int state)
5805{
5806	struct r5conf *conf = mddev->private;
5807
5808	switch(state) {
5809	case 2: /* resume for a suspend */
5810		wake_up(&conf->wait_for_overlap);
5811		break;
5812
5813	case 1: /* stop all writes */
5814		spin_lock_irq(&conf->device_lock);
5815		/* '2' tells resync/reshape to pause so that all
5816		 * active stripes can drain
5817		 */
5818		conf->quiesce = 2;
5819		wait_event_lock_irq(conf->wait_for_stripe,
5820				    atomic_read(&conf->active_stripes) == 0 &&
5821				    atomic_read(&conf->active_aligned_reads) == 0,
5822				    conf->device_lock, /* nothing */);
5823		conf->quiesce = 1;
5824		spin_unlock_irq(&conf->device_lock);
5825		/* allow reshape to continue */
5826		wake_up(&conf->wait_for_overlap);
5827		break;
5828
5829	case 0: /* re-enable writes */
5830		spin_lock_irq(&conf->device_lock);
5831		conf->quiesce = 0;
5832		wake_up(&conf->wait_for_stripe);
5833		wake_up(&conf->wait_for_overlap);
5834		spin_unlock_irq(&conf->device_lock);
5835		break;
5836	}
5837}
5838
5839
5840static void *raid45_takeover_raid0(struct mddev *mddev, int level)
5841{
5842	struct r0conf *raid0_conf = mddev->private;
5843	sector_t sectors;
5844
5845	/* for raid0 takeover only one zone is supported */
5846	if (raid0_conf->nr_strip_zones > 1) {
5847		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5848		       mdname(mddev));
5849		return ERR_PTR(-EINVAL);
5850	}
5851
5852	sectors = raid0_conf->strip_zone[0].zone_end;
5853	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
5854	mddev->dev_sectors = sectors;
5855	mddev->new_level = level;
5856	mddev->new_layout = ALGORITHM_PARITY_N;
5857	mddev->new_chunk_sectors = mddev->chunk_sectors;
5858	mddev->raid_disks += 1;
5859	mddev->delta_disks = 1;
5860	/* make sure it will not be marked as dirty */
5861	mddev->recovery_cp = MaxSector;
5862
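	/*
	 * The resulting array starts degraded: under ALGORITHM_PARITY_N
	 * the extra (still missing) last device would hold nothing but
	 * parity, so the raid0 data stays where it is and redundancy
	 * appears once a drive is added and recovered.
	 */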
5863	return setup_conf(mddev);
5864}
5865
5866
5867static void *raid5_takeover_raid1(struct mddev *mddev)
5868{
5869	int chunksect;
5870
5871	if (mddev->raid_disks != 2 ||
5872	    mddev->degraded > 1)
5873		return ERR_PTR(-EINVAL);
5874
5875	/* Should check if there are write-behind devices? */
5876
5877	chunksect = 64*2; /* 64K by default */
5878
5879	/* The array must be an exact multiple of chunksize */
5880	while (chunksect && (mddev->array_sectors & (chunksect-1)))
5881		chunksect >>= 1;
5882
5883	if ((chunksect<<9) < STRIPE_SIZE)
5884		/* array size does not allow a suitable chunk size */
5885		return ERR_PTR(-EINVAL);
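	/*
	 * e.g. an array of 1000000 sectors is not a multiple of the
	 * default 128-sector (64K) chunk, but is a multiple of 64
	 * sectors, so a 32K chunk (still >= STRIPE_SIZE) gets used.
	 */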
5886
5887	mddev->new_level = 5;
5888	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5889	mddev->new_chunk_sectors = chunksect;
5890
5891	return setup_conf(mddev);
5892}
5893
5894static void *raid5_takeover_raid6(struct mddev *mddev)
5895{
5896	int new_layout;
5897
5898	switch (mddev->layout) {
5899	case ALGORITHM_LEFT_ASYMMETRIC_6:
5900		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5901		break;
5902	case ALGORITHM_RIGHT_ASYMMETRIC_6:
5903		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5904		break;
5905	case ALGORITHM_LEFT_SYMMETRIC_6:
5906		new_layout = ALGORITHM_LEFT_SYMMETRIC;
5907		break;
5908	case ALGORITHM_RIGHT_SYMMETRIC_6:
5909		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5910		break;
5911	case ALGORITHM_PARITY_0_6:
5912		new_layout = ALGORITHM_PARITY_0;
5913		break;
5914	case ALGORITHM_PARITY_N:
5915		new_layout = ALGORITHM_PARITY_N;
5916		break;
5917	default:
5918		return ERR_PTR(-EINVAL);
5919	}
5920	mddev->new_level = 5;
5921	mddev->new_layout = new_layout;
5922	mddev->delta_disks = -1;
5923	mddev->raid_disks -= 1;
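	/*
	 * e.g. a five-drive raid6 in ALGORITHM_LEFT_SYMMETRIC_6 (the
	 * raid5 left-symmetric layout with Q parked on the last drive)
	 * becomes a four-drive raid5 in ALGORITHM_LEFT_SYMMETRIC; the
	 * drive that held only Q drops out of the array.
	 */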
5924	return setup_conf(mddev);
5925}
5926
5927
5928static int raid5_check_reshape(struct mddev *mddev)
5929{
5930	/* For a 2-drive array, the layout and chunk size can be changed
5931	 * immediately, as no restriping is needed.
5932	 * For larger arrays we record the new value - after validation
5933	 * to be used by a reshape pass.
5934	 */
5935	struct r5conf *conf = mddev->private;
5936	int new_chunk = mddev->new_chunk_sectors;
5937
5938	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5939		return -EINVAL;
5940	if (new_chunk > 0) {
5941		if (!is_power_of_2(new_chunk))
5942			return -EINVAL;
5943		if (new_chunk < (PAGE_SIZE>>9))
5944			return -EINVAL;
5945		if (mddev->array_sectors & (new_chunk-1))
5946			/* not factor of array size */
5947			return -EINVAL;
5948	}
5949
5950	/* They look valid */
5951
5952	if (mddev->raid_disks == 2) {
5953		/* can make the change immediately */
5954		if (mddev->new_layout >= 0) {
5955			conf->algorithm = mddev->new_layout;
5956			mddev->layout = mddev->new_layout;
5957		}
5958		if (new_chunk > 0) {
5959			conf->chunk_sectors = new_chunk;
5960			mddev->chunk_sectors = new_chunk;
5961		}
5962		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5963		md_wakeup_thread(mddev->thread);
5964	}
5965	return check_reshape(mddev);
5966}
5967
5968static int raid6_check_reshape(struct mddev *mddev)
5969{
5970	int new_chunk = mddev->new_chunk_sectors;
5971
5972	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5973		return -EINVAL;
5974	if (new_chunk > 0) {
5975		if (!is_power_of_2(new_chunk))
5976			return -EINVAL;
5977		if (new_chunk < (PAGE_SIZE >> 9))
5978			return -EINVAL;
5979		if (mddev->array_sectors & (new_chunk-1))
5980			/* not factor of array size */
5981			return -EINVAL;
5982	}
5983
5984	/* They look valid */
5985	return check_reshape(mddev);
5986}
5987
5988static void *raid5_takeover(struct mddev *mddev)
5989{
5990	/* raid5 can take over:
5991	 *  raid0 - if there is only one strip zone - make it a raid4 layout
5992	 *  raid1 - if there are two drives.  We need to know the chunk size
5993	 *  raid4 - trivial - just use a raid4 layout.
5994	 *  raid6 - Providing it is a *_6 layout
5995	 */
5996	if (mddev->level == 0)
5997		return raid45_takeover_raid0(mddev, 5);
5998	if (mddev->level == 1)
5999		return raid5_takeover_raid1(mddev);
6000	if (mddev->level == 4) {
6001		mddev->new_layout = ALGORITHM_PARITY_N;
6002		mddev->new_level = 5;
6003		return setup_conf(mddev);
6004	}
6005	if (mddev->level == 6)
6006		return raid5_takeover_raid6(mddev);
6007
6008	return ERR_PTR(-EINVAL);
6009}
6010
6011static void *raid4_takeover(struct mddev *mddev)
6012{
6013	/* raid4 can take over:
6014	 *  raid0 - if there is only one strip zone
6015	 *  raid5 - if layout is right
6016	 */
6017	if (mddev->level == 0)
6018		return raid45_takeover_raid0(mddev, 4);
6019	if (mddev->level == 5 &&
6020	    mddev->layout == ALGORITHM_PARITY_N) {
6021		mddev->new_layout = 0;
6022		mddev->new_level = 4;
6023		return setup_conf(mddev);
6024	}
6025	return ERR_PTR(-EINVAL);
6026}
6027
6028static struct md_personality raid5_personality;
6029
6030static void *raid6_takeover(struct mddev *mddev)
6031{
6032	/* Currently can only take over a raid5.  We map the
6033	 * personality to an equivalent raid6 personality
6034	 * with the Q block at the end.
6035	 */
6036	int new_layout;
6037
6038	if (mddev->pers != &raid5_personality)
6039		return ERR_PTR(-EINVAL);
6040	if (mddev->degraded > 1)
6041		return ERR_PTR(-EINVAL);
6042	if (mddev->raid_disks > 253)
6043		return ERR_PTR(-EINVAL);
6044	if (mddev->raid_disks < 3)
6045		return ERR_PTR(-EINVAL);
6046
6047	switch (mddev->layout) {
6048	case ALGORITHM_LEFT_ASYMMETRIC:
6049		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
6050		break;
6051	case ALGORITHM_RIGHT_ASYMMETRIC:
6052		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
6053		break;
6054	case ALGORITHM_LEFT_SYMMETRIC:
6055		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
6056		break;
6057	case ALGORITHM_RIGHT_SYMMETRIC:
6058		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
6059		break;
6060	case ALGORITHM_PARITY_0:
6061		new_layout = ALGORITHM_PARITY_0_6;
6062		break;
6063	case ALGORITHM_PARITY_N:
6064		new_layout = ALGORITHM_PARITY_N;
6065		break;
6066	default:
6067		return ERR_PTR(-EINVAL);
6068	}
6069	mddev->new_level = 6;
6070	mddev->new_layout = new_layout;
6071	mddev->delta_disks = 1;
6072	mddev->raid_disks += 1;
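	/*
	 * e.g. a four-drive raid5 in ALGORITHM_LEFT_SYMMETRIC becomes a
	 * five-drive raid6 in ALGORITHM_LEFT_SYMMETRIC_6: existing data
	 * and parity stay put and the added drive takes all the Q blocks.
	 */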
6073	return setup_conf(mddev);
6074}
6075
6076
6077static struct md_personality raid6_personality =
6078{
6079	.name		= "raid6",
6080	.level		= 6,
6081	.owner		= THIS_MODULE,
6082	.make_request	= make_request,
6083	.run		= run,
6084	.stop		= stop,
6085	.status		= status,
6086	.error_handler	= error,
6087	.hot_add_disk	= raid5_add_disk,
6088	.hot_remove_disk= raid5_remove_disk,
6089	.spare_active	= raid5_spare_active,
6090	.sync_request	= sync_request,
6091	.resize		= raid5_resize,
6092	.size		= raid5_size,
6093	.check_reshape	= raid6_check_reshape,
6094	.start_reshape  = raid5_start_reshape,
6095	.finish_reshape = raid5_finish_reshape,
6096	.quiesce	= raid5_quiesce,
6097	.takeover	= raid6_takeover,
6098};
6099static struct md_personality raid5_personality =
6100{
6101	.name		= "raid5",
6102	.level		= 5,
6103	.owner		= THIS_MODULE,
6104	.make_request	= make_request,
6105	.run		= run,
6106	.stop		= stop,
6107	.status		= status,
6108	.error_handler	= error,
6109	.hot_add_disk	= raid5_add_disk,
6110	.hot_remove_disk= raid5_remove_disk,
6111	.spare_active	= raid5_spare_active,
6112	.sync_request	= sync_request,
6113	.resize		= raid5_resize,
6114	.size		= raid5_size,
6115	.check_reshape	= raid5_check_reshape,
6116	.start_reshape  = raid5_start_reshape,
6117	.finish_reshape = raid5_finish_reshape,
6118	.quiesce	= raid5_quiesce,
6119	.takeover	= raid5_takeover,
6120};
6121
6122static struct md_personality raid4_personality =
6123{
6124	.name		= "raid4",
6125	.level		= 4,
6126	.owner		= THIS_MODULE,
6127	.make_request	= make_request,
6128	.run		= run,
6129	.stop		= stop,
6130	.status		= status,
6131	.error_handler	= error,
6132	.hot_add_disk	= raid5_add_disk,
6133	.hot_remove_disk= raid5_remove_disk,
6134	.spare_active	= raid5_spare_active,
6135	.sync_request	= sync_request,
6136	.resize		= raid5_resize,
6137	.size		= raid5_size,
6138	.check_reshape	= raid5_check_reshape,
6139	.start_reshape  = raid5_start_reshape,
6140	.finish_reshape = raid5_finish_reshape,
6141	.quiesce	= raid5_quiesce,
6142	.takeover	= raid4_takeover,
6143};
6144
6145static int __init raid5_init(void)
6146{
6147	register_md_personality(&raid6_personality);
6148	register_md_personality(&raid5_personality);
6149	register_md_personality(&raid4_personality);
6150	return 0;
6151}
6152
6153static void raid5_exit(void)
6154{
6155	unregister_md_personality(&raid6_personality);
6156	unregister_md_personality(&raid5_personality);
6157	unregister_md_personality(&raid4_personality);
6158}
6159
6160module_init(raid5_init);
6161module_exit(raid5_exit);
6162MODULE_LICENSE("GPL");
6163MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
6164MODULE_ALIAS("md-personality-4"); /* RAID5 */
6165MODULE_ALIAS("md-raid5");
6166MODULE_ALIAS("md-raid4");
6167MODULE_ALIAS("md-level-5");
6168MODULE_ALIAS("md-level-4");
6169MODULE_ALIAS("md-personality-8"); /* RAID6 */
6170MODULE_ALIAS("md-raid6");
6171MODULE_ALIAS("md-level-6");
6172
6173/* This used to be two separate modules, they were: */
6174MODULE_ALIAS("raid5");
6175MODULE_ALIAS("raid6");