   1/*
   2 * raid5.c : Multiple Devices driver for Linux
   3 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   4 *	   Copyright (C) 1999, 2000 Ingo Molnar
   5 *	   Copyright (C) 2002, 2003 H. Peter Anvin
   6 *
   7 * RAID-4/5/6 management functions.
   8 * Thanks to Penguin Computing for making the RAID-6 development possible
   9 * by donating a test server!
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21/*
  22 * BITMAP UNPLUGGING:
  23 *
  24 * The sequencing for updating the bitmap reliably is a little
  25 * subtle (and I got it wrong the first time) so it deserves some
  26 * explanation.
  27 *
  28 * We group bitmap updates into batches.  Each batch has a number.
  29 * We may write out several batches at once, but that isn't very important.
  30 * conf->seq_write is the number of the last batch successfully written.
  31 * conf->seq_flush is the number of the last batch that was closed to
  32 *    new additions.
  33 * When we discover that we will need to write to any block in a stripe
  34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  35 * the number of the batch it will be in. This is seq_flush+1.
  36 * When we are ready to do a write, if that batch hasn't been written yet,
  37 *   we plug the array and queue the stripe for later.
  38 * When an unplug happens, we increment seq_flush, thus closing the current
  39 *   batch.
  40 * When we notice that seq_flush > seq_write, we write out all pending updates
  41 * to the bitmap, and advance seq_write to where seq_flush was.
  42 * This may occasionally write a bit out twice, but is sure never to
  43 * miss any bits.
  44 */
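/*
 * Illustrative timeline of the scheme above (the numbers are arbitrary):
 *
 *   seq_flush == seq_write == 42    no batch is pending
 *   add_stripe_bio()                sh->bm_seq = seq_flush + 1 = 43
 *   unplug                          seq_flush becomes 43; batch 43 is closed
 *   bitmap updates written          seq_write advances to 43
 *   sh->bm_seq (43) <= seq_write    the stripe's writes may now proceed
 */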
  45
  46#include <linux/blkdev.h>
  47#include <linux/kthread.h>
  48#include <linux/raid/pq.h>
  49#include <linux/async_tx.h>
  50#include <linux/module.h>
  51#include <linux/async.h>
  52#include <linux/seq_file.h>
  53#include <linux/cpu.h>
  54#include <linux/slab.h>
  55#include <linux/ratelimit.h>
  56#include <linux/nodemask.h>
  57#include <linux/flex_array.h>
  58#include <trace/events/block.h>
  59
  60#include "md.h"
  61#include "raid5.h"
  62#include "raid0.h"
  63#include "bitmap.h"
  64
  65#define cpu_to_group(cpu) cpu_to_node(cpu)
  66#define ANY_GROUP NUMA_NO_NODE
  67
  68static bool devices_handle_discard_safely = false;
  69module_param(devices_handle_discard_safely, bool, 0644);
  70MODULE_PARM_DESC(devices_handle_discard_safely,
  71		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
  72static struct workqueue_struct *raid5_wq;
  73/*
  74 * Stripe cache
  75 */
  76
  77#define NR_STRIPES		256
  78#define STRIPE_SIZE		PAGE_SIZE
  79#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
  80#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
  81#define	IO_THRESHOLD		1
  82#define BYPASS_THRESHOLD	1
  83#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
  84#define HASH_MASK		(NR_HASH - 1)
  85#define MAX_STRIPE_BATCH	8
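/*
 * For reference, with 4K pages and 64-bit pointers (the exact values depend
 * on the architecture) the definitions above work out to:
 *   STRIPE_SIZE    = 4096 bytes
 *   STRIPE_SHIFT   = 3        (12 - 9)
 *   STRIPE_SECTORS = 8        512-byte sectors per stripe page
 *   NR_HASH        = 512      hash buckets (4096 / 8)
 *   HASH_MASK      = 0x1ff
 */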
  86
  87static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  88{
  89	int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
  90	return &conf->stripe_hashtbl[hash];
  91}
  92
  93static inline int stripe_hash_locks_hash(sector_t sect)
  94{
  95	return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
  96}
  97
  98static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
  99{
 100	spin_lock_irq(conf->hash_locks + hash);
 101	spin_lock(&conf->device_lock);
 102}
 103
 104static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
 105{
 106	spin_unlock(&conf->device_lock);
 107	spin_unlock_irq(conf->hash_locks + hash);
 108}
 109
 110static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
 111{
 112	int i;
 113	local_irq_disable();
 114	spin_lock(conf->hash_locks);
 115	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
 116		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
 117	spin_lock(&conf->device_lock);
 118}
 119
 120static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 121{
 122	int i;
 123	spin_unlock(&conf->device_lock);
 124	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
 125		spin_unlock(conf->hash_locks + i - 1);
 126	local_irq_enable();
 127}
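/*
 * Lock ordering used by the helpers above: a hash lock is always taken
 * before conf->device_lock.  lock_all_device_hash_locks_irq() takes hash
 * lock 0, then the remaining hash locks in index order, then device_lock;
 * unlocking proceeds in the reverse order.
 */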
 128
 129/* bios attached to a stripe+device for I/O are linked together in bi_sector
 130 * order without overlap.  There may be several bios per stripe+device, and
 131 * a bio could span several devices.
 132 * When walking this list for a particular stripe+device, we must never proceed
 133 * beyond a bio that extends past this device, as the next bio might no longer
 134 * be valid.
 135 * This function is used to determine the 'next' bio in the list, given the
 136 * sector of the current stripe+device.
 137 */
 138static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 139{
 140	int sectors = bio_sectors(bio);
 141	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 142		return bio->bi_next;
 143	else
 144		return NULL;
 145}
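/*
 * Typical walk over the bios queued against one stripe+device, as used by
 * the biofill and drain paths later in this file:
 *
 *	while (bio && bio->bi_iter.bi_sector <
 *	       dev->sector + STRIPE_SECTORS) {
 *		... handle bio ...
 *		bio = r5_next_bio(bio, dev->sector);
 *	}
 */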
 146
 147/*
 148 * We maintain a biased count of active stripes in the bottom 16 bits of
 149 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 150 */
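/*
 * Layout of the 32-bit bi_phys_segments value as used by the accessors
 * below:
 *
 *	 31             16 15              0
 *	+-----------------+-----------------+
 *	| processed       | active stripes  |
 *	| stripes         | (biased count)  |
 *	+-----------------+-----------------+
 */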
 151static inline int raid5_bi_processed_stripes(struct bio *bio)
 152{
 153	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 154	return (atomic_read(segments) >> 16) & 0xffff;
 155}
 156
 157static inline int raid5_dec_bi_active_stripes(struct bio *bio)
 158{
 159	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 160	return atomic_sub_return(1, segments) & 0xffff;
 161}
 162
 163static inline void raid5_inc_bi_active_stripes(struct bio *bio)
 164{
 165	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 166	atomic_inc(segments);
 167}
 168
 169static inline void raid5_set_bi_processed_stripes(struct bio *bio,
 170	unsigned int cnt)
 171{
 172	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 173	int old, new;
 174
 175	do {
 176		old = atomic_read(segments);
 177		new = (old & 0xffff) | (cnt << 16);
 178	} while (atomic_cmpxchg(segments, old, new) != old);
 179}
 180
 181static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
 182{
 183	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
 184	atomic_set(segments, cnt);
 185}
 186
 187/* Find first data disk in a raid6 stripe */
 188static inline int raid6_d0(struct stripe_head *sh)
 189{
 190	if (sh->ddf_layout)
  191		/* ddf always starts from the first device */
 192		return 0;
 193	/* md starts just after Q block */
 194	if (sh->qd_idx == sh->disks - 1)
 195		return 0;
 196	else
 197		return sh->qd_idx + 1;
 198}
 199static inline int raid6_next_disk(int disk, int raid_disks)
 200{
 201	disk++;
 202	return (disk < raid_disks) ? disk : 0;
 203}
 204
 205/* When walking through the disks in a raid6, starting at raid6_d0,
 206 * we need to map each disk to a 'slot', where the data disks are slots
 207 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 208 * is raid_disks-1.  This helper does that mapping.
 209 */
 210static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 211			     int *count, int syndrome_disks)
 212{
 213	int slot = *count;
 214
 215	if (sh->ddf_layout)
 216		(*count)++;
 217	if (idx == sh->pd_idx)
 218		return syndrome_disks;
 219	if (idx == sh->qd_idx)
 220		return syndrome_disks + 1;
 221	if (!sh->ddf_layout)
 222		(*count)++;
 223	return slot;
 224}
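/*
 * Example of the slot mapping for a 5-device md-layout array with
 * pd_idx == 3 and qd_idx == 4, so raid6_d0() == 0 and the callers pass
 * syndrome_disks == 3 (disks - 2):
 *
 *	device index:  0  1  2  pd  qd
 *	slot:          0  1  2   3   4
 *
 * For the ddf layout, *count is incremented before the pd/qd checks
 * rather than after them.
 */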
 225
 226static void return_io(struct bio_list *return_bi)
 227{
 228	struct bio *bi;
 229	while ((bi = bio_list_pop(return_bi)) != NULL) {
 230		bi->bi_iter.bi_size = 0;
 231		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 232					 bi, 0);
 233		bio_endio(bi);
 234	}
 235}
 236
 237static void print_raid5_conf (struct r5conf *conf);
 238
 239static int stripe_operations_active(struct stripe_head *sh)
 240{
 241	return sh->check_state || sh->reconstruct_state ||
 242	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
 243	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 244}
 245
 246static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
 247{
 248	struct r5conf *conf = sh->raid_conf;
 249	struct r5worker_group *group;
 250	int thread_cnt;
 251	int i, cpu = sh->cpu;
 252
 253	if (!cpu_online(cpu)) {
 254		cpu = cpumask_any(cpu_online_mask);
 255		sh->cpu = cpu;
 256	}
 257
 258	if (list_empty(&sh->lru)) {
 259		struct r5worker_group *group;
 260		group = conf->worker_groups + cpu_to_group(cpu);
 261		list_add_tail(&sh->lru, &group->handle_list);
 262		group->stripes_cnt++;
 263		sh->group = group;
 264	}
 265
 266	if (conf->worker_cnt_per_group == 0) {
 267		md_wakeup_thread(conf->mddev->thread);
 268		return;
 269	}
 270
 271	group = conf->worker_groups + cpu_to_group(sh->cpu);
 272
 273	group->workers[0].working = true;
 274	/* at least one worker should run to avoid race */
 275	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
 276
 277	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
 278	/* wakeup more workers */
 279	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
 280		if (group->workers[i].working == false) {
 281			group->workers[i].working = true;
 282			queue_work_on(sh->cpu, raid5_wq,
 283				      &group->workers[i].work);
 284			thread_cnt--;
 285		}
 286	}
 287}
 288
 289static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
 290			      struct list_head *temp_inactive_list)
 291{
 292	BUG_ON(!list_empty(&sh->lru));
 293	BUG_ON(atomic_read(&conf->active_stripes)==0);
 294	if (test_bit(STRIPE_HANDLE, &sh->state)) {
 295		if (test_bit(STRIPE_DELAYED, &sh->state) &&
 296		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 297			list_add_tail(&sh->lru, &conf->delayed_list);
 298		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 299			   sh->bm_seq - conf->seq_write > 0)
 300			list_add_tail(&sh->lru, &conf->bitmap_list);
 301		else {
 302			clear_bit(STRIPE_DELAYED, &sh->state);
 303			clear_bit(STRIPE_BIT_DELAY, &sh->state);
 304			if (conf->worker_cnt_per_group == 0) {
 305				list_add_tail(&sh->lru, &conf->handle_list);
 306			} else {
 307				raid5_wakeup_stripe_thread(sh);
 308				return;
 309			}
 310		}
 311		md_wakeup_thread(conf->mddev->thread);
 312	} else {
 313		BUG_ON(stripe_operations_active(sh));
 314		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 315			if (atomic_dec_return(&conf->preread_active_stripes)
 316			    < IO_THRESHOLD)
 317				md_wakeup_thread(conf->mddev->thread);
 318		atomic_dec(&conf->active_stripes);
 319		if (!test_bit(STRIPE_EXPANDING, &sh->state))
 320			list_add_tail(&sh->lru, temp_inactive_list);
 321	}
 322}
 323
 324static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
 325			     struct list_head *temp_inactive_list)
 326{
 327	if (atomic_dec_and_test(&sh->count))
 328		do_release_stripe(conf, sh, temp_inactive_list);
 329}
 330
 331/*
 332 * @hash may be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list is an
 333 * array of inactive lists, one per hash lock.
 334 * Be careful: only one task can add/delete stripes from temp_inactive_list at a
 335 * given time. Adding stripes only takes the device lock, while deleting stripes
 336 * only takes the hash lock.
 337 */
 338static void release_inactive_stripe_list(struct r5conf *conf,
 339					 struct list_head *temp_inactive_list,
 340					 int hash)
 341{
 342	int size;
 343	bool do_wakeup = false;
 344	unsigned long flags;
 345
 346	if (hash == NR_STRIPE_HASH_LOCKS) {
 347		size = NR_STRIPE_HASH_LOCKS;
 348		hash = NR_STRIPE_HASH_LOCKS - 1;
 349	} else
 350		size = 1;
 351	while (size) {
 352		struct list_head *list = &temp_inactive_list[size - 1];
 353
 354		/*
 355		 * We don't hold any lock here yet, so raid5_get_active_stripe()
 356		 * might remove stripes from the list
 357		 */
 358		if (!list_empty_careful(list)) {
 359			spin_lock_irqsave(conf->hash_locks + hash, flags);
 360			if (list_empty(conf->inactive_list + hash) &&
 361			    !list_empty(list))
 362				atomic_dec(&conf->empty_inactive_list_nr);
 363			list_splice_tail_init(list, conf->inactive_list + hash);
 364			do_wakeup = true;
 365			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
 366		}
 367		size--;
 368		hash--;
 369	}
 370
 371	if (do_wakeup) {
 372		wake_up(&conf->wait_for_stripe);
 373		if (atomic_read(&conf->active_stripes) == 0)
 374			wake_up(&conf->wait_for_quiescent);
 375		if (conf->retry_read_aligned)
 376			md_wakeup_thread(conf->mddev->thread);
 377	}
 378}
 379
 380/* should hold conf->device_lock already */
 381static int release_stripe_list(struct r5conf *conf,
 382			       struct list_head *temp_inactive_list)
 383{
 384	struct stripe_head *sh;
 385	int count = 0;
 386	struct llist_node *head;
 387
 388	head = llist_del_all(&conf->released_stripes);
 389	head = llist_reverse_order(head);
 390	while (head) {
 391		int hash;
 392
 393		sh = llist_entry(head, struct stripe_head, release_list);
 394		head = llist_next(head);
  395		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
 396		smp_mb();
 397		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
 398		/*
  399		 * Don't worry if the bit gets set again here: when that happens,
  400		 * the count is always > 1. The same is true for the
  401		 * STRIPE_ON_UNPLUG_LIST bit.
 402		 */
 403		hash = sh->hash_lock_index;
 404		__release_stripe(conf, sh, &temp_inactive_list[hash]);
 405		count++;
 406	}
 407
 408	return count;
 409}
 410
 411void raid5_release_stripe(struct stripe_head *sh)
 412{
 413	struct r5conf *conf = sh->raid_conf;
 414	unsigned long flags;
 415	struct list_head list;
 416	int hash;
 417	bool wakeup;
 418
 419	/* Avoid release_list until the last reference.
 420	 */
 421	if (atomic_add_unless(&sh->count, -1, 1))
 422		return;
 423
 424	if (unlikely(!conf->mddev->thread) ||
 425		test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
 426		goto slow_path;
 427	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
 428	if (wakeup)
 429		md_wakeup_thread(conf->mddev->thread);
 430	return;
 431slow_path:
 432	local_irq_save(flags);
  433	/* we are ok here whether STRIPE_ON_RELEASE_LIST is set or not */
 434	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
 435		INIT_LIST_HEAD(&list);
 436		hash = sh->hash_lock_index;
 437		do_release_stripe(conf, sh, &list);
 438		spin_unlock(&conf->device_lock);
 439		release_inactive_stripe_list(conf, &list, hash);
 440	}
 441	local_irq_restore(flags);
 442}
 443
 444static inline void remove_hash(struct stripe_head *sh)
 445{
 446	pr_debug("remove_hash(), stripe %llu\n",
 447		(unsigned long long)sh->sector);
 448
 449	hlist_del_init(&sh->hash);
 450}
 451
 452static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
 453{
 454	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 455
 456	pr_debug("insert_hash(), stripe %llu\n",
 457		(unsigned long long)sh->sector);
 458
 459	hlist_add_head(&sh->hash, hp);
 460}
 461
 462/* find an idle stripe, make sure it is unhashed, and return it. */
 463static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
 464{
 465	struct stripe_head *sh = NULL;
 466	struct list_head *first;
 467
 468	if (list_empty(conf->inactive_list + hash))
 469		goto out;
 470	first = (conf->inactive_list + hash)->next;
 471	sh = list_entry(first, struct stripe_head, lru);
 472	list_del_init(first);
 473	remove_hash(sh);
 474	atomic_inc(&conf->active_stripes);
 475	BUG_ON(hash != sh->hash_lock_index);
 476	if (list_empty(conf->inactive_list + hash))
 477		atomic_inc(&conf->empty_inactive_list_nr);
 478out:
 479	return sh;
 480}
 481
 482static void shrink_buffers(struct stripe_head *sh)
 483{
 484	struct page *p;
 485	int i;
 486	int num = sh->raid_conf->pool_size;
 487
 488	for (i = 0; i < num ; i++) {
 489		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
 490		p = sh->dev[i].page;
 491		if (!p)
 492			continue;
 493		sh->dev[i].page = NULL;
 494		put_page(p);
 495	}
 496}
 497
 498static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
 499{
 500	int i;
 501	int num = sh->raid_conf->pool_size;
 502
 503	for (i = 0; i < num; i++) {
 504		struct page *page;
 505
 506		if (!(page = alloc_page(gfp))) {
 507			return 1;
 508		}
 509		sh->dev[i].page = page;
 510		sh->dev[i].orig_page = page;
 511	}
 512	return 0;
 513}
 514
 515static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 516static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
 517			    struct stripe_head *sh);
 518
 519static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 520{
 521	struct r5conf *conf = sh->raid_conf;
 522	int i, seq;
 523
 524	BUG_ON(atomic_read(&sh->count) != 0);
 525	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 526	BUG_ON(stripe_operations_active(sh));
 527	BUG_ON(sh->batch_head);
 528
 529	pr_debug("init_stripe called, stripe %llu\n",
 530		(unsigned long long)sector);
 531retry:
 532	seq = read_seqcount_begin(&conf->gen_lock);
 533	sh->generation = conf->generation - previous;
 534	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 535	sh->sector = sector;
 536	stripe_set_idx(sector, conf, previous, sh);
 537	sh->state = 0;
 538
 539	for (i = sh->disks; i--; ) {
 540		struct r5dev *dev = &sh->dev[i];
 541
 542		if (dev->toread || dev->read || dev->towrite || dev->written ||
 543		    test_bit(R5_LOCKED, &dev->flags)) {
 544			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
 545			       (unsigned long long)sh->sector, i, dev->toread,
 546			       dev->read, dev->towrite, dev->written,
 547			       test_bit(R5_LOCKED, &dev->flags));
 548			WARN_ON(1);
 549		}
 550		dev->flags = 0;
 551		raid5_build_block(sh, i, previous);
 552	}
 553	if (read_seqcount_retry(&conf->gen_lock, seq))
 554		goto retry;
 555	sh->overwrite_disks = 0;
 556	insert_hash(conf, sh);
 557	sh->cpu = smp_processor_id();
 558	set_bit(STRIPE_BATCH_READY, &sh->state);
 559}
 560
 561static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
 562					 short generation)
 563{
 564	struct stripe_head *sh;
 565
 566	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 567	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
 568		if (sh->sector == sector && sh->generation == generation)
 569			return sh;
 570	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 571	return NULL;
 572}
 573
 574/*
 575 * Need to check if array has failed when deciding whether to:
 576 *  - start an array
 577 *  - remove non-faulty devices
 578 *  - add a spare
 579 *  - allow a reshape
 580 * This determination is simple when no reshape is happening.
 581 * However if there is a reshape, we need to carefully check
 582 * both the before and after sections.
 583 * This is because some failed devices may only affect one
 584 * of the two sections, and some non-in_sync devices may
 585 * be insync in the section most affected by failed devices.
 586 */
 587static int calc_degraded(struct r5conf *conf)
 588{
 589	int degraded, degraded2;
 590	int i;
 591
 592	rcu_read_lock();
 593	degraded = 0;
 594	for (i = 0; i < conf->previous_raid_disks; i++) {
 595		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 596		if (rdev && test_bit(Faulty, &rdev->flags))
 597			rdev = rcu_dereference(conf->disks[i].replacement);
 598		if (!rdev || test_bit(Faulty, &rdev->flags))
 599			degraded++;
 600		else if (test_bit(In_sync, &rdev->flags))
 601			;
 602		else
 603			/* not in-sync or faulty.
 604			 * If the reshape increases the number of devices,
 605			 * this is being recovered by the reshape, so
 606			 * this 'previous' section is not in_sync.
 607			 * If the number of devices is being reduced however,
 608			 * the device can only be part of the array if
 609			 * we are reverting a reshape, so this section will
 610			 * be in-sync.
 611			 */
 612			if (conf->raid_disks >= conf->previous_raid_disks)
 613				degraded++;
 614	}
 615	rcu_read_unlock();
 616	if (conf->raid_disks == conf->previous_raid_disks)
 617		return degraded;
 618	rcu_read_lock();
 619	degraded2 = 0;
 620	for (i = 0; i < conf->raid_disks; i++) {
 621		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 622		if (rdev && test_bit(Faulty, &rdev->flags))
 623			rdev = rcu_dereference(conf->disks[i].replacement);
 624		if (!rdev || test_bit(Faulty, &rdev->flags))
 625			degraded2++;
 626		else if (test_bit(In_sync, &rdev->flags))
 627			;
 628		else
 629			/* not in-sync or faulty.
 630			 * If reshape increases the number of devices, this
 631			 * section has already been recovered, else it
 632			 * almost certainly hasn't.
 633			 */
 634			if (conf->raid_disks <= conf->previous_raid_disks)
 635				degraded2++;
 636	}
 637	rcu_read_unlock();
 638	if (degraded2 > degraded)
 639		return degraded2;
 640	return degraded;
 641}
 642
 643static int has_failed(struct r5conf *conf)
 644{
 645	int degraded;
 646
 647	if (conf->mddev->reshape_position == MaxSector)
 648		return conf->mddev->degraded > conf->max_degraded;
 649
 650	degraded = calc_degraded(conf);
 651	if (degraded > conf->max_degraded)
 652		return 1;
 653	return 0;
 654}
 655
 656struct stripe_head *
 657raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 658			int previous, int noblock, int noquiesce)
 659{
 660	struct stripe_head *sh;
 661	int hash = stripe_hash_locks_hash(sector);
 662
 663	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 664
 665	spin_lock_irq(conf->hash_locks + hash);
 666
 667	do {
 668		wait_event_lock_irq(conf->wait_for_quiescent,
 669				    conf->quiesce == 0 || noquiesce,
 670				    *(conf->hash_locks + hash));
 671		sh = __find_stripe(conf, sector, conf->generation - previous);
 672		if (!sh) {
 673			if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
 674				sh = get_free_stripe(conf, hash);
 675				if (!sh && !test_bit(R5_DID_ALLOC,
 676						     &conf->cache_state))
 677					set_bit(R5_ALLOC_MORE,
 678						&conf->cache_state);
 679			}
 680			if (noblock && sh == NULL)
 681				break;
 682			if (!sh) {
 683				set_bit(R5_INACTIVE_BLOCKED,
 684					&conf->cache_state);
 685				wait_event_lock_irq(
 686					conf->wait_for_stripe,
 687					!list_empty(conf->inactive_list + hash) &&
 688					(atomic_read(&conf->active_stripes)
 689					 < (conf->max_nr_stripes * 3 / 4)
 690					 || !test_bit(R5_INACTIVE_BLOCKED,
 691						      &conf->cache_state)),
 692					*(conf->hash_locks + hash));
 693				clear_bit(R5_INACTIVE_BLOCKED,
 694					  &conf->cache_state);
 695			} else {
 696				init_stripe(sh, sector, previous);
 697				atomic_inc(&sh->count);
 698			}
 699		} else if (!atomic_inc_not_zero(&sh->count)) {
 700			spin_lock(&conf->device_lock);
 701			if (!atomic_read(&sh->count)) {
 702				if (!test_bit(STRIPE_HANDLE, &sh->state))
 703					atomic_inc(&conf->active_stripes);
 704				BUG_ON(list_empty(&sh->lru) &&
 705				       !test_bit(STRIPE_EXPANDING, &sh->state));
 706				list_del_init(&sh->lru);
 707				if (sh->group) {
 708					sh->group->stripes_cnt--;
 709					sh->group = NULL;
 710				}
 711			}
 712			atomic_inc(&sh->count);
 713			spin_unlock(&conf->device_lock);
 714		}
 715	} while (sh == NULL);
 716
 717	spin_unlock_irq(conf->hash_locks + hash);
 718	return sh;
 719}
 720
 721static bool is_full_stripe_write(struct stripe_head *sh)
 722{
 723	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
 724	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
 725}
 726
 727static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 728{
 729	local_irq_disable();
 730	if (sh1 > sh2) {
 731		spin_lock(&sh2->stripe_lock);
 732		spin_lock_nested(&sh1->stripe_lock, 1);
 733	} else {
 734		spin_lock(&sh1->stripe_lock);
 735		spin_lock_nested(&sh2->stripe_lock, 1);
 736	}
 737}
 738
 739static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 740{
 741	spin_unlock(&sh1->stripe_lock);
 742	spin_unlock(&sh2->stripe_lock);
 743	local_irq_enable();
 744}
 745
  746/* Only a fresh, full-stripe, normal-write stripe can be added to a batch list */
 747static bool stripe_can_batch(struct stripe_head *sh)
 748{
 749	struct r5conf *conf = sh->raid_conf;
 750
 751	if (conf->log)
 752		return false;
 753	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
 754		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
 755		is_full_stripe_write(sh);
 756}
 757
  758/* we only search backwards (toward lower sectors) for a batch head */
 759static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
 760{
 761	struct stripe_head *head;
 762	sector_t head_sector, tmp_sec;
 763	int hash;
 764	int dd_idx;
 765
 766	/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
 767	tmp_sec = sh->sector;
 768	if (!sector_div(tmp_sec, conf->chunk_sectors))
 769		return;
 770	head_sector = sh->sector - STRIPE_SECTORS;
 771
 772	hash = stripe_hash_locks_hash(head_sector);
 773	spin_lock_irq(conf->hash_locks + hash);
 774	head = __find_stripe(conf, head_sector, conf->generation);
 775	if (head && !atomic_inc_not_zero(&head->count)) {
 776		spin_lock(&conf->device_lock);
 777		if (!atomic_read(&head->count)) {
 778			if (!test_bit(STRIPE_HANDLE, &head->state))
 779				atomic_inc(&conf->active_stripes);
 780			BUG_ON(list_empty(&head->lru) &&
 781			       !test_bit(STRIPE_EXPANDING, &head->state));
 782			list_del_init(&head->lru);
 783			if (head->group) {
 784				head->group->stripes_cnt--;
 785				head->group = NULL;
 786			}
 787		}
 788		atomic_inc(&head->count);
 789		spin_unlock(&conf->device_lock);
 790	}
 791	spin_unlock_irq(conf->hash_locks + hash);
 792
 793	if (!head)
 794		return;
 795	if (!stripe_can_batch(head))
 796		goto out;
 797
 798	lock_two_stripes(head, sh);
  799	/* clear_batch_ready clears the flag */
 800	if (!stripe_can_batch(head) || !stripe_can_batch(sh))
 801		goto unlock_out;
 802
 803	if (sh->batch_head)
 804		goto unlock_out;
 805
 806	dd_idx = 0;
 807	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
 808		dd_idx++;
 809	if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
 810		goto unlock_out;
 811
 812	if (head->batch_head) {
 813		spin_lock(&head->batch_head->batch_lock);
 814		/* This batch list is already running */
 815		if (!stripe_can_batch(head)) {
 816			spin_unlock(&head->batch_head->batch_lock);
 817			goto unlock_out;
 818		}
 819
 820		/*
  821		 * at this point, head's BATCH_READY could be cleared, but we
  822		 * can still add the stripe to the batch list
 823		 */
 824		list_add(&sh->batch_list, &head->batch_list);
 825		spin_unlock(&head->batch_head->batch_lock);
 826
 827		sh->batch_head = head->batch_head;
 828	} else {
 829		head->batch_head = head;
 830		sh->batch_head = head->batch_head;
 831		spin_lock(&head->batch_lock);
 832		list_add_tail(&sh->batch_list, &head->batch_list);
 833		spin_unlock(&head->batch_lock);
 834	}
 835
 836	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 837		if (atomic_dec_return(&conf->preread_active_stripes)
 838		    < IO_THRESHOLD)
 839			md_wakeup_thread(conf->mddev->thread);
 840
 841	if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
 842		int seq = sh->bm_seq;
 843		if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
 844		    sh->batch_head->bm_seq > seq)
 845			seq = sh->batch_head->bm_seq;
 846		set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
 847		sh->batch_head->bm_seq = seq;
 848	}
 849
 850	atomic_inc(&sh->count);
 851unlock_out:
 852	unlock_two_stripes(head, sh);
 853out:
 854	raid5_release_stripe(head);
 855}
 856
 857/* Determine if 'data_offset' or 'new_data_offset' should be used
 858 * in this stripe_head.
 859 */
 860static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
 861{
 862	sector_t progress = conf->reshape_progress;
 863	/* Need a memory barrier to make sure we see the value
 864	 * of conf->generation, or ->data_offset that was set before
 865	 * reshape_progress was updated.
 866	 */
 867	smp_rmb();
 868	if (progress == MaxSector)
 869		return 0;
 870	if (sh->generation == conf->generation - 1)
 871		return 0;
 872	/* We are in a reshape, and this is a new-generation stripe,
 873	 * so use new_data_offset.
 874	 */
 875	return 1;
 876}
 877
 878static void
 879raid5_end_read_request(struct bio *bi);
 880static void
 881raid5_end_write_request(struct bio *bi);
 882
 883static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 884{
 885	struct r5conf *conf = sh->raid_conf;
 886	int i, disks = sh->disks;
 887	struct stripe_head *head_sh = sh;
 888
 889	might_sleep();
 890
 891	if (r5l_write_stripe(conf->log, sh) == 0)
 892		return;
 893	for (i = disks; i--; ) {
 894		int rw;
 895		int replace_only = 0;
 896		struct bio *bi, *rbi;
 897		struct md_rdev *rdev, *rrdev = NULL;
 898
 899		sh = head_sh;
 900		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 901			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 902				rw = WRITE_FUA;
 903			else
 904				rw = WRITE;
 905			if (test_bit(R5_Discard, &sh->dev[i].flags))
 906				rw |= REQ_DISCARD;
 907		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 908			rw = READ;
 909		else if (test_and_clear_bit(R5_WantReplace,
 910					    &sh->dev[i].flags)) {
 911			rw = WRITE;
 912			replace_only = 1;
 913		} else
 914			continue;
 915		if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
 916			rw |= REQ_SYNC;
 917
 918again:
 919		bi = &sh->dev[i].req;
 920		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 921
 922		rcu_read_lock();
 923		rrdev = rcu_dereference(conf->disks[i].replacement);
 924		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
 925		rdev = rcu_dereference(conf->disks[i].rdev);
 926		if (!rdev) {
 927			rdev = rrdev;
 928			rrdev = NULL;
 929		}
 930		if (rw & WRITE) {
 931			if (replace_only)
 932				rdev = NULL;
 933			if (rdev == rrdev)
 934				/* We raced and saw duplicates */
 935				rrdev = NULL;
 936		} else {
 937			if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
 938				rdev = rrdev;
 939			rrdev = NULL;
 940		}
 941
 942		if (rdev && test_bit(Faulty, &rdev->flags))
 943			rdev = NULL;
 944		if (rdev)
 945			atomic_inc(&rdev->nr_pending);
 946		if (rrdev && test_bit(Faulty, &rrdev->flags))
 947			rrdev = NULL;
 948		if (rrdev)
 949			atomic_inc(&rrdev->nr_pending);
 950		rcu_read_unlock();
 951
  952		/* We have already checked bad blocks for reads.  Now we
  953		 * need to check for writes.  We never accept write errors
  954		 * on the replacement, so we don't need to check rrdev.
 955		 */
 956		while ((rw & WRITE) && rdev &&
 957		       test_bit(WriteErrorSeen, &rdev->flags)) {
 958			sector_t first_bad;
 959			int bad_sectors;
 960			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
 961					      &first_bad, &bad_sectors);
 962			if (!bad)
 963				break;
 964
 965			if (bad < 0) {
 966				set_bit(BlockedBadBlocks, &rdev->flags);
 967				if (!conf->mddev->external &&
 968				    conf->mddev->flags) {
 969					/* It is very unlikely, but we might
 970					 * still need to write out the
 971					 * bad block log - better give it
  972					 * a chance */
 973					md_check_recovery(conf->mddev);
 974				}
 975				/*
 976				 * Because md_wait_for_blocked_rdev
 977				 * will dec nr_pending, we must
 978				 * increment it first.
 979				 */
 980				atomic_inc(&rdev->nr_pending);
 981				md_wait_for_blocked_rdev(rdev, conf->mddev);
 982			} else {
 983				/* Acknowledged bad block - skip the write */
 984				rdev_dec_pending(rdev, conf->mddev);
 985				rdev = NULL;
 986			}
 987		}
 988
 989		if (rdev) {
 990			if (s->syncing || s->expanding || s->expanded
 991			    || s->replacing)
 992				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 993
 994			set_bit(STRIPE_IO_STARTED, &sh->state);
 995
 996			bio_reset(bi);
 997			bi->bi_bdev = rdev->bdev;
 998			bi->bi_rw = rw;
 999			bi->bi_end_io = (rw & WRITE)
1000				? raid5_end_write_request
1001				: raid5_end_read_request;
1002			bi->bi_private = sh;
1003
1004			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
1005				__func__, (unsigned long long)sh->sector,
1006				bi->bi_rw, i);
1007			atomic_inc(&sh->count);
1008			if (sh != head_sh)
1009				atomic_inc(&head_sh->count);
1010			if (use_new_offset(conf, sh))
1011				bi->bi_iter.bi_sector = (sh->sector
1012						 + rdev->new_data_offset);
1013			else
1014				bi->bi_iter.bi_sector = (sh->sector
1015						 + rdev->data_offset);
1016			if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1017				bi->bi_rw |= REQ_NOMERGE;
1018
1019			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1020				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1021			sh->dev[i].vec.bv_page = sh->dev[i].page;
1022			bi->bi_vcnt = 1;
1023			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1024			bi->bi_io_vec[0].bv_offset = 0;
1025			bi->bi_iter.bi_size = STRIPE_SIZE;
1026			/*
 1027			 * If this is a discard request, set bi_vcnt to 0. We don't
 1028			 * want to confuse SCSI, because SCSI will replace the payload
1029			 */
1030			if (rw & REQ_DISCARD)
1031				bi->bi_vcnt = 0;
1032			if (rrdev)
1033				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1034
1035			if (conf->mddev->gendisk)
1036				trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
1037						      bi, disk_devt(conf->mddev->gendisk),
1038						      sh->dev[i].sector);
1039			generic_make_request(bi);
1040		}
1041		if (rrdev) {
1042			if (s->syncing || s->expanding || s->expanded
1043			    || s->replacing)
1044				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
1045
1046			set_bit(STRIPE_IO_STARTED, &sh->state);
1047
1048			bio_reset(rbi);
1049			rbi->bi_bdev = rrdev->bdev;
1050			rbi->bi_rw = rw;
1051			BUG_ON(!(rw & WRITE));
1052			rbi->bi_end_io = raid5_end_write_request;
1053			rbi->bi_private = sh;
1054
1055			pr_debug("%s: for %llu schedule op %ld on "
1056				 "replacement disc %d\n",
1057				__func__, (unsigned long long)sh->sector,
1058				rbi->bi_rw, i);
1059			atomic_inc(&sh->count);
1060			if (sh != head_sh)
1061				atomic_inc(&head_sh->count);
1062			if (use_new_offset(conf, sh))
1063				rbi->bi_iter.bi_sector = (sh->sector
1064						  + rrdev->new_data_offset);
1065			else
1066				rbi->bi_iter.bi_sector = (sh->sector
1067						  + rrdev->data_offset);
1068			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1069				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1070			sh->dev[i].rvec.bv_page = sh->dev[i].page;
1071			rbi->bi_vcnt = 1;
1072			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1073			rbi->bi_io_vec[0].bv_offset = 0;
1074			rbi->bi_iter.bi_size = STRIPE_SIZE;
1075			/*
 1076			 * If this is a discard request, set bi_vcnt to 0. We don't
 1077			 * want to confuse SCSI, because SCSI will replace the payload
1078			 */
1079			if (rw & REQ_DISCARD)
1080				rbi->bi_vcnt = 0;
1081			if (conf->mddev->gendisk)
1082				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
1083						      rbi, disk_devt(conf->mddev->gendisk),
1084						      sh->dev[i].sector);
1085			generic_make_request(rbi);
1086		}
1087		if (!rdev && !rrdev) {
1088			if (rw & WRITE)
1089				set_bit(STRIPE_DEGRADED, &sh->state);
1090			pr_debug("skip op %ld on disc %d for sector %llu\n",
1091				bi->bi_rw, i, (unsigned long long)sh->sector);
1092			clear_bit(R5_LOCKED, &sh->dev[i].flags);
1093			set_bit(STRIPE_HANDLE, &sh->state);
1094		}
1095
1096		if (!head_sh->batch_head)
1097			continue;
1098		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1099				      batch_list);
1100		if (sh != head_sh)
1101			goto again;
1102	}
1103}
1104
1105static struct dma_async_tx_descriptor *
1106async_copy_data(int frombio, struct bio *bio, struct page **page,
1107	sector_t sector, struct dma_async_tx_descriptor *tx,
1108	struct stripe_head *sh)
1109{
1110	struct bio_vec bvl;
1111	struct bvec_iter iter;
1112	struct page *bio_page;
1113	int page_offset;
1114	struct async_submit_ctl submit;
1115	enum async_tx_flags flags = 0;
1116
1117	if (bio->bi_iter.bi_sector >= sector)
1118		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
1119	else
1120		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
1121
1122	if (frombio)
1123		flags |= ASYNC_TX_FENCE;
1124	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
1125
1126	bio_for_each_segment(bvl, bio, iter) {
1127		int len = bvl.bv_len;
1128		int clen;
1129		int b_offset = 0;
1130
1131		if (page_offset < 0) {
1132			b_offset = -page_offset;
1133			page_offset += b_offset;
1134			len -= b_offset;
1135		}
1136
1137		if (len > 0 && page_offset + len > STRIPE_SIZE)
1138			clen = STRIPE_SIZE - page_offset;
1139		else
1140			clen = len;
1141
1142		if (clen > 0) {
1143			b_offset += bvl.bv_offset;
1144			bio_page = bvl.bv_page;
1145			if (frombio) {
1146				if (sh->raid_conf->skip_copy &&
1147				    b_offset == 0 && page_offset == 0 &&
1148				    clen == STRIPE_SIZE)
1149					*page = bio_page;
1150				else
1151					tx = async_memcpy(*page, bio_page, page_offset,
1152						  b_offset, clen, &submit);
1153			} else
1154				tx = async_memcpy(bio_page, *page, b_offset,
1155						  page_offset, clen, &submit);
1156		}
1157		/* chain the operations */
1158		submit.depend_tx = tx;
1159
1160		if (clen < len) /* hit end of page */
1161			break;
1162		page_offset +=  len;
1163	}
1164
1165	return tx;
1166}
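/*
 * Note on the offset arithmetic above: page_offset is the byte offset of
 * the bio's current position relative to the start of the stripe page
 * (sector * 512).  It is negative while the bio still lies before the
 * stripe, in which case b_offset skips the leading part of the bio_vec
 * and only the overlapping bytes are copied.
 */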
1167
1168static void ops_complete_biofill(void *stripe_head_ref)
1169{
1170	struct stripe_head *sh = stripe_head_ref;
1171	struct bio_list return_bi = BIO_EMPTY_LIST;
1172	int i;
1173
1174	pr_debug("%s: stripe %llu\n", __func__,
1175		(unsigned long long)sh->sector);
1176
1177	/* clear completed biofills */
1178	for (i = sh->disks; i--; ) {
1179		struct r5dev *dev = &sh->dev[i];
1180
1181		/* acknowledge completion of a biofill operation */
1182		/* and check if we need to reply to a read request,
1183		 * new R5_Wantfill requests are held off until
1184		 * !STRIPE_BIOFILL_RUN
1185		 */
1186		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
1187			struct bio *rbi, *rbi2;
1188
1189			BUG_ON(!dev->read);
1190			rbi = dev->read;
1191			dev->read = NULL;
1192			while (rbi && rbi->bi_iter.bi_sector <
1193				dev->sector + STRIPE_SECTORS) {
1194				rbi2 = r5_next_bio(rbi, dev->sector);
1195				if (!raid5_dec_bi_active_stripes(rbi))
1196					bio_list_add(&return_bi, rbi);
1197				rbi = rbi2;
1198			}
1199		}
1200	}
1201	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
1202
1203	return_io(&return_bi);
1204
1205	set_bit(STRIPE_HANDLE, &sh->state);
1206	raid5_release_stripe(sh);
1207}
1208
1209static void ops_run_biofill(struct stripe_head *sh)
1210{
1211	struct dma_async_tx_descriptor *tx = NULL;
1212	struct async_submit_ctl submit;
1213	int i;
1214
1215	BUG_ON(sh->batch_head);
1216	pr_debug("%s: stripe %llu\n", __func__,
1217		(unsigned long long)sh->sector);
1218
1219	for (i = sh->disks; i--; ) {
1220		struct r5dev *dev = &sh->dev[i];
1221		if (test_bit(R5_Wantfill, &dev->flags)) {
1222			struct bio *rbi;
1223			spin_lock_irq(&sh->stripe_lock);
1224			dev->read = rbi = dev->toread;
1225			dev->toread = NULL;
1226			spin_unlock_irq(&sh->stripe_lock);
1227			while (rbi && rbi->bi_iter.bi_sector <
1228				dev->sector + STRIPE_SECTORS) {
1229				tx = async_copy_data(0, rbi, &dev->page,
1230					dev->sector, tx, sh);
1231				rbi = r5_next_bio(rbi, dev->sector);
1232			}
1233		}
1234	}
1235
1236	atomic_inc(&sh->count);
1237	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
1238	async_trigger_callback(&submit);
1239}
1240
1241static void mark_target_uptodate(struct stripe_head *sh, int target)
1242{
1243	struct r5dev *tgt;
1244
1245	if (target < 0)
1246		return;
1247
1248	tgt = &sh->dev[target];
1249	set_bit(R5_UPTODATE, &tgt->flags);
1250	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1251	clear_bit(R5_Wantcompute, &tgt->flags);
1252}
1253
1254static void ops_complete_compute(void *stripe_head_ref)
1255{
1256	struct stripe_head *sh = stripe_head_ref;
1257
1258	pr_debug("%s: stripe %llu\n", __func__,
1259		(unsigned long long)sh->sector);
1260
1261	/* mark the computed target(s) as uptodate */
1262	mark_target_uptodate(sh, sh->ops.target);
1263	mark_target_uptodate(sh, sh->ops.target2);
1264
1265	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
1266	if (sh->check_state == check_state_compute_run)
1267		sh->check_state = check_state_compute_result;
1268	set_bit(STRIPE_HANDLE, &sh->state);
1269	raid5_release_stripe(sh);
1270}
1271
1272/* return a pointer to the address conversion region of the scribble buffer */
1273static addr_conv_t *to_addr_conv(struct stripe_head *sh,
1274				 struct raid5_percpu *percpu, int i)
1275{
1276	void *addr;
1277
1278	addr = flex_array_get(percpu->scribble, i);
1279	return addr + sizeof(struct page *) * (sh->disks + 2);
1280}
1281
 1282/* return a pointer to the page list region of the scribble buffer */
1283static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1284{
1285	void *addr;
1286
1287	addr = flex_array_get(percpu->scribble, i);
1288	return addr;
1289}
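/*
 * Each per-cpu scribble element used by the two helpers above is laid out
 * as an array of (sh->disks + 2) page pointers (the source/destination
 * list handed to the async ops) followed by the addr_conv_t scratch
 * region used by the async_tx API:
 *
 *	+---------------------------+---------------------+
 *	| struct page *[disks + 2]  | addr_conv_t region  |
 *	+---------------------------+---------------------+
 *	^ to_addr_page()            ^ to_addr_conv()
 */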
1290
1291static struct dma_async_tx_descriptor *
1292ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
1293{
1294	int disks = sh->disks;
1295	struct page **xor_srcs = to_addr_page(percpu, 0);
1296	int target = sh->ops.target;
1297	struct r5dev *tgt = &sh->dev[target];
1298	struct page *xor_dest = tgt->page;
1299	int count = 0;
1300	struct dma_async_tx_descriptor *tx;
1301	struct async_submit_ctl submit;
1302	int i;
1303
1304	BUG_ON(sh->batch_head);
1305
1306	pr_debug("%s: stripe %llu block: %d\n",
1307		__func__, (unsigned long long)sh->sector, target);
1308	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1309
1310	for (i = disks; i--; )
1311		if (i != target)
1312			xor_srcs[count++] = sh->dev[i].page;
1313
1314	atomic_inc(&sh->count);
1315
1316	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
1317			  ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
1318	if (unlikely(count == 1))
1319		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1320	else
1321		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1322
1323	return tx;
1324}
1325
1326/* set_syndrome_sources - populate source buffers for gen_syndrome
1327 * @srcs - (struct page *) array of size sh->disks
1328 * @sh - stripe_head to parse
1329 *
1330 * Populates srcs in proper layout order for the stripe and returns the
1331 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
1332 * destination buffer is recorded in srcs[count] and the Q destination
 1333 * is recorded in srcs[count+1].
1334 */
1335static int set_syndrome_sources(struct page **srcs,
1336				struct stripe_head *sh,
1337				int srctype)
1338{
1339	int disks = sh->disks;
1340	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1341	int d0_idx = raid6_d0(sh);
1342	int count;
1343	int i;
1344
1345	for (i = 0; i < disks; i++)
1346		srcs[i] = NULL;
1347
1348	count = 0;
1349	i = d0_idx;
1350	do {
1351		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1352		struct r5dev *dev = &sh->dev[i];
1353
1354		if (i == sh->qd_idx || i == sh->pd_idx ||
1355		    (srctype == SYNDROME_SRC_ALL) ||
1356		    (srctype == SYNDROME_SRC_WANT_DRAIN &&
1357		     test_bit(R5_Wantdrain, &dev->flags)) ||
1358		    (srctype == SYNDROME_SRC_WRITTEN &&
1359		     dev->written))
1360			srcs[slot] = sh->dev[i].page;
1361		i = raid6_next_disk(i, disks);
1362	} while (i != d0_idx);
1363
1364	return syndrome_disks;
1365}
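/*
 * Example of the resulting layout for a 6-device md-layout array
 * (syndrome_disks == 4):
 *
 *	srcs[0..3]  data pages in syndrome order, or NULL when a source is
 *	            not selected by @srctype
 *	srcs[4]     P page (sh->dev[pd_idx].page)
 *	srcs[5]     Q page (sh->dev[qd_idx].page)
 *
 * which matches the layout async_gen_syndrome() expects when called with
 * count+2 sources.
 */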
1366
1367static struct dma_async_tx_descriptor *
1368ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
1369{
1370	int disks = sh->disks;
1371	struct page **blocks = to_addr_page(percpu, 0);
1372	int target;
1373	int qd_idx = sh->qd_idx;
1374	struct dma_async_tx_descriptor *tx;
1375	struct async_submit_ctl submit;
1376	struct r5dev *tgt;
1377	struct page *dest;
1378	int i;
1379	int count;
1380
1381	BUG_ON(sh->batch_head);
1382	if (sh->ops.target < 0)
1383		target = sh->ops.target2;
1384	else if (sh->ops.target2 < 0)
1385		target = sh->ops.target;
1386	else
1387		/* we should only have one valid target */
1388		BUG();
1389	BUG_ON(target < 0);
1390	pr_debug("%s: stripe %llu block: %d\n",
1391		__func__, (unsigned long long)sh->sector, target);
1392
1393	tgt = &sh->dev[target];
1394	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1395	dest = tgt->page;
1396
1397	atomic_inc(&sh->count);
1398
1399	if (target == qd_idx) {
1400		count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1401		blocks[count] = NULL; /* regenerating p is not necessary */
1402		BUG_ON(blocks[count+1] != dest); /* q should already be set */
1403		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1404				  ops_complete_compute, sh,
1405				  to_addr_conv(sh, percpu, 0));
1406		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
1407	} else {
1408		/* Compute any data- or p-drive using XOR */
1409		count = 0;
1410		for (i = disks; i-- ; ) {
1411			if (i == target || i == qd_idx)
1412				continue;
1413			blocks[count++] = sh->dev[i].page;
1414		}
1415
1416		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1417				  NULL, ops_complete_compute, sh,
1418				  to_addr_conv(sh, percpu, 0));
1419		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
1420	}
1421
1422	return tx;
1423}
1424
1425static struct dma_async_tx_descriptor *
1426ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1427{
1428	int i, count, disks = sh->disks;
1429	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1430	int d0_idx = raid6_d0(sh);
1431	int faila = -1, failb = -1;
1432	int target = sh->ops.target;
1433	int target2 = sh->ops.target2;
1434	struct r5dev *tgt = &sh->dev[target];
1435	struct r5dev *tgt2 = &sh->dev[target2];
1436	struct dma_async_tx_descriptor *tx;
1437	struct page **blocks = to_addr_page(percpu, 0);
1438	struct async_submit_ctl submit;
1439
1440	BUG_ON(sh->batch_head);
1441	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1442		 __func__, (unsigned long long)sh->sector, target, target2);
1443	BUG_ON(target < 0 || target2 < 0);
1444	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1445	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1446
1447	/* we need to open-code set_syndrome_sources to handle the
1448	 * slot number conversion for 'faila' and 'failb'
1449	 */
1450	for (i = 0; i < disks ; i++)
1451		blocks[i] = NULL;
1452	count = 0;
1453	i = d0_idx;
1454	do {
1455		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1456
1457		blocks[slot] = sh->dev[i].page;
1458
1459		if (i == target)
1460			faila = slot;
1461		if (i == target2)
1462			failb = slot;
1463		i = raid6_next_disk(i, disks);
1464	} while (i != d0_idx);
1465
1466	BUG_ON(faila == failb);
1467	if (failb < faila)
1468		swap(faila, failb);
1469	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1470		 __func__, (unsigned long long)sh->sector, faila, failb);
1471
1472	atomic_inc(&sh->count);
1473
1474	if (failb == syndrome_disks+1) {
1475		/* Q disk is one of the missing disks */
1476		if (faila == syndrome_disks) {
1477			/* Missing P+Q, just recompute */
1478			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1479					  ops_complete_compute, sh,
1480					  to_addr_conv(sh, percpu, 0));
1481			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
1482						  STRIPE_SIZE, &submit);
1483		} else {
1484			struct page *dest;
1485			int data_target;
1486			int qd_idx = sh->qd_idx;
1487
1488			/* Missing D+Q: recompute D from P, then recompute Q */
1489			if (target == qd_idx)
1490				data_target = target2;
1491			else
1492				data_target = target;
1493
1494			count = 0;
1495			for (i = disks; i-- ; ) {
1496				if (i == data_target || i == qd_idx)
1497					continue;
1498				blocks[count++] = sh->dev[i].page;
1499			}
1500			dest = sh->dev[data_target].page;
1501			init_async_submit(&submit,
1502					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1503					  NULL, NULL, NULL,
1504					  to_addr_conv(sh, percpu, 0));
1505			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
1506				       &submit);
1507
1508			count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
1509			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1510					  ops_complete_compute, sh,
1511					  to_addr_conv(sh, percpu, 0));
1512			return async_gen_syndrome(blocks, 0, count+2,
1513						  STRIPE_SIZE, &submit);
1514		}
1515	} else {
1516		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1517				  ops_complete_compute, sh,
1518				  to_addr_conv(sh, percpu, 0));
1519		if (failb == syndrome_disks) {
1520			/* We're missing D+P. */
1521			return async_raid6_datap_recov(syndrome_disks+2,
1522						       STRIPE_SIZE, faila,
1523						       blocks, &submit);
1524		} else {
1525			/* We're missing D+D. */
1526			return async_raid6_2data_recov(syndrome_disks+2,
1527						       STRIPE_SIZE, faila, failb,
1528						       blocks, &submit);
1529		}
1530	}
1531}
1532
1533static void ops_complete_prexor(void *stripe_head_ref)
1534{
1535	struct stripe_head *sh = stripe_head_ref;
1536
1537	pr_debug("%s: stripe %llu\n", __func__,
1538		(unsigned long long)sh->sector);
1539}
1540
1541static struct dma_async_tx_descriptor *
1542ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
1543		struct dma_async_tx_descriptor *tx)
1544{
1545	int disks = sh->disks;
1546	struct page **xor_srcs = to_addr_page(percpu, 0);
1547	int count = 0, pd_idx = sh->pd_idx, i;
1548	struct async_submit_ctl submit;
1549
1550	/* existing parity data subtracted */
1551	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1552
1553	BUG_ON(sh->batch_head);
1554	pr_debug("%s: stripe %llu\n", __func__,
1555		(unsigned long long)sh->sector);
1556
1557	for (i = disks; i--; ) {
1558		struct r5dev *dev = &sh->dev[i];
1559		/* Only process blocks that are known to be uptodate */
1560		if (test_bit(R5_Wantdrain, &dev->flags))
1561			xor_srcs[count++] = dev->page;
1562	}
1563
1564	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1565			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1566	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1567
1568	return tx;
1569}
1570
1571static struct dma_async_tx_descriptor *
1572ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
1573		struct dma_async_tx_descriptor *tx)
1574{
1575	struct page **blocks = to_addr_page(percpu, 0);
1576	int count;
1577	struct async_submit_ctl submit;
1578
1579	pr_debug("%s: stripe %llu\n", __func__,
1580		(unsigned long long)sh->sector);
1581
1582	count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
1583
1584	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
1585			  ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1586	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1587
1588	return tx;
1589}
1590
1591static struct dma_async_tx_descriptor *
1592ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1593{
1594	int disks = sh->disks;
1595	int i;
1596	struct stripe_head *head_sh = sh;
1597
1598	pr_debug("%s: stripe %llu\n", __func__,
1599		(unsigned long long)sh->sector);
1600
1601	for (i = disks; i--; ) {
1602		struct r5dev *dev;
1603		struct bio *chosen;
1604
1605		sh = head_sh;
1606		if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
1607			struct bio *wbi;
1608
1609again:
1610			dev = &sh->dev[i];
1611			spin_lock_irq(&sh->stripe_lock);
1612			chosen = dev->towrite;
1613			dev->towrite = NULL;
1614			sh->overwrite_disks = 0;
1615			BUG_ON(dev->written);
1616			wbi = dev->written = chosen;
1617			spin_unlock_irq(&sh->stripe_lock);
1618			WARN_ON(dev->page != dev->orig_page);
1619
1620			while (wbi && wbi->bi_iter.bi_sector <
1621				dev->sector + STRIPE_SECTORS) {
1622				if (wbi->bi_rw & REQ_FUA)
1623					set_bit(R5_WantFUA, &dev->flags);
1624				if (wbi->bi_rw & REQ_SYNC)
1625					set_bit(R5_SyncIO, &dev->flags);
1626				if (wbi->bi_rw & REQ_DISCARD)
1627					set_bit(R5_Discard, &dev->flags);
1628				else {
1629					tx = async_copy_data(1, wbi, &dev->page,
1630						dev->sector, tx, sh);
1631					if (dev->page != dev->orig_page) {
1632						set_bit(R5_SkipCopy, &dev->flags);
1633						clear_bit(R5_UPTODATE, &dev->flags);
1634						clear_bit(R5_OVERWRITE, &dev->flags);
1635					}
1636				}
1637				wbi = r5_next_bio(wbi, dev->sector);
1638			}
1639
1640			if (head_sh->batch_head) {
1641				sh = list_first_entry(&sh->batch_list,
1642						      struct stripe_head,
1643						      batch_list);
1644				if (sh == head_sh)
1645					continue;
1646				goto again;
1647			}
1648		}
1649	}
1650
1651	return tx;
1652}
1653
1654static void ops_complete_reconstruct(void *stripe_head_ref)
1655{
1656	struct stripe_head *sh = stripe_head_ref;
1657	int disks = sh->disks;
1658	int pd_idx = sh->pd_idx;
1659	int qd_idx = sh->qd_idx;
1660	int i;
1661	bool fua = false, sync = false, discard = false;
1662
1663	pr_debug("%s: stripe %llu\n", __func__,
1664		(unsigned long long)sh->sector);
1665
1666	for (i = disks; i--; ) {
1667		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1668		sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1669		discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1670	}
1671
1672	for (i = disks; i--; ) {
1673		struct r5dev *dev = &sh->dev[i];
1674
1675		if (dev->written || i == pd_idx || i == qd_idx) {
1676			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
1677				set_bit(R5_UPTODATE, &dev->flags);
1678			if (fua)
1679				set_bit(R5_WantFUA, &dev->flags);
1680			if (sync)
1681				set_bit(R5_SyncIO, &dev->flags);
1682		}
1683	}
1684
1685	if (sh->reconstruct_state == reconstruct_state_drain_run)
1686		sh->reconstruct_state = reconstruct_state_drain_result;
1687	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1688		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1689	else {
1690		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1691		sh->reconstruct_state = reconstruct_state_result;
1692	}
1693
1694	set_bit(STRIPE_HANDLE, &sh->state);
1695	raid5_release_stripe(sh);
1696}
1697
1698static void
1699ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1700		     struct dma_async_tx_descriptor *tx)
1701{
1702	int disks = sh->disks;
1703	struct page **xor_srcs;
1704	struct async_submit_ctl submit;
1705	int count, pd_idx = sh->pd_idx, i;
1706	struct page *xor_dest;
1707	int prexor = 0;
1708	unsigned long flags;
1709	int j = 0;
1710	struct stripe_head *head_sh = sh;
1711	int last_stripe;
1712
1713	pr_debug("%s: stripe %llu\n", __func__,
1714		(unsigned long long)sh->sector);
1715
1716	for (i = 0; i < sh->disks; i++) {
1717		if (pd_idx == i)
1718			continue;
1719		if (!test_bit(R5_Discard, &sh->dev[i].flags))
1720			break;
1721	}
1722	if (i >= sh->disks) {
1723		atomic_inc(&sh->count);
1724		set_bit(R5_Discard, &sh->dev[pd_idx].flags);
1725		ops_complete_reconstruct(sh);
1726		return;
1727	}
1728again:
1729	count = 0;
1730	xor_srcs = to_addr_page(percpu, j);
 1731	/* check if prexor is active, which means we only process blocks
 1732	 * that are part of a read-modify-write (written)
1733	 */
1734	if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1735		prexor = 1;
1736		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1737		for (i = disks; i--; ) {
1738			struct r5dev *dev = &sh->dev[i];
1739			if (head_sh->dev[i].written)
1740				xor_srcs[count++] = dev->page;
1741		}
1742	} else {
1743		xor_dest = sh->dev[pd_idx].page;
1744		for (i = disks; i--; ) {
1745			struct r5dev *dev = &sh->dev[i];
1746			if (i != pd_idx)
1747				xor_srcs[count++] = dev->page;
1748		}
1749	}
1750
 1751	/* 1/ if we prexor'd then the dest is reused as a source
 1752	 * 2/ if we did not prexor then we are redoing the parity
 1753	 * set ASYNC_TX_XOR_DROP_DST or ASYNC_TX_XOR_ZERO_DST respectively
 1754	 * for the synchronous xor case
1755	 */
1756	last_stripe = !head_sh->batch_head ||
1757		list_first_entry(&sh->batch_list,
1758				 struct stripe_head, batch_list) == head_sh;
1759	if (last_stripe) {
1760		flags = ASYNC_TX_ACK |
1761			(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1762
1763		atomic_inc(&head_sh->count);
1764		init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
1765				  to_addr_conv(sh, percpu, j));
1766	} else {
1767		flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
1768		init_async_submit(&submit, flags, tx, NULL, NULL,
1769				  to_addr_conv(sh, percpu, j));
1770	}
1771
1772	if (unlikely(count == 1))
1773		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1774	else
1775		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1776	if (!last_stripe) {
1777		j++;
1778		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1779				      batch_list);
1780		goto again;
1781	}
1782}
1783
1784static void
1785ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1786		     struct dma_async_tx_descriptor *tx)
1787{
1788	struct async_submit_ctl submit;
1789	struct page **blocks;
1790	int count, i, j = 0;
1791	struct stripe_head *head_sh = sh;
1792	int last_stripe;
1793	int synflags;
1794	unsigned long txflags;
1795
1796	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1797
1798	for (i = 0; i < sh->disks; i++) {
1799		if (sh->pd_idx == i || sh->qd_idx == i)
1800			continue;
1801		if (!test_bit(R5_Discard, &sh->dev[i].flags))
1802			break;
1803	}
1804	if (i >= sh->disks) {
1805		atomic_inc(&sh->count);
1806		set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
1807		set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
1808		ops_complete_reconstruct(sh);
1809		return;
1810	}
1811
1812again:
1813	blocks = to_addr_page(percpu, j);
1814
1815	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1816		synflags = SYNDROME_SRC_WRITTEN;
1817		txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
1818	} else {
1819		synflags = SYNDROME_SRC_ALL;
1820		txflags = ASYNC_TX_ACK;
1821	}
1822
1823	count = set_syndrome_sources(blocks, sh, synflags);
1824	last_stripe = !head_sh->batch_head ||
1825		list_first_entry(&sh->batch_list,
1826				 struct stripe_head, batch_list) == head_sh;
1827
1828	if (last_stripe) {
1829		atomic_inc(&head_sh->count);
1830		init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
1831				  head_sh, to_addr_conv(sh, percpu, j));
1832	} else
1833		init_async_submit(&submit, 0, tx, NULL, NULL,
1834				  to_addr_conv(sh, percpu, j));
1835	tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1836	if (!last_stripe) {
1837		j++;
1838		sh = list_first_entry(&sh->batch_list, struct stripe_head,
1839				      batch_list);
1840		goto again;
1841	}
1842}
1843
1844static void ops_complete_check(void *stripe_head_ref)
1845{
1846	struct stripe_head *sh = stripe_head_ref;
1847
1848	pr_debug("%s: stripe %llu\n", __func__,
1849		(unsigned long long)sh->sector);
1850
1851	sh->check_state = check_state_check_result;
1852	set_bit(STRIPE_HANDLE, &sh->state);
1853	raid5_release_stripe(sh);
1854}
1855
1856static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1857{
1858	int disks = sh->disks;
1859	int pd_idx = sh->pd_idx;
1860	int qd_idx = sh->qd_idx;
1861	struct page *xor_dest;
1862	struct page **xor_srcs = to_addr_page(percpu, 0);
1863	struct dma_async_tx_descriptor *tx;
1864	struct async_submit_ctl submit;
1865	int count;
1866	int i;
1867
1868	pr_debug("%s: stripe %llu\n", __func__,
1869		(unsigned long long)sh->sector);
1870
1871	BUG_ON(sh->batch_head);
1872	count = 0;
1873	xor_dest = sh->dev[pd_idx].page;
1874	xor_srcs[count++] = xor_dest;
1875	for (i = disks; i--; ) {
1876		if (i == pd_idx || i == qd_idx)
1877			continue;
1878		xor_srcs[count++] = sh->dev[i].page;
1879	}
1880
1881	init_async_submit(&submit, 0, NULL, NULL, NULL,
1882			  to_addr_conv(sh, percpu, 0));
1883	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1884			   &sh->ops.zero_sum_result, &submit);
1885
1886	atomic_inc(&sh->count);
1887	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1888	tx = async_trigger_callback(&submit);
1889}
1890
1891static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1892{
1893	struct page **srcs = to_addr_page(percpu, 0);
1894	struct async_submit_ctl submit;
1895	int count;
1896
1897	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1898		(unsigned long long)sh->sector, checkp);
1899
1900	BUG_ON(sh->batch_head);
1901	count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
1902	if (!checkp)
1903		srcs[count] = NULL;
1904
1905	atomic_inc(&sh->count);
1906	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1907			  sh, to_addr_conv(sh, percpu, 0));
1908	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1909			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1910}
1911
1912static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1913{
1914	int overlap_clear = 0, i, disks = sh->disks;
1915	struct dma_async_tx_descriptor *tx = NULL;
1916	struct r5conf *conf = sh->raid_conf;
1917	int level = conf->level;
1918	struct raid5_percpu *percpu;
1919	unsigned long cpu;
1920
1921	cpu = get_cpu();
1922	percpu = per_cpu_ptr(conf->percpu, cpu);
1923	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1924		ops_run_biofill(sh);
1925		overlap_clear++;
1926	}
1927
1928	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1929		if (level < 6)
1930			tx = ops_run_compute5(sh, percpu);
1931		else {
1932			if (sh->ops.target2 < 0 || sh->ops.target < 0)
1933				tx = ops_run_compute6_1(sh, percpu);
1934			else
1935				tx = ops_run_compute6_2(sh, percpu);
1936		}
1937		/* terminate the chain if reconstruct is not set to be run */
1938		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1939			async_tx_ack(tx);
1940	}
1941
1942	if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
1943		if (level < 6)
1944			tx = ops_run_prexor5(sh, percpu, tx);
1945		else
1946			tx = ops_run_prexor6(sh, percpu, tx);
1947	}
1948
1949	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1950		tx = ops_run_biodrain(sh, tx);
1951		overlap_clear++;
1952	}
1953
1954	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1955		if (level < 6)
1956			ops_run_reconstruct5(sh, percpu, tx);
1957		else
1958			ops_run_reconstruct6(sh, percpu, tx);
1959	}
1960
1961	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1962		if (sh->check_state == check_state_run)
1963			ops_run_check_p(sh, percpu);
1964		else if (sh->check_state == check_state_run_q)
1965			ops_run_check_pq(sh, percpu, 0);
1966		else if (sh->check_state == check_state_run_pq)
1967			ops_run_check_pq(sh, percpu, 1);
1968		else
1969			BUG();
1970	}
1971
1972	if (overlap_clear && !sh->batch_head)
1973		for (i = disks; i--; ) {
1974			struct r5dev *dev = &sh->dev[i];
1975			if (test_and_clear_bit(R5_Overlap, &dev->flags))
1976				wake_up(&sh->raid_conf->wait_for_overlap);
1977		}
1978	put_cpu();
1979}
1980
1981static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
1982{
1983	struct stripe_head *sh;
1984
1985	sh = kmem_cache_zalloc(sc, gfp);
1986	if (sh) {
1987		spin_lock_init(&sh->stripe_lock);
1988		spin_lock_init(&sh->batch_lock);
1989		INIT_LIST_HEAD(&sh->batch_list);
1990		INIT_LIST_HEAD(&sh->lru);
1991		atomic_set(&sh->count, 1);
1992	}
1993	return sh;
1994}
1995static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
1996{
1997	struct stripe_head *sh;
1998
1999	sh = alloc_stripe(conf->slab_cache, gfp);
2000	if (!sh)
2001		return 0;
2002
2003	sh->raid_conf = conf;
2004
2005	if (grow_buffers(sh, gfp)) {
2006		shrink_buffers(sh);
2007		kmem_cache_free(conf->slab_cache, sh);
2008		return 0;
2009	}
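	/* spread new stripes round-robin across the NR_STRIPE_HASH_LOCKS buckets */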
2010	sh->hash_lock_index =
2011		conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2012	/* we just created an active stripe so... */
2013	atomic_inc(&conf->active_stripes);
2014
2015	raid5_release_stripe(sh);
2016	conf->max_nr_stripes++;
2017	return 1;
2018}
2019
2020static int grow_stripes(struct r5conf *conf, int num)
2021{
2022	struct kmem_cache *sc;
2023	int devs = max(conf->raid_disks, conf->previous_raid_disks);
2024
2025	if (conf->mddev->gendisk)
2026		sprintf(conf->cache_name[0],
2027			"raid%d-%s", conf->level, mdname(conf->mddev));
2028	else
2029		sprintf(conf->cache_name[0],
2030			"raid%d-%p", conf->level, conf->mddev);
2031	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
2032
2033	conf->active_name = 0;
2034	sc = kmem_cache_create(conf->cache_name[conf->active_name],
2035			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
2036			       0, 0, NULL);
2037	if (!sc)
2038		return 1;
2039	conf->slab_cache = sc;
2040	conf->pool_size = devs;
2041	while (num--)
2042		if (!grow_one_stripe(conf, GFP_KERNEL))
2043			return 1;
2044
2045	return 0;
2046}
2047
2048/**
2049 * scribble_len - return the required size of the scribble region
2050 * @num - total number of disks in the array
2051 *
2052 * The size must be enough to contain:
2053 * 1/ a struct page pointer for each device in the array +2
2054 * 2/ room to convert each entry in (1) to its corresponding dma
2055 *    (dma_map_page()) or page (page_address()) address.
2056 *
2057 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
2058 * calculate over all devices (not just the data blocks), using zeros in place
2059 * of the P and Q blocks.
2060 */
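/* For example, a hypothetical 8-device array (num == 8) needs room for
 * 8 + 2 = 10 struct page pointers plus 10 corresponding address-conversion
 * slots in every scribble element; the figure is only to illustrate the
 * formula below.
 */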
2061static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
2062{
2063	struct flex_array *ret;
2064	size_t len;
2065
2066	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
2067	ret = flex_array_alloc(len, cnt, flags);
2068	if (!ret)
2069		return NULL;
2070	/* always prealloc all elements, so no locking is required */
2071	if (flex_array_prealloc(ret, 0, cnt, flags)) {
2072		flex_array_free(ret);
2073		return NULL;
2074	}
2075	return ret;
2076}
2077
2078static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2079{
2080	unsigned long cpu;
2081	int err = 0;
2082
2083	/*
2084	 * Never shrink. And mddev_suspend() could deadlock if this is called
2085	 * from raid5d. In that case, scribble_disks and scribble_sectors
2086	 * should equal new_disks and new_sectors
2087	 */
2088	if (conf->scribble_disks >= new_disks &&
2089	    conf->scribble_sectors >= new_sectors)
2090		return 0;
2091	mddev_suspend(conf->mddev);
2092	get_online_cpus();
2093	for_each_present_cpu(cpu) {
2094		struct raid5_percpu *percpu;
2095		struct flex_array *scribble;
2096
2097		percpu = per_cpu_ptr(conf->percpu, cpu);
2098		scribble = scribble_alloc(new_disks,
2099					  new_sectors / STRIPE_SECTORS,
2100					  GFP_NOIO);
2101
2102		if (scribble) {
2103			flex_array_free(percpu->scribble);
2104			percpu->scribble = scribble;
2105		} else {
2106			err = -ENOMEM;
2107			break;
2108		}
2109	}
2110	put_online_cpus();
2111	mddev_resume(conf->mddev);
2112	if (!err) {
2113		conf->scribble_disks = new_disks;
2114		conf->scribble_sectors = new_sectors;
2115	}
2116	return err;
2117}
2118
2119static int resize_stripes(struct r5conf *conf, int newsize)
2120{
2121	/* Make all the stripes able to hold 'newsize' devices.
2122	 * New slots in each stripe get 'page' set to a new page.
2123	 *
2124	 * This happens in stages:
2125	 * 1/ create a new kmem_cache and allocate the required number of
2126	 *    stripe_heads.
2127	 * 2/ gather all the old stripe_heads and transfer the pages across
2128	 *    to the new stripe_heads.  This will have the side effect of
2129	 *    freezing the array as once all stripe_heads have been collected,
2130	 *    no IO will be possible.  Old stripe heads are freed once their
2131	 *    pages have been transferred over, and the old kmem_cache is
2132	 *    freed when all stripes are done.
2133	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
2134	 *    we simply return a failure status - no need to clean anything up.
2135	 * 4/ allocate new pages for the new slots in the new stripe_heads.
2136	 *    If this fails, we don't bother trying to shrink the
2137	 *    stripe_heads down again, we just leave them as they are.
2138	 *    As each stripe_head is processed the new one is released into
2139	 *    active service.
2140	 *
2141	 * Once step2 is started, we cannot afford to wait for a write,
2142	 * so we use GFP_NOIO allocations.
2143	 */
2144	struct stripe_head *osh, *nsh;
2145	LIST_HEAD(newstripes);
2146	struct disk_info *ndisks;
2147	int err;
2148	struct kmem_cache *sc;
2149	int i;
2150	int hash, cnt;
2151
2152	if (newsize <= conf->pool_size)
2153		return 0; /* never bother to shrink */
2154
2155	err = md_allow_write(conf->mddev);
2156	if (err)
2157		return err;
2158
2159	/* Step 1 */
2160	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2161			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
2162			       0, 0, NULL);
2163	if (!sc)
2164		return -ENOMEM;
2165
2166	/* Need to ensure auto-resizing doesn't interfere */
2167	mutex_lock(&conf->cache_size_mutex);
2168
2169	for (i = conf->max_nr_stripes; i; i--) {
2170		nsh = alloc_stripe(sc, GFP_KERNEL);
2171		if (!nsh)
2172			break;
2173
2174		nsh->raid_conf = conf;
2175		list_add(&nsh->lru, &newstripes);
2176	}
2177	if (i) {
2178		/* didn't get enough, give up */
2179		while (!list_empty(&newstripes)) {
2180			nsh = list_entry(newstripes.next, struct stripe_head, lru);
2181			list_del(&nsh->lru);
2182			kmem_cache_free(sc, nsh);
2183		}
2184		kmem_cache_destroy(sc);
2185		mutex_unlock(&conf->cache_size_mutex);
2186		return -ENOMEM;
2187	}
2188	/* Step 2 - Must use GFP_NOIO now.
2189	 * OK, we have enough stripes, start collecting inactive
2190	 * stripes and copying them over
2191	 */
2192	hash = 0;
2193	cnt = 0;
2194	list_for_each_entry(nsh, &newstripes, lru) {
2195		lock_device_hash_lock(conf, hash);
2196		wait_event_cmd(conf->wait_for_stripe,
2197				    !list_empty(conf->inactive_list + hash),
2198				    unlock_device_hash_lock(conf, hash),
2199				    lock_device_hash_lock(conf, hash));
2200		osh = get_free_stripe(conf, hash);
2201		unlock_device_hash_lock(conf, hash);
2202
2203		for(i=0; i<conf->pool_size; i++) {
2204			nsh->dev[i].page = osh->dev[i].page;
2205			nsh->dev[i].orig_page = osh->dev[i].page;
2206		}
2207		nsh->hash_lock_index = hash;
2208		kmem_cache_free(conf->slab_cache, osh);
2209		cnt++;
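		/* advance to the next hash bucket once it has received its
		 * share; the first (max_nr_stripes % NR_STRIPE_HASH_LOCKS)
		 * buckets each get one extra stripe.
		 */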
2210		if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2211		    !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2212			hash++;
2213			cnt = 0;
2214		}
2215	}
2216	kmem_cache_destroy(conf->slab_cache);
2217
2218	/* Step 3.
2219	 * At this point, we are holding all the stripes so the array
2220	 * is completely stalled, so now is a good time to resize
2221	 * conf->disks and the scribble region
2222	 */
2223	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
2224	if (ndisks) {
2225		for (i=0; i<conf->raid_disks; i++)
2226			ndisks[i] = conf->disks[i];
2227		kfree(conf->disks);
2228		conf->disks = ndisks;
2229	} else
2230		err = -ENOMEM;
2231
2232	mutex_unlock(&conf->cache_size_mutex);
2233	/* Step 4, return new stripes to service */
2234	while(!list_empty(&newstripes)) {
2235		nsh = list_entry(newstripes.next, struct stripe_head, lru);
2236		list_del_init(&nsh->lru);
2237
2238		for (i=conf->raid_disks; i < newsize; i++)
2239			if (nsh->dev[i].page == NULL) {
2240				struct page *p = alloc_page(GFP_NOIO);
2241				nsh->dev[i].page = p;
2242				nsh->dev[i].orig_page = p;
2243				if (!p)
2244					err = -ENOMEM;
2245			}
2246		raid5_release_stripe(nsh);
2247	}
2248	/* critical section passed, GFP_NOIO no longer needed */
2249
2250	conf->slab_cache = sc;
2251	conf->active_name = 1-conf->active_name;
2252	if (!err)
2253		conf->pool_size = newsize;
2254	return err;
2255}
2256
2257static int drop_one_stripe(struct r5conf *conf)
2258{
2259	struct stripe_head *sh;
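	/* undo grow_one_stripe(): take a stripe back from the bucket that
	 * the most recently grown stripe was assigned to
	 */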
2260	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2261
2262	spin_lock_irq(conf->hash_locks + hash);
2263	sh = get_free_stripe(conf, hash);
2264	spin_unlock_irq(conf->hash_locks + hash);
2265	if (!sh)
2266		return 0;
2267	BUG_ON(atomic_read(&sh->count));
2268	shrink_buffers(sh);
2269	kmem_cache_free(conf->slab_cache, sh);
2270	atomic_dec(&conf->active_stripes);
2271	conf->max_nr_stripes--;
2272	return 1;
2273}
2274
2275static void shrink_stripes(struct r5conf *conf)
2276{
2277	while (conf->max_nr_stripes &&
2278	       drop_one_stripe(conf))
2279		;
2280
2281	kmem_cache_destroy(conf->slab_cache);
2282	conf->slab_cache = NULL;
2283}
2284
2285static void raid5_end_read_request(struct bio * bi)
2286{
2287	struct stripe_head *sh = bi->bi_private;
2288	struct r5conf *conf = sh->raid_conf;
2289	int disks = sh->disks, i;
2290	char b[BDEVNAME_SIZE];
2291	struct md_rdev *rdev = NULL;
2292	sector_t s;
2293
2294	for (i=0 ; i<disks; i++)
2295		if (bi == &sh->dev[i].req)
2296			break;
2297
2298	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
2299		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2300		bi->bi_error);
2301	if (i == disks) {
2302		BUG();
2303		return;
2304	}
2305	if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2306		/* If replacement finished while this request was outstanding,
2307		 * 'replacement' might be NULL already.
2308		 * In that case it moved down to 'rdev'.
2309		 * rdev is not removed until all requests are finished.
2310		 */
2311		rdev = conf->disks[i].replacement;
2312	if (!rdev)
2313		rdev = conf->disks[i].rdev;
2314
2315	if (use_new_offset(conf, sh))
2316		s = sh->sector + rdev->new_data_offset;
2317	else
2318		s = sh->sector + rdev->data_offset;
2319	if (!bi->bi_error) {
2320		set_bit(R5_UPTODATE, &sh->dev[i].flags);
2321		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2322			/* Note that this cannot happen on a
2323			 * replacement device.  We just fail those on
2324			 * any error
2325			 */
2326			printk_ratelimited(
2327				KERN_INFO
2328				"md/raid:%s: read error corrected"
2329				" (%lu sectors at %llu on %s)\n",
2330				mdname(conf->mddev), STRIPE_SECTORS,
2331				(unsigned long long)s,
2332				bdevname(rdev->bdev, b));
2333			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
2334			clear_bit(R5_ReadError, &sh->dev[i].flags);
2335			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2336		} else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2337			clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2338
2339		if (atomic_read(&rdev->read_errors))
2340			atomic_set(&rdev->read_errors, 0);
2341	} else {
2342		const char *bdn = bdevname(rdev->bdev, b);
2343		int retry = 0;
2344		int set_bad = 0;
2345
2346		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2347		atomic_inc(&rdev->read_errors);
2348		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2349			printk_ratelimited(
2350				KERN_WARNING
2351				"md/raid:%s: read error on replacement device "
2352				"(sector %llu on %s).\n",
2353				mdname(conf->mddev),
2354				(unsigned long long)s,
2355				bdn);
2356		else if (conf->mddev->degraded >= conf->max_degraded) {
2357			set_bad = 1;
2358			printk_ratelimited(
2359				KERN_WARNING
2360				"md/raid:%s: read error not correctable "
2361				"(sector %llu on %s).\n",
2362				mdname(conf->mddev),
2363				(unsigned long long)s,
2364				bdn);
2365		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
2366			/* Oh, no!!! */
2367			set_bad = 1;
2368			printk_ratelimited(
2369				KERN_WARNING
2370				"md/raid:%s: read error NOT corrected!! "
2371				"(sector %llu on %s).\n",
2372				mdname(conf->mddev),
2373				(unsigned long long)s,
2374				bdn);
2375		} else if (atomic_read(&rdev->read_errors)
2376			 > conf->max_nr_stripes)
2377			printk(KERN_WARNING
2378			       "md/raid:%s: Too many read errors, failing device %s.\n",
2379			       mdname(conf->mddev), bdn);
2380		else
2381			retry = 1;
2382		if (set_bad && test_bit(In_sync, &rdev->flags)
2383		    && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2384			retry = 1;
2385		if (retry)
2386			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2387				set_bit(R5_ReadError, &sh->dev[i].flags);
2388				clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2389			} else
2390				set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2391		else {
2392			clear_bit(R5_ReadError, &sh->dev[i].flags);
2393			clear_bit(R5_ReWrite, &sh->dev[i].flags);
2394			if (!(set_bad
2395			      && test_bit(In_sync, &rdev->flags)
2396			      && rdev_set_badblocks(
2397				      rdev, sh->sector, STRIPE_SECTORS, 0)))
2398				md_error(conf->mddev, rdev);
2399		}
2400	}
2401	rdev_dec_pending(rdev, conf->mddev);
2402	clear_bit(R5_LOCKED, &sh->dev[i].flags);
2403	set_bit(STRIPE_HANDLE, &sh->state);
2404	raid5_release_stripe(sh);
2405}
2406
2407static void raid5_end_write_request(struct bio *bi)
2408{
2409	struct stripe_head *sh = bi->bi_private;
2410	struct r5conf *conf = sh->raid_conf;
2411	int disks = sh->disks, i;
2412	struct md_rdev *uninitialized_var(rdev);
2413	sector_t first_bad;
2414	int bad_sectors;
2415	int replacement = 0;
2416
2417	for (i = 0 ; i < disks; i++) {
2418		if (bi == &sh->dev[i].req) {
2419			rdev = conf->disks[i].rdev;
2420			break;
2421		}
2422		if (bi == &sh->dev[i].rreq) {
2423			rdev = conf->disks[i].replacement;
2424			if (rdev)
2425				replacement = 1;
2426			else
2427				/* rdev was removed and 'replacement'
2428				 * replaced it.  rdev is not removed
2429				 * until all requests are finished.
2430				 */
2431				rdev = conf->disks[i].rdev;
2432			break;
2433		}
2434	}
2435	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
2436		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
2437		bi->bi_error);
2438	if (i == disks) {
2439		BUG();
2440		return;
2441	}
2442
2443	if (replacement) {
2444		if (bi->bi_error)
2445			md_error(conf->mddev, rdev);
2446		else if (is_badblock(rdev, sh->sector,
2447				     STRIPE_SECTORS,
2448				     &first_bad, &bad_sectors))
2449			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2450	} else {
2451		if (bi->bi_error) {
2452			set_bit(STRIPE_DEGRADED, &sh->state);
2453			set_bit(WriteErrorSeen, &rdev->flags);
2454			set_bit(R5_WriteError, &sh->dev[i].flags);
2455			if (!test_and_set_bit(WantReplacement, &rdev->flags))
2456				set_bit(MD_RECOVERY_NEEDED,
2457					&rdev->mddev->recovery);
2458		} else if (is_badblock(rdev, sh->sector,
2459				       STRIPE_SECTORS,
2460				       &first_bad, &bad_sectors)) {
2461			set_bit(R5_MadeGood, &sh->dev[i].flags);
2462			if (test_bit(R5_ReadError, &sh->dev[i].flags))
2463				/* That was a successful write so make
2464				 * sure it looks like we already did
2465				 * a re-write.
2466				 */
2467				set_bit(R5_ReWrite, &sh->dev[i].flags);
2468		}
2469	}
2470	rdev_dec_pending(rdev, conf->mddev);
2471
2472	if (sh->batch_head && bi->bi_error && !replacement)
2473		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2474
2475	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2476		clear_bit(R5_LOCKED, &sh->dev[i].flags);
2477	set_bit(STRIPE_HANDLE, &sh->state);
2478	raid5_release_stripe(sh);
2479
2480	if (sh->batch_head && sh != sh->batch_head)
2481		raid5_release_stripe(sh->batch_head);
2482}
2483
2484static void raid5_build_block(struct stripe_head *sh, int i, int previous)
2485{
2486	struct r5dev *dev = &sh->dev[i];
2487
2488	bio_init(&dev->req);
2489	dev->req.bi_io_vec = &dev->vec;
2490	dev->req.bi_max_vecs = 1;
2491	dev->req.bi_private = sh;
2492
2493	bio_init(&dev->rreq);
2494	dev->rreq.bi_io_vec = &dev->rvec;
2495	dev->rreq.bi_max_vecs = 1;
2496	dev->rreq.bi_private = sh;
2497
2498	dev->flags = 0;
2499	dev->sector = raid5_compute_blocknr(sh, i, previous);
2500}
2501
2502static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2503{
2504	char b[BDEVNAME_SIZE];
2505	struct r5conf *conf = mddev->private;
2506	unsigned long flags;
2507	pr_debug("raid456: error called\n");
2508
2509	spin_lock_irqsave(&conf->device_lock, flags);
2510	clear_bit(In_sync, &rdev->flags);
2511	mddev->degraded = calc_degraded(conf);
2512	spin_unlock_irqrestore(&conf->device_lock, flags);
2513	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2514
2515	set_bit(Blocked, &rdev->flags);
2516	set_bit(Faulty, &rdev->flags);
2517	set_bit(MD_CHANGE_DEVS, &mddev->flags);
2518	set_bit(MD_CHANGE_PENDING, &mddev->flags);
2519	printk(KERN_ALERT
2520	       "md/raid:%s: Disk failure on %s, disabling device.\n"
2521	       "md/raid:%s: Operation continuing on %d devices.\n",
2522	       mdname(mddev),
2523	       bdevname(rdev->bdev, b),
2524	       mdname(mddev),
2525	       conf->raid_disks - mddev->degraded);
2526}
2527
2528/*
2529 * Input: a 'big' sector number,
2530 * Output: index of the data and parity disk, and the sector # in them.
2531 */
2532sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
2533			      int previous, int *dd_idx,
2534			      struct stripe_head *sh)
2535{
2536	sector_t stripe, stripe2;
2537	sector_t chunk_number;
2538	unsigned int chunk_offset;
2539	int pd_idx, qd_idx;
2540	int ddf_layout = 0;
2541	sector_t new_sector;
2542	int algorithm = previous ? conf->prev_algo
2543				 : conf->algorithm;
2544	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2545					 : conf->chunk_sectors;
2546	int raid_disks = previous ? conf->previous_raid_disks
2547				  : conf->raid_disks;
2548	int data_disks = raid_disks - conf->max_degraded;
2549
2550	/* First compute the information on this sector */
2551
2552	/*
2553	 * Compute the chunk number and the sector offset inside the chunk
2554	 */
2555	chunk_offset = sector_div(r_sector, sectors_per_chunk);
2556	chunk_number = r_sector;
2557
2558	/*
2559	 * Compute the stripe number
2560	 */
2561	stripe = chunk_number;
2562	*dd_idx = sector_div(stripe, data_disks);
2563	stripe2 = stripe;
2564	/*
2565	 * Select the parity disk based on the user selected algorithm.
2566	 */
2567	pd_idx = qd_idx = -1;
2568	switch(conf->level) {
2569	case 4:
2570		pd_idx = data_disks;
2571		break;
2572	case 5:
2573		switch (algorithm) {
2574		case ALGORITHM_LEFT_ASYMMETRIC:
2575			pd_idx = data_disks - sector_div(stripe2, raid_disks);
2576			if (*dd_idx >= pd_idx)
2577				(*dd_idx)++;
2578			break;
2579		case ALGORITHM_RIGHT_ASYMMETRIC:
2580			pd_idx = sector_div(stripe2, raid_disks);
2581			if (*dd_idx >= pd_idx)
2582				(*dd_idx)++;
2583			break;
2584		case ALGORITHM_LEFT_SYMMETRIC:
2585			pd_idx = data_disks - sector_div(stripe2, raid_disks);
2586			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2587			break;
2588		case ALGORITHM_RIGHT_SYMMETRIC:
2589			pd_idx = sector_div(stripe2, raid_disks);
2590			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2591			break;
2592		case ALGORITHM_PARITY_0:
2593			pd_idx = 0;
2594			(*dd_idx)++;
2595			break;
2596		case ALGORITHM_PARITY_N:
2597			pd_idx = data_disks;
2598			break;
2599		default:
2600			BUG();
2601		}
2602		break;
2603	case 6:
2604
2605		switch (algorithm) {
2606		case ALGORITHM_LEFT_ASYMMETRIC:
2607			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2608			qd_idx = pd_idx + 1;
2609			if (pd_idx == raid_disks-1) {
2610				(*dd_idx)++;	/* Q D D D P */
2611				qd_idx = 0;
2612			} else if (*dd_idx >= pd_idx)
2613				(*dd_idx) += 2; /* D D P Q D */
2614			break;
2615		case ALGORITHM_RIGHT_ASYMMETRIC:
2616			pd_idx = sector_div(stripe2, raid_disks);
2617			qd_idx = pd_idx + 1;
2618			if (pd_idx == raid_disks-1) {
2619				(*dd_idx)++;	/* Q D D D P */
2620				qd_idx = 0;
2621			} else if (*dd_idx >= pd_idx)
2622				(*dd_idx) += 2; /* D D P Q D */
2623			break;
2624		case ALGORITHM_LEFT_SYMMETRIC:
2625			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2626			qd_idx = (pd_idx + 1) % raid_disks;
2627			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2628			break;
2629		case ALGORITHM_RIGHT_SYMMETRIC:
2630			pd_idx = sector_div(stripe2, raid_disks);
2631			qd_idx = (pd_idx + 1) % raid_disks;
2632			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
2633			break;
2634
2635		case ALGORITHM_PARITY_0:
2636			pd_idx = 0;
2637			qd_idx = 1;
2638			(*dd_idx) += 2;
2639			break;
2640		case ALGORITHM_PARITY_N:
2641			pd_idx = data_disks;
2642			qd_idx = data_disks + 1;
2643			break;
2644
2645		case ALGORITHM_ROTATING_ZERO_RESTART:
2646			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
2647			 * of blocks for computing Q is different.
2648			 */
2649			pd_idx = sector_div(stripe2, raid_disks);
2650			qd_idx = pd_idx + 1;
2651			if (pd_idx == raid_disks-1) {
2652				(*dd_idx)++;	/* Q D D D P */
2653				qd_idx = 0;
2654			} else if (*dd_idx >= pd_idx)
2655				(*dd_idx) += 2; /* D D P Q D */
2656			ddf_layout = 1;
2657			break;
2658
2659		case ALGORITHM_ROTATING_N_RESTART:
2660			/* Same as left_asymmetric, but the first stripe is
2661			 * D D D P Q  rather than
2662			 * Q D D D P
2663			 */
2664			stripe2 += 1;
2665			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2666			qd_idx = pd_idx + 1;
2667			if (pd_idx == raid_disks-1) {
2668				(*dd_idx)++;	/* Q D D D P */
2669				qd_idx = 0;
2670			} else if (*dd_idx >= pd_idx)
2671				(*dd_idx) += 2; /* D D P Q D */
2672			ddf_layout = 1;
2673			break;
2674
2675		case ALGORITHM_ROTATING_N_CONTINUE:
2676			/* Same as left_symmetric but Q is before P */
2677			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2678			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
2679			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2680			ddf_layout = 1;
2681			break;
2682
2683		case ALGORITHM_LEFT_ASYMMETRIC_6:
2684			/* RAID5 left_asymmetric, with Q on last device */
2685			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2686			if (*dd_idx >= pd_idx)
2687				(*dd_idx)++;
2688			qd_idx = raid_disks - 1;
2689			break;
2690
2691		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2692			pd_idx = sector_div(stripe2, raid_disks-1);
2693			if (*dd_idx >= pd_idx)
2694				(*dd_idx)++;
2695			qd_idx = raid_disks - 1;
2696			break;
2697
2698		case ALGORITHM_LEFT_SYMMETRIC_6:
2699			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
2700			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2701			qd_idx = raid_disks - 1;
2702			break;
2703
2704		case ALGORITHM_RIGHT_SYMMETRIC_6:
2705			pd_idx = sector_div(stripe2, raid_disks-1);
2706			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
2707			qd_idx = raid_disks - 1;
2708			break;
2709
2710		case ALGORITHM_PARITY_0_6:
2711			pd_idx = 0;
2712			(*dd_idx)++;
2713			qd_idx = raid_disks - 1;
2714			break;
2715
2716		default:
2717			BUG();
2718		}
2719		break;
2720	}
2721
2722	if (sh) {
2723		sh->pd_idx = pd_idx;
2724		sh->qd_idx = qd_idx;
2725		sh->ddf_layout = ddf_layout;
2726	}
2727	/*
2728	 * Finally, compute the new sector number
2729	 */
2730	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
2731	return new_sector;
2732}
2733
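/* raid5_compute_blocknr - the inverse of raid5_compute_sector(): map disk
 * index 'i' of this stripe back to the logical array sector, re-running the
 * forward mapping as a sanity check.
 */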
2734sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
2735{
2736	struct r5conf *conf = sh->raid_conf;
2737	int raid_disks = sh->disks;
2738	int data_disks = raid_disks - conf->max_degraded;
2739	sector_t new_sector = sh->sector, check;
2740	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2741					 : conf->chunk_sectors;
2742	int algorithm = previous ? conf->prev_algo
2743				 : conf->algorithm;
2744	sector_t stripe;
2745	int chunk_offset;
2746	sector_t chunk_number;
2747	int dummy1, dd_idx = i;
2748	sector_t r_sector;
2749	struct stripe_head sh2;
2750
2751	chunk_offset = sector_div(new_sector, sectors_per_chunk);
2752	stripe = new_sector;
2753
2754	if (i == sh->pd_idx)
2755		return 0;
2756	switch(conf->level) {
2757	case 4: break;
2758	case 5:
2759		switch (algorithm) {
2760		case ALGORITHM_LEFT_ASYMMETRIC:
2761		case ALGORITHM_RIGHT_ASYMMETRIC:
2762			if (i > sh->pd_idx)
2763				i--;
2764			break;
2765		case ALGORITHM_LEFT_SYMMETRIC:
2766		case ALGORITHM_RIGHT_SYMMETRIC:
2767			if (i < sh->pd_idx)
2768				i += raid_disks;
2769			i -= (sh->pd_idx + 1);
2770			break;
2771		case ALGORITHM_PARITY_0:
2772			i -= 1;
2773			break;
2774		case ALGORITHM_PARITY_N:
2775			break;
2776		default:
2777			BUG();
2778		}
2779		break;
2780	case 6:
2781		if (i == sh->qd_idx)
2782			return 0; /* It is the Q disk */
2783		switch (algorithm) {
2784		case ALGORITHM_LEFT_ASYMMETRIC:
2785		case ALGORITHM_RIGHT_ASYMMETRIC:
2786		case ALGORITHM_ROTATING_ZERO_RESTART:
2787		case ALGORITHM_ROTATING_N_RESTART:
2788			if (sh->pd_idx == raid_disks-1)
2789				i--;	/* Q D D D P */
2790			else if (i > sh->pd_idx)
2791				i -= 2; /* D D P Q D */
2792			break;
2793		case ALGORITHM_LEFT_SYMMETRIC:
2794		case ALGORITHM_RIGHT_SYMMETRIC:
2795			if (sh->pd_idx == raid_disks-1)
2796				i--; /* Q D D D P */
2797			else {
2798				/* D D P Q D */
2799				if (i < sh->pd_idx)
2800					i += raid_disks;
2801				i -= (sh->pd_idx + 2);
2802			}
2803			break;
2804		case ALGORITHM_PARITY_0:
2805			i -= 2;
2806			break;
2807		case ALGORITHM_PARITY_N:
2808			break;
2809		case ALGORITHM_ROTATING_N_CONTINUE:
2810			/* Like left_symmetric, but P is before Q */
2811			if (sh->pd_idx == 0)
2812				i--;	/* P D D D Q */
2813			else {
2814				/* D D Q P D */
2815				if (i < sh->pd_idx)
2816					i += raid_disks;
2817				i -= (sh->pd_idx + 1);
2818			}
2819			break;
2820		case ALGORITHM_LEFT_ASYMMETRIC_6:
2821		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2822			if (i > sh->pd_idx)
2823				i--;
2824			break;
2825		case ALGORITHM_LEFT_SYMMETRIC_6:
2826		case ALGORITHM_RIGHT_SYMMETRIC_6:
2827			if (i < sh->pd_idx)
2828				i += data_disks + 1;
2829			i -= (sh->pd_idx + 1);
2830			break;
2831		case ALGORITHM_PARITY_0_6:
2832			i -= 1;
2833			break;
2834		default:
2835			BUG();
2836		}
2837		break;
2838	}
2839
2840	chunk_number = stripe * data_disks + i;
2841	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2842
2843	check = raid5_compute_sector(conf, r_sector,
2844				     previous, &dummy1, &sh2);
2845	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2846		|| sh2.qd_idx != sh->qd_idx) {
2847		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2848		       mdname(conf->mddev));
2849		return 0;
2850	}
2851	return r_sector;
2852}
2853
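/* schedule_reconstruction - set up either a reconstruct-write (rcw) or a
 * read-modify-write (prexor + drain): lock the blocks that will be drained
 * or recomputed and record the required operations in s->ops_request.
 */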
2854static void
2855schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2856			 int rcw, int expand)
2857{
2858	int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
2859	struct r5conf *conf = sh->raid_conf;
2860	int level = conf->level;
2861
2862	if (rcw) {
2863
2864		for (i = disks; i--; ) {
2865			struct r5dev *dev = &sh->dev[i];
2866
2867			if (dev->towrite) {
2868				set_bit(R5_LOCKED, &dev->flags);
2869				set_bit(R5_Wantdrain, &dev->flags);
2870				if (!expand)
2871					clear_bit(R5_UPTODATE, &dev->flags);
2872				s->locked++;
2873			}
2874		}
2875		/* if we are not expanding this is a proper write request, and
2876		 * there will be bios with new data to be drained into the
2877		 * stripe cache
2878		 */
2879		if (!expand) {
2880			if (!s->locked)
2881				/* False alarm, nothing to do */
2882				return;
2883			sh->reconstruct_state = reconstruct_state_drain_run;
2884			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2885		} else
2886			sh->reconstruct_state = reconstruct_state_run;
2887
2888		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2889
2890		if (s->locked + conf->max_degraded == disks)
2891			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2892				atomic_inc(&conf->pending_full_writes);
2893	} else {
2894		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2895			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2896		BUG_ON(level == 6 &&
2897			(!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
2898			   test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
2899
2900		for (i = disks; i--; ) {
2901			struct r5dev *dev = &sh->dev[i];
2902			if (i == pd_idx || i == qd_idx)
2903				continue;
2904
2905			if (dev->towrite &&
2906			    (test_bit(R5_UPTODATE, &dev->flags) ||
2907			     test_bit(R5_Wantcompute, &dev->flags))) {
2908				set_bit(R5_Wantdrain, &dev->flags);
2909				set_bit(R5_LOCKED, &dev->flags);
2910				clear_bit(R5_UPTODATE, &dev->flags);
2911				s->locked++;
2912			}
2913		}
2914		if (!s->locked)
2915			/* False alarm - nothing to do */
2916			return;
2917		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2918		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2919		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2920		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2921	}
2922
2923	/* keep the parity disk(s) locked while asynchronous operations
2924	 * are in flight
2925	 */
2926	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2927	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2928	s->locked++;
2929
2930	if (level == 6) {
2931		int qd_idx = sh->qd_idx;
2932		struct r5dev *dev = &sh->dev[qd_idx];
2933
2934		set_bit(R5_LOCKED, &dev->flags);
2935		clear_bit(R5_UPTODATE, &dev->flags);
2936		s->locked++;
2937	}
2938
2939	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2940		__func__, (unsigned long long)sh->sector,
2941		s->locked, s->ops_request);
2942}
2943
2944/*
2945 * Each stripe/dev can have one or more bion attached.
2946 * toread/towrite point to the first in a chain.
2947 * The bi_next chain must be in order.
2948 */
2949static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
2950			  int forwrite, int previous)
2951{
2952	struct bio **bip;
2953	struct r5conf *conf = sh->raid_conf;
2954	int firstwrite=0;
2955
2956	pr_debug("adding bi b#%llu to stripe s#%llu\n",
2957		(unsigned long long)bi->bi_iter.bi_sector,
2958		(unsigned long long)sh->sector);
2959
2960	/*
2961	 * If several bios share a stripe, the bio bi_phys_segments acts as a
2962	 * reference count to avoid races. The reference count should already be
2963	 * increased before this function is called (for example, in
2964	 * raid5_make_request()), so other bios sharing this stripe will not free
2965	 * the stripe. If a stripe is owned by a single bio, the stripe lock will
2966	 * protect it.
2967	 */
2968	spin_lock_irq(&sh->stripe_lock);
2969	/* Don't allow new IO added to stripes in batch list */
2970	if (sh->batch_head)
2971		goto overlap;
2972	if (forwrite) {
2973		bip = &sh->dev[dd_idx].towrite;
2974		if (*bip == NULL)
2975			firstwrite = 1;
2976	} else
2977		bip = &sh->dev[dd_idx].toread;
2978	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2979		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2980			goto overlap;
2981		bip = & (*bip)->bi_next;
2982	}
2983	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
2984		goto overlap;
2985
2986	if (!forwrite || previous)
2987		clear_bit(STRIPE_BATCH_READY, &sh->state);
2988
2989	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2990	if (*bip)
2991		bi->bi_next = *bip;
2992	*bip = bi;
2993	raid5_inc_bi_active_stripes(bi);
2994
2995	if (forwrite) {
2996		/* check if page is covered */
2997		sector_t sector = sh->dev[dd_idx].sector;
2998		for (bi=sh->dev[dd_idx].towrite;
2999		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
3000			     bi && bi->bi_iter.bi_sector <= sector;
3001		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
3002			if (bio_end_sector(bi) >= sector)
3003				sector = bio_end_sector(bi);
3004		}
3005		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
3006			if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
3007				sh->overwrite_disks++;
3008	}
3009
3010	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
3011		(unsigned long long)(*bip)->bi_iter.bi_sector,
3012		(unsigned long long)sh->sector, dd_idx);
3013
3014	if (conf->mddev->bitmap && firstwrite) {
3015		/* Cannot hold spinlock over bitmap_startwrite,
3016		 * but must ensure this isn't added to a batch until
3017		 * we have added to the bitmap and set bm_seq.
3018		 * So set STRIPE_BITMAP_PENDING to prevent
3019		 * batching.
3020		 * If multiple add_stripe_bio() calls race here they
3021	 * must all set STRIPE_BITMAP_PENDING.  So only the first one
3022		 * to complete "bitmap_startwrite" gets to set
3023		 * STRIPE_BIT_DELAY.  This is important as once a stripe
3024		 * is added to a batch, STRIPE_BIT_DELAY cannot be changed
3025		 * any more.
3026		 */
3027		set_bit(STRIPE_BITMAP_PENDING, &sh->state);
3028		spin_unlock_irq(&sh->stripe_lock);
3029		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3030				  STRIPE_SECTORS, 0);
3031		spin_lock_irq(&sh->stripe_lock);
3032		clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
3033		if (!sh->batch_head) {
3034			sh->bm_seq = conf->seq_flush+1;
3035			set_bit(STRIPE_BIT_DELAY, &sh->state);
3036		}
3037	}
3038	spin_unlock_irq(&sh->stripe_lock);
3039
3040	if (stripe_can_batch(sh))
3041		stripe_add_to_batch_list(conf, sh);
3042	return 1;
3043
3044 overlap:
3045	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3046	spin_unlock_irq(&sh->stripe_lock);
3047	return 0;
3048}
3049
3050static void end_reshape(struct r5conf *conf);
3051
3052static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3053			    struct stripe_head *sh)
3054{
3055	int sectors_per_chunk =
3056		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3057	int dd_idx;
3058	int chunk_offset = sector_div(stripe, sectors_per_chunk);
3059	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3060
3061	raid5_compute_sector(conf,
3062			     stripe * (disks - conf->max_degraded)
3063			     *sectors_per_chunk + chunk_offset,
3064			     previous,
3065			     &dd_idx, sh);
3066}
3067
3068static void
3069handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3070				struct stripe_head_state *s, int disks,
3071				struct bio_list *return_bi)
3072{
3073	int i;
3074	BUG_ON(sh->batch_head);
3075	for (i = disks; i--; ) {
3076		struct bio *bi;
3077		int bitmap_end = 0;
3078
3079		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
3080			struct md_rdev *rdev;
3081			rcu_read_lock();
3082			rdev = rcu_dereference(conf->disks[i].rdev);
3083			if (rdev && test_bit(In_sync, &rdev->flags))
3084				atomic_inc(&rdev->nr_pending);
3085			else
3086				rdev = NULL;
3087			rcu_read_unlock();
3088			if (rdev) {
3089				if (!rdev_set_badblocks(
3090					    rdev,
3091					    sh->sector,
3092					    STRIPE_SECTORS, 0))
3093					md_error(conf->mddev, rdev);
3094				rdev_dec_pending(rdev, conf->mddev);
3095			}
3096		}
3097		spin_lock_irq(&sh->stripe_lock);
3098		/* fail all writes first */
3099		bi = sh->dev[i].towrite;
3100		sh->dev[i].towrite = NULL;
3101		sh->overwrite_disks = 0;
3102		spin_unlock_irq(&sh->stripe_lock);
3103		if (bi)
3104			bitmap_end = 1;
3105
3106		r5l_stripe_write_finished(sh);
3107
3108		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3109			wake_up(&conf->wait_for_overlap);
3110
3111		while (bi && bi->bi_iter.bi_sector <
3112			sh->dev[i].sector + STRIPE_SECTORS) {
3113			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3114
3115			bi->bi_error = -EIO;
3116			if (!raid5_dec_bi_active_stripes(bi)) {
3117				md_write_end(conf->mddev);
3118				bio_list_add(return_bi, bi);
3119			}
3120			bi = nextbi;
3121		}
3122		if (bitmap_end)
3123			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3124				STRIPE_SECTORS, 0, 0);
3125		bitmap_end = 0;
3126		/* and fail all 'written' */
3127		bi = sh->dev[i].written;
3128		sh->dev[i].written = NULL;
3129		if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
3130			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
3131			sh->dev[i].page = sh->dev[i].orig_page;
3132		}
3133
3134		if (bi) bitmap_end = 1;
3135		while (bi && bi->bi_iter.bi_sector <
3136		       sh->dev[i].sector + STRIPE_SECTORS) {
3137			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3138
3139			bi->bi_error = -EIO;
3140			if (!raid5_dec_bi_active_stripes(bi)) {
3141				md_write_end(conf->mddev);
3142				bio_list_add(return_bi, bi);
3143			}
3144			bi = bi2;
3145		}
3146
3147		/* fail any reads if this device is non-operational and
3148		 * the data has not reached the cache yet.
3149		 */
3150		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
3151		    s->failed > conf->max_degraded &&
3152		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
3153		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
3154			spin_lock_irq(&sh->stripe_lock);
3155			bi = sh->dev[i].toread;
3156			sh->dev[i].toread = NULL;
3157			spin_unlock_irq(&sh->stripe_lock);
3158			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3159				wake_up(&conf->wait_for_overlap);
3160			if (bi)
3161				s->to_read--;
3162			while (bi && bi->bi_iter.bi_sector <
3163			       sh->dev[i].sector + STRIPE_SECTORS) {
3164				struct bio *nextbi =
3165					r5_next_bio(bi, sh->dev[i].sector);
3166
3167				bi->bi_error = -EIO;
3168				if (!raid5_dec_bi_active_stripes(bi))
3169					bio_list_add(return_bi, bi);
3170				bi = nextbi;
3171			}
3172		}
3173		if (bitmap_end)
3174			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3175					STRIPE_SECTORS, 0, 0);
3176		/* If we were in the middle of a write the parity block might
3177		 * still be locked - so just clear all R5_LOCKED flags
3178		 */
3179		clear_bit(R5_LOCKED, &sh->dev[i].flags);
3180	}
3181	s->to_write = 0;
3182	s->written = 0;
3183
3184	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3185		if (atomic_dec_and_test(&conf->pending_full_writes))
3186			md_wakeup_thread(conf->mddev->thread);
3187}
3188
3189static void
3190handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3191		   struct stripe_head_state *s)
3192{
3193	int abort = 0;
3194	int i;
3195
3196	BUG_ON(sh->batch_head);
3197	clear_bit(STRIPE_SYNCING, &sh->state);
3198	if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
3199		wake_up(&conf->wait_for_overlap);
3200	s->syncing = 0;
3201	s->replacing = 0;
3202	/* There is nothing more to do for sync/check/repair.
3203	 * Don't even need to abort as that is handled elsewhere
3204	 * if needed, and not always wanted e.g. if there is a known
3205	 * bad block here.
3206	 * For recover/replace we need to record a bad block on all
3207	 * non-sync devices, or abort the recovery
3208	 */
3209	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3210		/* During recovery devices cannot be removed, so
3211		 * locking and refcounting of rdevs is not needed
3212		 */
3213		for (i = 0; i < conf->raid_disks; i++) {
3214			struct md_rdev *rdev = conf->disks[i].rdev;
3215			if (rdev
3216			    && !test_bit(Faulty, &rdev->flags)
3217			    && !test_bit(In_sync, &rdev->flags)
3218			    && !rdev_set_badblocks(rdev, sh->sector,
3219						   STRIPE_SECTORS, 0))
3220				abort = 1;
3221			rdev = conf->disks[i].replacement;
3222			if (rdev
3223			    && !test_bit(Faulty, &rdev->flags)
3224			    && !test_bit(In_sync, &rdev->flags)
3225			    && !rdev_set_badblocks(rdev, sh->sector,
3226						   STRIPE_SECTORS, 0))
3227				abort = 1;
3228		}
3229		if (abort)
3230			conf->recovery_disabled =
3231				conf->mddev->recovery_disabled;
3232	}
3233	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
3234}
3235
3236static int want_replace(struct stripe_head *sh, int disk_idx)
3237{
3238	struct md_rdev *rdev;
3239	int rv = 0;
3240	/* Doing recovery so rcu locking not required */
3241	rdev = sh->raid_conf->disks[disk_idx].replacement;
3242	if (rdev
3243	    && !test_bit(Faulty, &rdev->flags)
3244	    && !test_bit(In_sync, &rdev->flags)
3245	    && (rdev->recovery_offset <= sh->sector
3246		|| rdev->mddev->recovery_cp <= sh->sector))
3247		rv = 1;
3248
3249	return rv;
3250}
3251
3252/* fetch_block - checks the given member device to see if its data needs
3253 * to be read or computed to satisfy a request.
3254 *
3255 * Returns 1 when no more member devices need to be checked, otherwise returns
3256 * 0 to tell the loop in handle_stripe_fill to continue
3257 */
3258
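/* need_this_block - decide whether the data in dev[disk_idx] must be read
 * or computed before the stripe can make progress (serving reads or writes,
 * resync/expansion, or reconstruction when devices have failed).
 */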
3259static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3260			   int disk_idx, int disks)
3261{
3262	struct r5dev *dev = &sh->dev[disk_idx];
3263	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
3264				  &sh->dev[s->failed_num[1]] };
3265	int i;
3266
3267
3268	if (test_bit(R5_LOCKED, &dev->flags) ||
3269	    test_bit(R5_UPTODATE, &dev->flags))
3270		/* No point reading this as we already have it or have
3271		 * decided to get it.
3272		 */
3273		return 0;
3274
3275	if (dev->toread ||
3276	    (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
3277		/* We need this block to directly satisfy a request */
3278		return 1;
3279
3280	if (s->syncing || s->expanding ||
3281	    (s->replacing && want_replace(sh, disk_idx)))
3282		/* When syncing or expanding we read everything.
3283		 * When replacing, we need the replaced block.
3284		 */
3285		return 1;
3286
3287	if ((s->failed >= 1 && fdev[0]->toread) ||
3288	    (s->failed >= 2 && fdev[1]->toread))
3289		/* If we want to read from a failed device, then
3290		 * we need to actually read every other device.
3291		 */
3292		return 1;
3293
3294	/* Sometimes neither read-modify-write nor reconstruct-write
3295	 * cycles can work.  In those cases we read every block we
3296	 * can.  Then the parity-update is certain to have enough to
3297	 * work with.
3298	 * This can only be a problem when we need to write something,
3299	 * and some device has failed.  If either of those tests
3300	 * fail we need look no further.
3301	 */
3302	if (!s->failed || !s->to_write)
3303		return 0;
3304
3305	if (test_bit(R5_Insync, &dev->flags) &&
3306	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3307		/* Pre-reads are not permitted until after a short delay
3308		 * to gather multiple requests.  However if this
3309		 * device is not Insync, the block can only be computed
3310		 * and there is no need to delay that.
3311		 */
3312		return 0;
3313
3314	for (i = 0; i < s->failed && i < 2; i++) {
3315		if (fdev[i]->towrite &&
3316		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3317		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3318			/* If we have a partial write to a failed
3319			 * device, then we will need to reconstruct
3320			 * the content of that device, so all other
3321			 * devices must be read.
3322			 */
3323			return 1;
3324	}
3325
3326	/* If we are forced to do a reconstruct-write, either because
3327	 * the current RAID6 implementation only supports that, or
3328	 * because parity cannot be trusted and we are currently
3329	 * recovering it, there is extra need to be careful.
3330	 * If one of the devices that we would need to read, because
3331	 * it is not being overwritten (and maybe not written at all)
3332	 * is missing/faulty, then we need to read everything we can.
3333	 */
3334	if (sh->raid_conf->level != 6 &&
3335	    sh->sector < sh->raid_conf->mddev->recovery_cp)
3336		/* reconstruct-write isn't being forced */
3337		return 0;
3338	for (i = 0; i < s->failed && i < 2; i++) {
3339		if (s->failed_num[i] != sh->pd_idx &&
3340		    s->failed_num[i] != sh->qd_idx &&
3341		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3342		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3343			return 1;
3344	}
3345
3346	return 0;
3347}
3348
3349static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3350		       int disk_idx, int disks)
3351{
3352	struct r5dev *dev = &sh->dev[disk_idx];
3353
3354	/* is the data in this block needed, and can we get it? */
3355	if (need_this_block(sh, s, disk_idx, disks)) {
3356		/* we would like to get this block, possibly by computing it,
3357		 * otherwise read it if the backing disk is insync
3358		 */
3359		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3360		BUG_ON(test_bit(R5_Wantread, &dev->flags));
3361		BUG_ON(sh->batch_head);
3362		if ((s->uptodate == disks - 1) &&
3363		    (s->failed && (disk_idx == s->failed_num[0] ||
3364				   disk_idx == s->failed_num[1]))) {
3365			/* the disk has failed and we're requested to fetch it;
3366			 * compute it instead
3367			 */
3368			pr_debug("Computing stripe %llu block %d\n",
3369			       (unsigned long long)sh->sector, disk_idx);
3370			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3371			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3372			set_bit(R5_Wantcompute, &dev->flags);
3373			sh->ops.target = disk_idx;
3374			sh->ops.target2 = -1; /* no 2nd target */
3375			s->req_compute = 1;
3376			/* Careful: from this point on 'uptodate' is in the eye
3377			 * of raid_run_ops which services 'compute' operations
3378			 * before writes. R5_Wantcompute flags a block that will
3379			 * be R5_UPTODATE by the time it is needed for a
3380			 * subsequent operation.
3381			 */
3382			s->uptodate++;
3383			return 1;
3384		} else if (s->uptodate == disks-2 && s->failed >= 2) {
3385			/* Computing 2-failure is *very* expensive; only
3386			 * do it if failed >= 2
3387			 */
3388			int other;
3389			for (other = disks; other--; ) {
3390				if (other == disk_idx)
3391					continue;
3392				if (!test_bit(R5_UPTODATE,
3393				      &sh->dev[other].flags))
3394					break;
3395			}
3396			BUG_ON(other < 0);
3397			pr_debug("Computing stripe %llu blocks %d,%d\n",
3398			       (unsigned long long)sh->sector,
3399			       disk_idx, other);
3400			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3401			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3402			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
3403			set_bit(R5_Wantcompute, &sh->dev[other].flags);
3404			sh->ops.target = disk_idx;
3405			sh->ops.target2 = other;
3406			s->uptodate += 2;
3407			s->req_compute = 1;
3408			return 1;
3409		} else if (test_bit(R5_Insync, &dev->flags)) {
3410			set_bit(R5_LOCKED, &dev->flags);
3411			set_bit(R5_Wantread, &dev->flags);
3412			s->locked++;
3413			pr_debug("Reading block %d (sync=%d)\n",
3414				disk_idx, s->syncing);
3415		}
3416	}
3417
3418	return 0;
3419}
3420
3421/**
3422 * handle_stripe_fill - read or compute data to satisfy pending requests.
3423 */
3424static void handle_stripe_fill(struct stripe_head *sh,
3425			       struct stripe_head_state *s,
3426			       int disks)
3427{
3428	int i;
3429
3430	/* look for blocks to read/compute, skip this if a compute
3431	 * is already in flight, or if the stripe contents are in the
3432	 * midst of changing due to a write
3433	 */
3434	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3435	    !sh->reconstruct_state)
3436		for (i = disks; i--; )
3437			if (fetch_block(sh, s, i, disks))
3438				break;
3439	set_bit(STRIPE_HANDLE, &sh->state);
3440}
3441
3442static void break_stripe_batch_list(struct stripe_head *head_sh,
3443				    unsigned long handle_flags);
3444/* handle_stripe_clean_event
3445 * any written block on an uptodate or failed drive can be returned.
3446 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3447 * never LOCKED, so we don't need to test 'failed' directly.
3448 */
3449static void handle_stripe_clean_event(struct r5conf *conf,
3450	struct stripe_head *sh, int disks, struct bio_list *return_bi)
3451{
3452	int i;
3453	struct r5dev *dev;
3454	int discard_pending = 0;
3455	struct stripe_head *head_sh = sh;
3456	bool do_endio = false;
3457
3458	for (i = disks; i--; )
3459		if (sh->dev[i].written) {
3460			dev = &sh->dev[i];
3461			if (!test_bit(R5_LOCKED, &dev->flags) &&
3462			    (test_bit(R5_UPTODATE, &dev->flags) ||
3463			     test_bit(R5_Discard, &dev->flags) ||
3464			     test_bit(R5_SkipCopy, &dev->flags))) {
3465				/* We can return any write requests */
3466				struct bio *wbi, *wbi2;
3467				pr_debug("Return write for disc %d\n", i);
3468				if (test_and_clear_bit(R5_Discard, &dev->flags))
3469					clear_bit(R5_UPTODATE, &dev->flags);
3470				if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
3471					WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
3472				}
3473				do_endio = true;
3474
3475returnbi:
3476				dev->page = dev->orig_page;
3477				wbi = dev->written;
3478				dev->written = NULL;
3479				while (wbi && wbi->bi_iter.bi_sector <
3480					dev->sector + STRIPE_SECTORS) {
3481					wbi2 = r5_next_bio(wbi, dev->sector);
3482					if (!raid5_dec_bi_active_stripes(wbi)) {
3483						md_write_end(conf->mddev);
3484						bio_list_add(return_bi, wbi);
3485					}
3486					wbi = wbi2;
3487				}
3488				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3489						STRIPE_SECTORS,
3490					 !test_bit(STRIPE_DEGRADED, &sh->state),
3491						0);
3492				if (head_sh->batch_head) {
3493					sh = list_first_entry(&sh->batch_list,
3494							      struct stripe_head,
3495							      batch_list);
3496					if (sh != head_sh) {
3497						dev = &sh->dev[i];
3498						goto returnbi;
3499					}
3500				}
3501				sh = head_sh;
3502				dev = &sh->dev[i];
3503			} else if (test_bit(R5_Discard, &dev->flags))
3504				discard_pending = 1;
3505		}
3506
3507	r5l_stripe_write_finished(sh);
3508
3509	if (!discard_pending &&
3510	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
3511		int hash;
3512		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
3513		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3514		if (sh->qd_idx >= 0) {
3515			clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
3516			clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
3517		}
3518		/* now that discard is done we can proceed with any sync */
3519		clear_bit(STRIPE_DISCARD, &sh->state);
3520		/*
3521		 * SCSI discard will change some bio fields and the stripe has
3522		 * no updated data, so remove it from hash list and the stripe
3523		 * will be reinitialized
3524		 */
3525unhash:
3526		hash = sh->hash_lock_index;
3527		spin_lock_irq(conf->hash_locks + hash);
3528		remove_hash(sh);
3529		spin_unlock_irq(conf->hash_locks + hash);
3530		if (head_sh->batch_head) {
3531			sh = list_first_entry(&sh->batch_list,
3532					      struct stripe_head, batch_list);
3533			if (sh != head_sh)
3534				goto unhash;
3535		}
3536		sh = head_sh;
3537
3538		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
3539			set_bit(STRIPE_HANDLE, &sh->state);
3540
3541	}
3542
3543	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3544		if (atomic_dec_and_test(&conf->pending_full_writes))
3545			md_wakeup_thread(conf->mddev->thread);
3546
3547	if (head_sh->batch_head && do_endio)
3548		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
3549}
3550
3551static void handle_stripe_dirtying(struct r5conf *conf,
3552				   struct stripe_head *sh,
3553				   struct stripe_head_state *s,
3554				   int disks)
3555{
3556	int rmw = 0, rcw = 0, i;
3557	sector_t recovery_cp = conf->mddev->recovery_cp;
3558
3559	/* Check whether resync is now happening or should start.
3560	 * If yes, then the array is dirty (after unclean shutdown or
3561	 * initial creation), so parity in some stripes might be inconsistent.
3562	 * In this case, we need to always do reconstruct-write, to ensure
3563	 * that in case of drive failure or read-error correction, we
3564	 * generate correct data from the parity.
3565	 */
3566	if (conf->rmw_level == PARITY_DISABLE_RMW ||
3567	    (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
3568	     s->failed == 0)) {
3569		/* Calculate the real rcw later - for now make it
3570		 * look like rcw is cheaper
3571		 */
3572		rcw = 1; rmw = 2;
3573		pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
3574			 conf->rmw_level, (unsigned long long)recovery_cp,
3575			 (unsigned long long)sh->sector);
3576	} else for (i = disks; i--; ) {
3577		/* would I have to read this buffer for read_modify_write */
3578		struct r5dev *dev = &sh->dev[i];
3579		if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3580		    !test_bit(R5_LOCKED, &dev->flags) &&
3581		    !(test_bit(R5_UPTODATE, &dev->flags) ||
3582		      test_bit(R5_Wantcompute, &dev->flags))) {
3583			if (test_bit(R5_Insync, &dev->flags))
3584				rmw++;
3585			else
3586				rmw += 2*disks;  /* cannot read it */
3587		}
3588		/* Would I have to read this buffer for reconstruct_write */
3589		if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3590		    i != sh->pd_idx && i != sh->qd_idx &&
3591		    !test_bit(R5_LOCKED, &dev->flags) &&
3592		    !(test_bit(R5_UPTODATE, &dev->flags) ||
3593		    test_bit(R5_Wantcompute, &dev->flags))) {
3594			if (test_bit(R5_Insync, &dev->flags))
3595				rcw++;
3596			else
3597				rcw += 2*disks;
3598		}
3599	}
3600	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
3601		(unsigned long long)sh->sector, rmw, rcw);
3602	set_bit(STRIPE_HANDLE, &sh->state);
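	/* Schedule reads for whichever strategy needs fewer of them;
	 * conf->rmw_level decides how a tie is broken.
	 */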
3603	if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
3604		/* prefer read-modify-write, but need to get some data */
3605		if (conf->mddev->queue)
3606			blk_add_trace_msg(conf->mddev->queue,
3607					  "raid5 rmw %llu %d",
3608					  (unsigned long long)sh->sector, rmw);
3609		for (i = disks; i--; ) {
3610			struct r5dev *dev = &sh->dev[i];
3611			if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
3612			    !test_bit(R5_LOCKED, &dev->flags) &&
3613			    !(test_bit(R5_UPTODATE, &dev->flags) ||
3614			    test_bit(R5_Wantcompute, &dev->flags)) &&
3615			    test_bit(R5_Insync, &dev->flags)) {
3616				if (test_bit(STRIPE_PREREAD_ACTIVE,
3617					     &sh->state)) {
3618					pr_debug("Read_old block %d for r-m-w\n",
3619						 i);
3620					set_bit(R5_LOCKED, &dev->flags);
3621					set_bit(R5_Wantread, &dev->flags);
3622					s->locked++;
3623				} else {
3624					set_bit(STRIPE_DELAYED, &sh->state);
3625					set_bit(STRIPE_HANDLE, &sh->state);
3626				}
3627			}
3628		}
3629	}
3630	if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
3631		/* want reconstruct write, but need to get some data */
3632		int qread = 0;
3633		rcw = 0;
3634		for (i = disks; i--; ) {
3635			struct r5dev *dev = &sh->dev[i];
3636			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3637			    i != sh->pd_idx && i != sh->qd_idx &&
3638			    !test_bit(R5_LOCKED, &dev->flags) &&
3639			    !(test_bit(R5_UPTODATE, &dev->flags) ||
3640			      test_bit(R5_Wantcompute, &dev->flags))) {
3641				rcw++;
3642				if (test_bit(R5_Insync, &dev->flags) &&
3643				    test_bit(STRIPE_PREREAD_ACTIVE,
3644					     &sh->state)) {
3645					pr_debug("Read_old block "
3646						"%d for Reconstruct\n", i);
3647					set_bit(R5_LOCKED, &dev->flags);
3648					set_bit(R5_Wantread, &dev->flags);
3649					s->locked++;
3650					qread++;
3651				} else {
3652					set_bit(STRIPE_DELAYED, &sh->state);
3653					set_bit(STRIPE_HANDLE, &sh->state);
3654				}
3655			}
3656		}
3657		if (rcw && conf->mddev->queue)
3658			blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
3659					  (unsigned long long)sh->sector,
3660					  rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
3661	}
3662
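	/* If both rmw and rcw would need a block that cannot be read
	 * (the counts were penalised by 2*disks above), delay the stripe
	 * unless prereading is already active.
	 */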
3663	if (rcw > disks && rmw > disks &&
3664	    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3665		set_bit(STRIPE_DELAYED, &sh->state);
3666
3667	/* now if nothing is locked, and if we have enough data,
3668	 * we can start a write request
3669	 */
3670	/* since handle_stripe can be called at any time we need to handle the
3671	 * case where a compute block operation has been submitted and then a
3672	 * subsequent call wants to start a write request.  raid_run_ops only
3673	 * handles the case where compute block and reconstruct are requested
3674	 * simultaneously.  If this is not the case then new writes need to be
3675	 * held off until the compute completes.
3676	 */
3677	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
3678	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
3679	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
3680		schedule_reconstruction(sh, s, rcw == 0, 0);
3681}
3682
3683static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
3684				struct stripe_head_state *s, int disks)
3685{
3686	struct r5dev *dev = NULL;
3687
3688	BUG_ON(sh->batch_head);
3689	set_bit(STRIPE_HANDLE, &sh->state);
3690
3691	switch (sh->check_state) {
3692	case check_state_idle:
3693		/* start a new check operation if there are no failures */
3694		if (s->failed == 0) {
3695			BUG_ON(s->uptodate != disks);
3696			sh->check_state = check_state_run;
3697			set_bit(STRIPE_OP_CHECK, &s->ops_request);
3698			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
3699			s->uptodate--;
3700			break;
3701		}
3702		dev = &sh->dev[s->failed_num[0]];
3703		/* fall through */
3704	case check_state_compute_result:
3705		sh->check_state = check_state_idle;
3706		if (!dev)
3707			dev = &sh->dev[sh->pd_idx];
3708
3709		/* check that a write has not made the stripe insync */
3710		if (test_bit(STRIPE_INSYNC, &sh->state))
3711			break;
3712
3713		/* either failed parity check, or recovery is happening */
3714		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3715		BUG_ON(s->uptodate != disks);
3716
3717		set_bit(R5_LOCKED, &dev->flags);
3718		s->locked++;
3719		set_bit(R5_Wantwrite, &dev->flags);
3720
3721		clear_bit(STRIPE_DEGRADED, &sh->state);
3722		set_bit(STRIPE_INSYNC, &sh->state);
3723		break;
3724	case check_state_run:
3725		break; /* we will be called again upon completion */
3726	case check_state_check_result:
3727		sh->check_state = check_state_idle;
3728
3729		/* if a failure occurred during the check operation, leave
3730		 * STRIPE_INSYNC not set and let the stripe be handled again
3731		 */
3732		if (s->failed)
3733			break;
3734
3735		/* handle a successful check operation, if parity is correct
3736		 * we are done.  Otherwise update the mismatch count and repair
3737		 * parity if !MD_RECOVERY_CHECK
3738		 */
3739		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
3740			/* parity is correct (on disc,
3741			 * not in buffer any more)
3742			 */
3743			set_bit(STRIPE_INSYNC, &sh->state);
3744		else {
3745			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3746			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3747				/* don't try to repair!! */
3748				set_bit(STRIPE_INSYNC, &sh->state);
3749			else {
3750				sh->check_state = check_state_compute_run;
3751				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3752				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3753				set_bit(R5_Wantcompute,
3754					&sh->dev[sh->pd_idx].flags);
3755				sh->ops.target = sh->pd_idx;
3756				sh->ops.target2 = -1;
3757				s->uptodate++;
3758			}
3759		}
3760		break;
3761	case check_state_compute_run:
3762		break;
3763	default:
3764		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3765		       __func__, sh->check_state,
3766		       (unsigned long long) sh->sector);
3767		BUG();
3768	}
3769}
3770
3771static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
3772				  struct stripe_head_state *s,
3773				  int disks)
3774{
3775	int pd_idx = sh->pd_idx;
3776	int qd_idx = sh->qd_idx;
3777	struct r5dev *dev;
3778
3779	BUG_ON(sh->batch_head);
3780	set_bit(STRIPE_HANDLE, &sh->state);
3781
3782	BUG_ON(s->failed > 2);
3783
3784	/* Want to check and possibly repair P and Q.
3785	 * However there could be one 'failed' device, in which
3786	 * case we can only check one of them, possibly using the
3787	 * other to generate missing data
3788	 */
3789
3790	switch (sh->check_state) {
3791	case check_state_idle:
3792		/* start a new check operation if there are < 2 failures */
3793		if (s->failed == s->q_failed) {
3794			/* The only possible failed device holds Q, so it
3795			 * makes sense to check P (If anything else were failed,
3796			 * we would have used P to recreate it).
3797			 */
3798			sh->check_state = check_state_run;
3799		}
3800		if (!s->q_failed && s->failed < 2) {
3801			/* Q is not failed, and we didn't use it to generate
3802			 * anything, so it makes sense to check it
3803			 */
3804			if (sh->check_state == check_state_run)
3805				sh->check_state = check_state_run_pq;
3806			else
3807				sh->check_state = check_state_run_q;
3808		}
3809
3810		/* discard potentially stale zero_sum_result */
3811		sh->ops.zero_sum_result = 0;
3812
3813		if (sh->check_state == check_state_run) {
3814			/* async_xor_zero_sum destroys the contents of P */
3815			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3816			s->uptodate--;
3817		}
3818		if (sh->check_state >= check_state_run &&
3819		    sh->check_state <= check_state_run_pq) {
3820			/* async_syndrome_zero_sum preserves P and Q, so
3821			 * no need to mark them !uptodate here
3822			 */
3823			set_bit(STRIPE_OP_CHECK, &s->ops_request);
3824			break;
3825		}
3826
3827		/* we have 2-disk failure */
3828		BUG_ON(s->failed != 2);
3829		/* fall through */
3830	case check_state_compute_result:
3831		sh->check_state = check_state_idle;
3832
3833		/* check that a write has not made the stripe insync */
3834		if (test_bit(STRIPE_INSYNC, &sh->state))
3835			break;
3836
3837		/* now write out any block on a failed drive,
3838		 * or P or Q if they were recomputed
3839		 */
3840		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
3841		if (s->failed == 2) {
3842			dev = &sh->dev[s->failed_num[1]];
3843			s->locked++;
3844			set_bit(R5_LOCKED, &dev->flags);
3845			set_bit(R5_Wantwrite, &dev->flags);
3846		}
3847		if (s->failed >= 1) {
3848			dev = &sh->dev[s->failed_num[0]];
3849			s->locked++;
3850			set_bit(R5_LOCKED, &dev->flags);
3851			set_bit(R5_Wantwrite, &dev->flags);
3852		}
3853		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3854			dev = &sh->dev[pd_idx];
3855			s->locked++;
3856			set_bit(R5_LOCKED, &dev->flags);
3857			set_bit(R5_Wantwrite, &dev->flags);
3858		}
3859		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3860			dev = &sh->dev[qd_idx];
3861			s->locked++;
3862			set_bit(R5_LOCKED, &dev->flags);
3863			set_bit(R5_Wantwrite, &dev->flags);
3864		}
3865		clear_bit(STRIPE_DEGRADED, &sh->state);
3866
3867		set_bit(STRIPE_INSYNC, &sh->state);
3868		break;
3869	case check_state_run:
3870	case check_state_run_q:
3871	case check_state_run_pq:
3872		break; /* we will be called again upon completion */
3873	case check_state_check_result:
3874		sh->check_state = check_state_idle;
3875
3876		/* handle a successful check operation, if parity is correct
3877		 * we are done.  Otherwise update the mismatch count and repair
3878		 * parity if !MD_RECOVERY_CHECK
3879		 */
3880		if (sh->ops.zero_sum_result == 0) {
3881			/* both parities are correct */
3882			if (!s->failed)
3883				set_bit(STRIPE_INSYNC, &sh->state);
3884			else {
3885				/* in contrast to the raid5 case we can validate
3886				 * parity, but still have a failure to write
3887				 * back
3888				 */
3889				sh->check_state = check_state_compute_result;
3890				/* Returning at this point means that we may go
3891				 * off and bring p and/or q uptodate again so
3892				 * we make sure to check zero_sum_result again
3893				 * to verify if p or q need writeback
3894				 */
3895			}
3896		} else {
3897			atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
3898			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
3899				/* don't try to repair!! */
3900				set_bit(STRIPE_INSYNC, &sh->state);
3901			else {
3902				int *target = &sh->ops.target;
3903
3904				sh->ops.target = -1;
3905				sh->ops.target2 = -1;
3906				sh->check_state = check_state_compute_run;
3907				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3908				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3909				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
3910					set_bit(R5_Wantcompute,
3911						&sh->dev[pd_idx].flags);
3912					*target = pd_idx;
3913					target = &sh->ops.target2;
3914					s->uptodate++;
3915				}
3916				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
3917					set_bit(R5_Wantcompute,
3918						&sh->dev[qd_idx].flags);
3919					*target = qd_idx;
3920					s->uptodate++;
3921				}
3922			}
3923		}
3924		break;
3925	case check_state_compute_run:
3926		break;
3927	default:
3928		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
3929		       __func__, sh->check_state,
3930		       (unsigned long long) sh->sector);
3931		BUG();
3932	}
3933}
3934
3935static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
3936{
3937	int i;
3938
3939	/* We have read all the blocks in this stripe and now we need to
3940	 * copy some of them into a target stripe for expand.
3941	 */
3942	struct dma_async_tx_descriptor *tx = NULL;
3943	BUG_ON(sh->batch_head);
3944	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3945	for (i = 0; i < sh->disks; i++)
3946		if (i != sh->pd_idx && i != sh->qd_idx) {
3947			int dd_idx, j;
3948			struct stripe_head *sh2;
3949			struct async_submit_ctl submit;
3950
3951			sector_t bn = raid5_compute_blocknr(sh, i, 1);
3952			sector_t s = raid5_compute_sector(conf, bn, 0,
3953							  &dd_idx, NULL);
3954			sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
3955			if (sh2 == NULL)
3956				/* so far only the early blocks of this stripe
3957				 * have been requested.  When later blocks
3958				 * get requested, we will try again
3959				 */
3960				continue;
3961			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
3962			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
3963				/* must have already done this block */
3964				raid5_release_stripe(sh2);
3965				continue;
3966			}
3967
3968			/* place all the copies on one channel */
3969			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
3970			tx = async_memcpy(sh2->dev[dd_idx].page,
3971					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
3972					  &submit);
3973
3974			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
3975			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
3976			for (j = 0; j < conf->raid_disks; j++)
3977				if (j != sh2->pd_idx &&
3978				    j != sh2->qd_idx &&
3979				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
3980					break;
3981			if (j == conf->raid_disks) {
3982				set_bit(STRIPE_EXPAND_READY, &sh2->state);
3983				set_bit(STRIPE_HANDLE, &sh2->state);
3984			}
3985			raid5_release_stripe(sh2);
3986
3987		}
3988	/* done submitting copies, wait for them to complete */
3989	async_tx_quiesce(&tx);
3990}
3991
3992/*
3993 * handle_stripe - do things to a stripe.
3994 *
3995 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
3996 * state of various bits to see what needs to be done.
3997 * Possible results:
3998 *    return some read requests which now have data
3999 *    return some write requests which are safely on storage
4000 *    schedule a read on some buffers
4001 *    schedule a write of some buffers
4002 *    return confirmation of parity correctness
4003 *
4004 */
4005
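/*
 * Gather the state of every device in the stripe into *s so that
 * handle_stripe() can decide what needs to be done.
 */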
4006static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4007{
4008	struct r5conf *conf = sh->raid_conf;
4009	int disks = sh->disks;
4010	struct r5dev *dev;
4011	int i;
4012	int do_recovery = 0;
4013
4014	memset(s, 0, sizeof(*s));
4015
4016	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
4017	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
4018	s->failed_num[0] = -1;
4019	s->failed_num[1] = -1;
4020	s->log_failed = r5l_log_disk_error(conf);
4021
4022	/* Now to look around and see what can be done */
4023	rcu_read_lock();
4024	for (i=disks; i--; ) {
4025		struct md_rdev *rdev;
4026		sector_t first_bad;
4027		int bad_sectors;
4028		int is_bad = 0;
4029
4030		dev = &sh->dev[i];
4031
4032		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4033			 i, dev->flags,
4034			 dev->toread, dev->towrite, dev->written);
4035		/* maybe we can reply to a read
4036		 *
4037		 * new wantfill requests are only permitted while
4038		 * ops_complete_biofill is guaranteed to be inactive
4039		 */
4040		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
4041		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
4042			set_bit(R5_Wantfill, &dev->flags);
4043
4044		/* now count some things */
4045		if (test_bit(R5_LOCKED, &dev->flags))
4046			s->locked++;
4047		if (test_bit(R5_UPTODATE, &dev->flags))
4048			s->uptodate++;
4049		if (test_bit(R5_Wantcompute, &dev->flags)) {
4050			s->compute++;
4051			BUG_ON(s->compute > 2);
4052		}
4053
4054		if (test_bit(R5_Wantfill, &dev->flags))
4055			s->to_fill++;
4056		else if (dev->toread)
4057			s->to_read++;
4058		if (dev->towrite) {
4059			s->to_write++;
4060			if (!test_bit(R5_OVERWRITE, &dev->flags))
4061				s->non_overwrite++;
4062		}
4063		if (dev->written)
4064			s->written++;
4065		/* Prefer to use the replacement for reads, but only
4066		 * if it is recovered enough and has no bad blocks.
4067		 */
4068		rdev = rcu_dereference(conf->disks[i].replacement);
4069		if (rdev && !test_bit(Faulty, &rdev->flags) &&
4070		    rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
4071		    !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4072				 &first_bad, &bad_sectors))
4073			set_bit(R5_ReadRepl, &dev->flags);
4074		else {
4075			if (rdev && !test_bit(Faulty, &rdev->flags))
4076				set_bit(R5_NeedReplace, &dev->flags);
4077			else
4078				clear_bit(R5_NeedReplace, &dev->flags);
4079			rdev = rcu_dereference(conf->disks[i].rdev);
4080			clear_bit(R5_ReadRepl, &dev->flags);
4081		}
4082		if (rdev && test_bit(Faulty, &rdev->flags))
4083			rdev = NULL;
4084		if (rdev) {
4085			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
4086					     &first_bad, &bad_sectors);
4087			if (s->blocked_rdev == NULL
4088			    && (test_bit(Blocked, &rdev->flags)
4089				|| is_bad < 0)) {
4090				if (is_bad < 0)
4091					set_bit(BlockedBadBlocks,
4092						&rdev->flags);
4093				s->blocked_rdev = rdev;
4094				atomic_inc(&rdev->nr_pending);
4095			}
4096		}
4097		clear_bit(R5_Insync, &dev->flags);
4098		if (!rdev)
4099			/* Not in-sync */;
4100		else if (is_bad) {
4101			/* also not in-sync */
4102			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4103			    test_bit(R5_UPTODATE, &dev->flags)) {
4104				/* treat as in-sync, but with a read error
4105				 * which we can now try to correct
4106				 */
4107				set_bit(R5_Insync, &dev->flags);
4108				set_bit(R5_ReadError, &dev->flags);
4109			}
4110		} else if (test_bit(In_sync, &rdev->flags))
4111			set_bit(R5_Insync, &dev->flags);
4112		else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
4113			/* in sync if before recovery_offset */
4114			set_bit(R5_Insync, &dev->flags);
4115		else if (test_bit(R5_UPTODATE, &dev->flags) &&
4116			 test_bit(R5_Expanded, &dev->flags))
4117			/* If we've reshaped into here, we assume it is Insync.
4118			 * We will shortly update recovery_offset to make
4119			 * it official.
4120			 */
4121			set_bit(R5_Insync, &dev->flags);
4122
4123		if (test_bit(R5_WriteError, &dev->flags)) {
4124			/* This flag does not apply to '.replacement',
4125			 * only to .rdev, so make sure to check that */
4126			struct md_rdev *rdev2 = rcu_dereference(
4127				conf->disks[i].rdev);
4128			if (rdev2 == rdev)
4129				clear_bit(R5_Insync, &dev->flags);
4130			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4131				s->handle_bad_blocks = 1;
4132				atomic_inc(&rdev2->nr_pending);
4133			} else
4134				clear_bit(R5_WriteError, &dev->flags);
4135		}
4136		if (test_bit(R5_MadeGood, &dev->flags)) {
4137			/* This flag does not apply to '.replacement',
4138			 * only to .rdev, so make sure to check that */
4139			struct md_rdev *rdev2 = rcu_dereference(
4140				conf->disks[i].rdev);
4141			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4142				s->handle_bad_blocks = 1;
4143				atomic_inc(&rdev2->nr_pending);
4144			} else
4145				clear_bit(R5_MadeGood, &dev->flags);
4146		}
4147		if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
4148			struct md_rdev *rdev2 = rcu_dereference(
4149				conf->disks[i].replacement);
4150			if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4151				s->handle_bad_blocks = 1;
4152				atomic_inc(&rdev2->nr_pending);
4153			} else
4154				clear_bit(R5_MadeGoodRepl, &dev->flags);
4155		}
4156		if (!test_bit(R5_Insync, &dev->flags)) {
4157			/* The ReadError flag will just be confusing now */
4158			clear_bit(R5_ReadError, &dev->flags);
4159			clear_bit(R5_ReWrite, &dev->flags);
4160		}
4161		if (test_bit(R5_ReadError, &dev->flags))
4162			clear_bit(R5_Insync, &dev->flags);
4163		if (!test_bit(R5_Insync, &dev->flags)) {
4164			if (s->failed < 2)
4165				s->failed_num[s->failed] = i;
4166			s->failed++;
4167			if (rdev && !test_bit(Faulty, &rdev->flags))
4168				do_recovery = 1;
4169		}
4170	}
4171	if (test_bit(STRIPE_SYNCING, &sh->state)) {
4172		/* If there is a failed device being replaced,
4173		 *     we must be recovering.
4174		 * else if we are after recovery_cp, we must be syncing
4175		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
4176		 * else we can only be replacing
4177		 * sync and recovery both need to read all devices, and so
4178		 * use the same flag.
4179		 */
4180		if (do_recovery ||
4181		    sh->sector >= conf->mddev->recovery_cp ||
4182		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4183			s->syncing = 1;
4184		else
4185			s->replacing = 1;
4186	}
4187	rcu_read_unlock();
4188}
4189
4190static int clear_batch_ready(struct stripe_head *sh)
4191{
4192	/* Return '1' if this is a member of a batch, or
4193	 * '0' if it is a lone stripe or a head which can now be
4194	 * handled.
4195	 */
4196	struct stripe_head *tmp;
4197	if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4198		return (sh->batch_head && sh->batch_head != sh);
4199	spin_lock(&sh->stripe_lock);
4200	if (!sh->batch_head) {
4201		spin_unlock(&sh->stripe_lock);
4202		return 0;
4203	}
4204
4205	/*
4206	 * this stripe could be added to a batch list before we check
4207	 * BATCH_READY; skip it
4208	 */
4209	if (sh->batch_head != sh) {
4210		spin_unlock(&sh->stripe_lock);
4211		return 1;
4212	}
4213	spin_lock(&sh->batch_lock);
4214	list_for_each_entry(tmp, &sh->batch_list, batch_list)
4215		clear_bit(STRIPE_BATCH_READY, &tmp->state);
4216	spin_unlock(&sh->batch_lock);
4217	spin_unlock(&sh->stripe_lock);
4218
4219	/*
4220	 * BATCH_READY is cleared, no new stripes can be added.
4221	 * batch_list can be accessed without lock
4222	 */
4223	return 0;
4224}
4225
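/*
 * Detach all stripes from a batch so each one can again be handled
 * individually; handle_flags selects which members get STRIPE_HANDLE
 * set (0 means all of them).
 */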
4226static void break_stripe_batch_list(struct stripe_head *head_sh,
4227				    unsigned long handle_flags)
4228{
4229	struct stripe_head *sh, *next;
4230	int i;
4231	int do_wakeup = 0;
4232
4233	list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4234
4235		list_del_init(&sh->batch_list);
4236
4237		WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4238					  (1 << STRIPE_SYNCING) |
4239					  (1 << STRIPE_REPLACED) |
4240					  (1 << STRIPE_DELAYED) |
4241					  (1 << STRIPE_BIT_DELAY) |
4242					  (1 << STRIPE_FULL_WRITE) |
4243					  (1 << STRIPE_BIOFILL_RUN) |
4244					  (1 << STRIPE_COMPUTE_RUN)  |
4245					  (1 << STRIPE_OPS_REQ_PENDING) |
4246					  (1 << STRIPE_DISCARD) |
4247					  (1 << STRIPE_BATCH_READY) |
4248					  (1 << STRIPE_BATCH_ERR) |
4249					  (1 << STRIPE_BITMAP_PENDING)),
4250			"stripe state: %lx\n", sh->state);
4251		WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
4252					      (1 << STRIPE_REPLACED)),
4253			"head stripe state: %lx\n", head_sh->state);
4254
4255		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4256					    (1 << STRIPE_PREREAD_ACTIVE) |
4257					    (1 << STRIPE_DEGRADED)),
4258			      head_sh->state & (1 << STRIPE_INSYNC));
4259
4260		sh->check_state = head_sh->check_state;
4261		sh->reconstruct_state = head_sh->reconstruct_state;
4262		for (i = 0; i < sh->disks; i++) {
4263			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
4264				do_wakeup = 1;
4265			sh->dev[i].flags = head_sh->dev[i].flags &
4266				(~((1 << R5_WriteError) | (1 << R5_Overlap)));
4267		}
4268		spin_lock_irq(&sh->stripe_lock);
4269		sh->batch_head = NULL;
4270		spin_unlock_irq(&sh->stripe_lock);
4271		if (handle_flags == 0 ||
4272		    sh->state & handle_flags)
4273			set_bit(STRIPE_HANDLE, &sh->state);
4274		raid5_release_stripe(sh);
4275	}
4276	spin_lock_irq(&head_sh->stripe_lock);
4277	head_sh->batch_head = NULL;
4278	spin_unlock_irq(&head_sh->stripe_lock);
4279	for (i = 0; i < head_sh->disks; i++)
4280		if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
4281			do_wakeup = 1;
4282	if (head_sh->state & handle_flags)
4283		set_bit(STRIPE_HANDLE, &head_sh->state);
4284
4285	if (do_wakeup)
4286		wake_up(&head_sh->raid_conf->wait_for_overlap);
4287}
4288
4289static void handle_stripe(struct stripe_head *sh)
4290{
4291	struct stripe_head_state s;
4292	struct r5conf *conf = sh->raid_conf;
4293	int i;
4294	int prexor;
4295	int disks = sh->disks;
4296	struct r5dev *pdev, *qdev;
4297
4298	clear_bit(STRIPE_HANDLE, &sh->state);
4299	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
4300		/* already being handled, ensure it gets handled
4301		 * again when current action finishes */
4302		set_bit(STRIPE_HANDLE, &sh->state);
4303		return;
4304	}
4305
4306	if (clear_batch_ready(sh)) {
4307		clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4308		return;
4309	}
4310
4311	if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4312		break_stripe_batch_list(sh, 0);
4313
4314	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4315		spin_lock(&sh->stripe_lock);
4316		/* Cannot process 'sync' concurrently with 'discard' */
4317		if (!test_bit(STRIPE_DISCARD, &sh->state) &&
4318		    test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4319			set_bit(STRIPE_SYNCING, &sh->state);
4320			clear_bit(STRIPE_INSYNC, &sh->state);
4321			clear_bit(STRIPE_REPLACED, &sh->state);
4322		}
4323		spin_unlock(&sh->stripe_lock);
4324	}
4325	clear_bit(STRIPE_DELAYED, &sh->state);
4326
4327	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4328		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
4329	       (unsigned long long)sh->sector, sh->state,
4330	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
4331	       sh->check_state, sh->reconstruct_state);
4332
4333	analyse_stripe(sh, &s);
4334
4335	if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
4336		goto finish;
4337
4338	if (s.handle_bad_blocks) {
4339		set_bit(STRIPE_HANDLE, &sh->state);
4340		goto finish;
4341	}
4342
4343	if (unlikely(s.blocked_rdev)) {
4344		if (s.syncing || s.expanding || s.expanded ||
4345		    s.replacing || s.to_write || s.written) {
4346			set_bit(STRIPE_HANDLE, &sh->state);
4347			goto finish;
4348		}
4349		/* There is nothing for the blocked_rdev to block */
4350		rdev_dec_pending(s.blocked_rdev, conf->mddev);
4351		s.blocked_rdev = NULL;
4352	}
4353
4354	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
4355		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
4356		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
4357	}
4358
4359	pr_debug("locked=%d uptodate=%d to_read=%d"
4360	       " to_write=%d failed=%d failed_num=%d,%d\n",
4361	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4362	       s.failed_num[0], s.failed_num[1]);
4363	/* check if the array has lost more than max_degraded devices and,
4364	 * if so, some requests might need to be failed.
4365	 */
4366	if (s.failed > conf->max_degraded || s.log_failed) {
4367		sh->check_state = 0;
4368		sh->reconstruct_state = 0;
4369		break_stripe_batch_list(sh, 0);
4370		if (s.to_read+s.to_write+s.written)
4371			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
4372		if (s.syncing + s.replacing)
4373			handle_failed_sync(conf, sh, &s);
4374	}
4375
4376	/* Now we check to see if any write operations have recently
4377	 * completed
4378	 */
4379	prexor = 0;
4380	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
4381		prexor = 1;
4382	if (sh->reconstruct_state == reconstruct_state_drain_result ||
4383	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
4384		sh->reconstruct_state = reconstruct_state_idle;
4385
4386		/* All the 'written' buffers and the parity block are ready to
4387		 * be written back to disk
4388		 */
4389		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
4390		       !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
4391		BUG_ON(sh->qd_idx >= 0 &&
4392		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
4393		       !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
4394		for (i = disks; i--; ) {
4395			struct r5dev *dev = &sh->dev[i];
4396			if (test_bit(R5_LOCKED, &dev->flags) &&
4397				(i == sh->pd_idx || i == sh->qd_idx ||
4398				 dev->written)) {
4399				pr_debug("Writing block %d\n", i);
4400				set_bit(R5_Wantwrite, &dev->flags);
4401				if (prexor)
4402					continue;
4403				if (s.failed > 1)
4404					continue;
4405				if (!test_bit(R5_Insync, &dev->flags) ||
4406				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
4407				     s.failed == 0))
4408					set_bit(STRIPE_INSYNC, &sh->state);
4409			}
4410		}
4411		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4412			s.dec_preread_active = 1;
4413	}
4414
4415	/*
4416	 * might be able to return some write requests if the parity blocks
4417	 * are safe, or on a failed drive
4418	 */
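	/* Work out whether the parity blocks can be trusted: P or Q counts
	 * as failed if its device is in the failed list, and Q is always
	 * treated as failed on arrays below RAID6.
	 */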
4419	pdev = &sh->dev[sh->pd_idx];
4420	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
4421		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
4422	qdev = &sh->dev[sh->qd_idx];
4423	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
4424		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
4425		|| conf->level < 6;
4426
4427	if (s.written &&
4428	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
4429			     && !test_bit(R5_LOCKED, &pdev->flags)
4430			     && (test_bit(R5_UPTODATE, &pdev->flags) ||
4431				 test_bit(R5_Discard, &pdev->flags))))) &&
4432	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
4433			     && !test_bit(R5_LOCKED, &qdev->flags)
4434			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
4435				 test_bit(R5_Discard, &qdev->flags))))))
4436		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
4437
4438	/* Now we might consider reading some blocks, either to check/generate
4439	 * parity, or to satisfy requests
4440	 * or to load a block that is being partially written.
4441	 */
4442	if (s.to_read || s.non_overwrite
4443	    || (conf->level == 6 && s.to_write && s.failed)
4444	    || (s.syncing && (s.uptodate + s.compute < disks))
4445	    || s.replacing
4446	    || s.expanding)
4447		handle_stripe_fill(sh, &s, disks);
4448
4449	/* Now to consider new write requests and what else, if anything,
4450	 * should be read.  We do not handle new writes when:
4451	 * 1/ A 'write' operation (copy+xor) is already in flight.
4452	 * 2/ A 'check' operation is in flight, as it may clobber the parity
4453	 *    block.
4454	 */
4455	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
4456		handle_stripe_dirtying(conf, sh, &s, disks);
4457
4458	/* maybe we need to check and possibly fix the parity for this stripe
4459	 * Any reads will already have been scheduled, so we just see if enough
4460	 * data is available.  The parity check is held off while parity
4461	 * dependent operations are in flight.
4462	 */
4463	if (sh->check_state ||
4464	    (s.syncing && s.locked == 0 &&
4465	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4466	     !test_bit(STRIPE_INSYNC, &sh->state))) {
4467		if (conf->level == 6)
4468			handle_parity_checks6(conf, sh, &s, disks);
4469		else
4470			handle_parity_checks5(conf, sh, &s, disks);
4471	}
4472
4473	if ((s.replacing || s.syncing) && s.locked == 0
4474	    && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
4475	    && !test_bit(STRIPE_REPLACED, &sh->state)) {
4476		/* Write out to replacement devices where possible */
4477		for (i = 0; i < conf->raid_disks; i++)
4478			if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
4479				WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
4480				set_bit(R5_WantReplace, &sh->dev[i].flags);
4481				set_bit(R5_LOCKED, &sh->dev[i].flags);
4482				s.locked++;
4483			}
4484		if (s.replacing)
4485			set_bit(STRIPE_INSYNC, &sh->state);
4486		set_bit(STRIPE_REPLACED, &sh->state);
4487	}
4488	if ((s.syncing || s.replacing) && s.locked == 0 &&
4489	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
4490	    test_bit(STRIPE_INSYNC, &sh->state)) {
4491		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4492		clear_bit(STRIPE_SYNCING, &sh->state);
4493		if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
4494			wake_up(&conf->wait_for_overlap);
4495	}
4496
4497	/* If the failed drives are just a ReadError, then we might need
4498	 * to progress the repair/check process
4499	 */
4500	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
4501		for (i = 0; i < s.failed; i++) {
4502			struct r5dev *dev = &sh->dev[s.failed_num[i]];
4503			if (test_bit(R5_ReadError, &dev->flags)
4504			    && !test_bit(R5_LOCKED, &dev->flags)
4505			    && test_bit(R5_UPTODATE, &dev->flags)
4506				) {
4507				if (!test_bit(R5_ReWrite, &dev->flags)) {
4508					set_bit(R5_Wantwrite, &dev->flags);
4509					set_bit(R5_ReWrite, &dev->flags);
4510					set_bit(R5_LOCKED, &dev->flags);
4511					s.locked++;
4512				} else {
4513					/* let's read it back */
4514					set_bit(R5_Wantread, &dev->flags);
4515					set_bit(R5_LOCKED, &dev->flags);
4516					s.locked++;
4517				}
4518			}
4519		}
4520
 
4521	/* Finish reconstruct operations initiated by the expansion process */
4522	if (sh->reconstruct_state == reconstruct_state_result) {
4523		struct stripe_head *sh_src
4524			= raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
4525		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
4526			/* sh cannot be written until sh_src has been read.
4527			 * so arrange for sh to be delayed a little
4528			 */
4529			set_bit(STRIPE_DELAYED, &sh->state);
4530			set_bit(STRIPE_HANDLE, &sh->state);
4531			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
4532					      &sh_src->state))
4533				atomic_inc(&conf->preread_active_stripes);
4534			raid5_release_stripe(sh_src);
4535			goto finish;
4536		}
4537		if (sh_src)
4538			raid5_release_stripe(sh_src);
4539
4540		sh->reconstruct_state = reconstruct_state_idle;
4541		clear_bit(STRIPE_EXPANDING, &sh->state);
4542		for (i = conf->raid_disks; i--; ) {
4543			set_bit(R5_Wantwrite, &sh->dev[i].flags);
4544			set_bit(R5_LOCKED, &sh->dev[i].flags);
4545			s.locked++;
4546		}
4547	}
4548
4549	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
4550	    !sh->reconstruct_state) {
4551		/* Need to write out all blocks after computing parity */
4552		sh->disks = conf->raid_disks;
4553		stripe_set_idx(sh->sector, conf, 0, sh);
4554		schedule_reconstruction(sh, &s, 1, 1);
4555	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
4556		clear_bit(STRIPE_EXPAND_READY, &sh->state);
4557		atomic_dec(&conf->reshape_stripes);
4558		wake_up(&conf->wait_for_overlap);
4559		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
4560	}
4561
4562	if (s.expanding && s.locked == 0 &&
4563	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
4564		handle_stripe_expansion(conf, sh);
4565
4566finish:
4567	/* wait for this device to become unblocked */
4568	if (unlikely(s.blocked_rdev)) {
4569		if (conf->mddev->external)
4570			md_wait_for_blocked_rdev(s.blocked_rdev,
4571						 conf->mddev);
4572		else
4573			/* Internal metadata will immediately
4574			 * be written by raid5d, so we don't
4575			 * need to wait here.
4576			 */
4577			rdev_dec_pending(s.blocked_rdev,
4578					 conf->mddev);
4579	}
4580
4581	if (s.handle_bad_blocks)
4582		for (i = disks; i--; ) {
4583			struct md_rdev *rdev;
4584			struct r5dev *dev = &sh->dev[i];
4585			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
4586				/* We own a safe reference to the rdev */
4587				rdev = conf->disks[i].rdev;
4588				if (!rdev_set_badblocks(rdev, sh->sector,
4589							STRIPE_SECTORS, 0))
4590					md_error(conf->mddev, rdev);
4591				rdev_dec_pending(rdev, conf->mddev);
4592			}
4593			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
4594				rdev = conf->disks[i].rdev;
4595				rdev_clear_badblocks(rdev, sh->sector,
4596						     STRIPE_SECTORS, 0);
4597				rdev_dec_pending(rdev, conf->mddev);
4598			}
4599			if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
4600				rdev = conf->disks[i].replacement;
4601				if (!rdev)
4602					/* rdev has been moved down */
4603					rdev = conf->disks[i].rdev;
4604				rdev_clear_badblocks(rdev, sh->sector,
4605						     STRIPE_SECTORS, 0);
4606				rdev_dec_pending(rdev, conf->mddev);
4607			}
4608		}
4609
4610	if (s.ops_request)
4611		raid_run_ops(sh, s.ops_request);
4612
4613	ops_run_io(sh, &s);
4614
4615	if (s.dec_preread_active) {
4616		/* We delay this until after ops_run_io so that if make_request
4617		 * is waiting on a flush, it won't continue until the writes
4618		 * have actually been submitted.
4619		 */
4620		atomic_dec(&conf->preread_active_stripes);
4621		if (atomic_read(&conf->preread_active_stripes) <
4622		    IO_THRESHOLD)
4623			md_wakeup_thread(conf->mddev->thread);
4624	}
4625
4626	if (!bio_list_empty(&s.return_bi)) {
4627		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
4628			spin_lock_irq(&conf->device_lock);
4629			bio_list_merge(&conf->return_bi, &s.return_bi);
4630			spin_unlock_irq(&conf->device_lock);
4631			md_wakeup_thread(conf->mddev->thread);
4632		} else
4633			return_io(&s.return_bi);
4634	}
4635
4636	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
4637}
4638
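/*
 * Once preread activity has dropped below IO_THRESHOLD, move any
 * delayed stripes onto the hold list so they can be processed.
 */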
4639static void raid5_activate_delayed(struct r5conf *conf)
4640{
4641	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
4642		while (!list_empty(&conf->delayed_list)) {
4643			struct list_head *l = conf->delayed_list.next;
4644			struct stripe_head *sh;
4645			sh = list_entry(l, struct stripe_head, lru);
4646			list_del_init(l);
4647			clear_bit(STRIPE_DELAYED, &sh->state);
4648			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4649				atomic_inc(&conf->preread_active_stripes);
4650			list_add_tail(&sh->lru, &conf->hold_list);
4651			raid5_wakeup_stripe_thread(sh);
4652		}
4653	}
4654}
4655
4656static void activate_bit_delay(struct r5conf *conf,
4657	struct list_head *temp_inactive_list)
4658{
4659	/* device_lock is held */
4660	struct list_head head;
4661	list_add(&head, &conf->bitmap_list);
4662	list_del_init(&conf->bitmap_list);
4663	while (!list_empty(&head)) {
4664		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
4665		int hash;
4666		list_del_init(&sh->lru);
4667		atomic_inc(&sh->count);
4668		hash = sh->hash_lock_index;
4669		__release_stripe(conf, sh, &temp_inactive_list[hash]);
4670	}
4671}
4672
4673static int raid5_congested(struct mddev *mddev, int bits)
4674{
4675	struct r5conf *conf = mddev->private;
4676
4677	/* No difference between reads and writes.  Just check
4678	 * how busy the stripe_cache is
4679	 */
4680
4681	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
4682		return 1;
4683	if (conf->quiesce)
4684		return 1;
4685	if (atomic_read(&conf->empty_inactive_list_nr))
4686		return 1;
4687
4688	return 0;
4689}
4690
4691static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4692{
4693	struct r5conf *conf = mddev->private;
4694	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4695	unsigned int chunk_sectors;
4696	unsigned int bio_sectors = bio_sectors(bio);
4697
4698	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
4699	return  chunk_sectors >=
4700		((sector & (chunk_sectors - 1)) + bio_sectors);
4701}
4702
4703/*
4704 *  Add a bio to the retry LIFO in O(1) (we may be in interrupt context);
4705 *  it is later picked up by raid5d.
4706 */
4707static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
4708{
4709	unsigned long flags;
4710
4711	spin_lock_irqsave(&conf->device_lock, flags);
4712
4713	bi->bi_next = conf->retry_read_aligned_list;
4714	conf->retry_read_aligned_list = bi;
4715
4716	spin_unlock_irqrestore(&conf->device_lock, flags);
4717	md_wakeup_thread(conf->mddev->thread);
4718}
4719
4720static struct bio *remove_bio_from_retry(struct r5conf *conf)
4721{
4722	struct bio *bi;
4723
4724	bi = conf->retry_read_aligned;
4725	if (bi) {
4726		conf->retry_read_aligned = NULL;
4727		return bi;
4728	}
4729	bi = conf->retry_read_aligned_list;
4730	if (bi) {
4731		conf->retry_read_aligned_list = bi->bi_next;
4732		bi->bi_next = NULL;
4733		/*
4734		 * this sets the active stripe count to 1 and the processed
4735		 * stripe count to zero (upper 8 bits)
4736		 */
4737		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
4738	}
4739
4740	return bi;
4741}
4742
4743/*
4744 *  The "raid5_align_endio" should check if the read succeeded and if it
4745 *  did, call bio_endio on the original bio (having bio_put the new bio
4746 *  first).
4747 *  If the read failed, queue the original bio on the retry list so
4748 *  raid5d can retry it.
4748 */
4749static void raid5_align_endio(struct bio *bi)
4750{
4751	struct bio *raid_bi = bi->bi_private;
4752	struct mddev *mddev;
4753	struct r5conf *conf;
4754	struct md_rdev *rdev;
4755	int error = bi->bi_error;
4756
4757	bio_put(bi);
4758
4759	rdev = (void*)raid_bi->bi_next;
4760	raid_bi->bi_next = NULL;
4761	mddev = rdev->mddev;
4762	conf = mddev->private;
4763
4764	rdev_dec_pending(rdev, conf->mddev);
4765
4766	if (!error) {
4767		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
4768					 raid_bi, 0);
4769		bio_endio(raid_bi);
4770		if (atomic_dec_and_test(&conf->active_aligned_reads))
4771			wake_up(&conf->wait_for_quiescent);
4772		return;
4773	}
4774
4775	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
4776
4777	add_bio_to_retry(raid_bi, conf);
4778}
4779
4780static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
4781{
4782	struct r5conf *conf = mddev->private;
4783	int dd_idx;
4784	struct bio *align_bi;
4785	struct md_rdev *rdev;
4786	sector_t end_sector;
4787
4788	if (!in_chunk_boundary(mddev, raid_bio)) {
4789		pr_debug("%s: non aligned\n", __func__);
4790		return 0;
4791	}
4792	/*
4793	 * use bio_clone_mddev to make a copy of the bio
4794	 */
4795	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
4796	if (!align_bi)
4797		return 0;
4798	/*
4799	 *   set bi_end_io to a new function, and set bi_private to the
4800	 *     original bio.
4801	 */
4802	align_bi->bi_end_io  = raid5_align_endio;
4803	align_bi->bi_private = raid_bio;
4804	/*
4805	 *	compute position
4806	 */
4807	align_bi->bi_iter.bi_sector =
4808		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4809				     0, &dd_idx, NULL);
4810
4811	end_sector = bio_end_sector(align_bi);
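	/* Prefer a fully recovered replacement device for the read;
	 * otherwise fall back to the original rdev when it is usable
	 * for the whole range.
	 */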
4812	rcu_read_lock();
4813	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
4814	if (!rdev || test_bit(Faulty, &rdev->flags) ||
4815	    rdev->recovery_offset < end_sector) {
4816		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
4817		if (rdev &&
4818		    (test_bit(Faulty, &rdev->flags) ||
4819		    !(test_bit(In_sync, &rdev->flags) ||
4820		      rdev->recovery_offset >= end_sector)))
4821			rdev = NULL;
4822	}
4823	if (rdev) {
4824		sector_t first_bad;
4825		int bad_sectors;
4826
4827		atomic_inc(&rdev->nr_pending);
4828		rcu_read_unlock();
4829		raid_bio->bi_next = (void*)rdev;
4830		align_bi->bi_bdev = rdev->bdev;
4831		bio_clear_flag(align_bi, BIO_SEG_VALID);
4832
4833		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
4834				bio_sectors(align_bi),
4835				&first_bad, &bad_sectors)) {
4836			bio_put(align_bi);
4837			rdev_dec_pending(rdev, mddev);
4838			return 0;
4839		}
4840
4841		/* No reshape active, so we can trust rdev->data_offset */
4842		align_bi->bi_iter.bi_sector += rdev->data_offset;
4843
4844		spin_lock_irq(&conf->device_lock);
4845		wait_event_lock_irq(conf->wait_for_quiescent,
4846				    conf->quiesce == 0,
4847				    conf->device_lock);
4848		atomic_inc(&conf->active_aligned_reads);
4849		spin_unlock_irq(&conf->device_lock);
4850
4851		if (mddev->gendisk)
4852			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4853					      align_bi, disk_devt(mddev->gendisk),
4854					      raid_bio->bi_iter.bi_sector);
4855		generic_make_request(align_bi);
4856		return 1;
4857	} else {
4858		rcu_read_unlock();
4859		bio_put(align_bi);
4860		return 0;
4861	}
4862}
4863
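/*
 * Split a read at chunk boundaries and try to service each piece as a
 * single chunk-aligned read.  Return the first piece that could not be
 * handled (the remainder, if any, has been resubmitted), or NULL if
 * everything was issued as aligned reads.
 */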
4864static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
4865{
4866	struct bio *split;
4867
4868	do {
4869		sector_t sector = raid_bio->bi_iter.bi_sector;
4870		unsigned chunk_sects = mddev->chunk_sectors;
4871		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
4872
4873		if (sectors < bio_sectors(raid_bio)) {
4874			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
4875			bio_chain(split, raid_bio);
4876		} else
4877			split = raid_bio;
4878
4879		if (!raid5_read_one_chunk(mddev, split)) {
4880			if (split != raid_bio)
4881				generic_make_request(raid_bio);
4882			return split;
4883		}
4884	} while (split != raid_bio);
4885
4886	return NULL;
4887}
4888
4889/* __get_priority_stripe - get the next stripe to process
4890 *
4891 * Full stripe writes are allowed to pass preread active stripes up until
4892 * the bypass_threshold is exceeded.  In general the bypass_count
4893 * increments when the handle_list is handled before the hold_list; however, it
4894 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
4895 * stripe with in-flight i/o.  The bypass_count will be reset when the
4896 * head of the hold_list has changed, i.e. the head was promoted to the
4897 * handle_list.
4898 */
4899static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
4900{
4901	struct stripe_head *sh = NULL, *tmp;
4902	struct list_head *handle_list = NULL;
4903	struct r5worker_group *wg = NULL;
4904
4905	if (conf->worker_cnt_per_group == 0) {
4906		handle_list = &conf->handle_list;
4907	} else if (group != ANY_GROUP) {
4908		handle_list = &conf->worker_groups[group].handle_list;
4909		wg = &conf->worker_groups[group];
4910	} else {
4911		int i;
4912		for (i = 0; i < conf->group_cnt; i++) {
4913			handle_list = &conf->worker_groups[i].handle_list;
4914			wg = &conf->worker_groups[i];
4915			if (!list_empty(handle_list))
4916				break;
4917		}
4918	}
4919
4920	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
4921		  __func__,
4922		  list_empty(handle_list) ? "empty" : "busy",
4923		  list_empty(&conf->hold_list) ? "empty" : "busy",
4924		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
4925
4926	if (!list_empty(handle_list)) {
4927		sh = list_entry(handle_list->next, typeof(*sh), lru);
4928
4929		if (list_empty(&conf->hold_list))
4930			conf->bypass_count = 0;
4931		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
4932			if (conf->hold_list.next == conf->last_hold)
4933				conf->bypass_count++;
4934			else {
4935				conf->last_hold = conf->hold_list.next;
4936				conf->bypass_count -= conf->bypass_threshold;
4937				if (conf->bypass_count < 0)
4938					conf->bypass_count = 0;
4939			}
4940		}
4941	} else if (!list_empty(&conf->hold_list) &&
4942		   ((conf->bypass_threshold &&
4943		     conf->bypass_count > conf->bypass_threshold) ||
4944		    atomic_read(&conf->pending_full_writes) == 0)) {
4945
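		/* Pick the first held stripe that this worker group may
		 * handle (or any stripe when worker groups are not in use,
		 * ANY_GROUP was requested, or the stripe's CPU is offline).
		 */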
4946		list_for_each_entry(tmp, &conf->hold_list, lru) {
4947			if (conf->worker_cnt_per_group == 0 ||
4948			    group == ANY_GROUP ||
4949			    !cpu_online(tmp->cpu) ||
4950			    cpu_to_group(tmp->cpu) == group) {
4951				sh = tmp;
4952				break;
4953			}
4954		}
4955
4956		if (sh) {
4957			conf->bypass_count -= conf->bypass_threshold;
4958			if (conf->bypass_count < 0)
4959				conf->bypass_count = 0;
4960		}
4961		wg = NULL;
4962	}
4963
4964	if (!sh)
4965		return NULL;
4966
4967	if (wg) {
4968		wg->stripes_cnt--;
4969		sh->group = NULL;
4970	}
4971	list_del_init(&sh->lru);
4972	BUG_ON(atomic_inc_return(&sh->count) != 1);
4973	return sh;
4974}
4975
4976struct raid5_plug_cb {
4977	struct blk_plug_cb	cb;
4978	struct list_head	list;
4979	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
4980};
4981
4982static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
4983{
4984	struct raid5_plug_cb *cb = container_of(
4985		blk_cb, struct raid5_plug_cb, cb);
4986	struct stripe_head *sh;
4987	struct mddev *mddev = cb->cb.data;
4988	struct r5conf *conf = mddev->private;
4989	int cnt = 0;
4990	int hash;
4991
4992	if (cb->list.next && !list_empty(&cb->list)) {
4993		spin_lock_irq(&conf->device_lock);
4994		while (!list_empty(&cb->list)) {
4995			sh = list_first_entry(&cb->list, struct stripe_head, lru);
4996			list_del_init(&sh->lru);
4997			/*
4998			 * avoid a race where release_stripe_plug() sees
4999			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5000			 * is still on our list
5001			 */
5002			smp_mb__before_atomic();
5003			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
5004			/*
5005			 * STRIPE_ON_RELEASE_LIST could be set here. In that
5006			 * case, the count is always > 1 here
5007			 */
5008			hash = sh->hash_lock_index;
5009			__release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
5010			cnt++;
5011		}
5012		spin_unlock_irq(&conf->device_lock);
5013	}
5014	release_inactive_stripe_list(conf, cb->temp_inactive_list,
5015				     NR_STRIPE_HASH_LOCKS);
5016	if (mddev->queue)
5017		trace_block_unplug(mddev->queue, cnt, !from_schedule);
5018	kfree(cb);
5019}
5020
5021static void release_stripe_plug(struct mddev *mddev,
5022				struct stripe_head *sh)
5023{
5024	struct blk_plug_cb *blk_cb = blk_check_plugged(
5025		raid5_unplug, mddev,
5026		sizeof(struct raid5_plug_cb));
5027	struct raid5_plug_cb *cb;
5028
5029	if (!blk_cb) {
5030		raid5_release_stripe(sh);
5031		return;
5032	}
5033
5034	cb = container_of(blk_cb, struct raid5_plug_cb, cb);
5035
5036	if (cb->list.next == NULL) {
5037		int i;
5038		INIT_LIST_HEAD(&cb->list);
5039		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5040			INIT_LIST_HEAD(cb->temp_inactive_list + i);
5041	}
5042
5043	if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
5044		list_add_tail(&sh->lru, &cb->list);
5045	else
5046		raid5_release_stripe(sh);
5047}
5048
5049static void make_discard_request(struct mddev *mddev, struct bio *bi)
5050{
5051	struct r5conf *conf = mddev->private;
5052	sector_t logical_sector, last_sector;
5053	struct stripe_head *sh;
5054	int remaining;
5055	int stripe_sectors;
5056
5057	if (mddev->reshape_position != MaxSector)
5058		/* Skip discard while reshape is happening */
5059		return;
5060
5061	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5062	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
5063
5064	bi->bi_next = NULL;
5065	bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
5066
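	/* Only complete stripes can be discarded: round the start up and
	 * the end down to full-stripe boundaries.
	 */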
5067	stripe_sectors = conf->chunk_sectors *
5068		(conf->raid_disks - conf->max_degraded);
5069	logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
5070					       stripe_sectors);
5071	sector_div(last_sector, stripe_sectors);
5072
5073	logical_sector *= conf->chunk_sectors;
5074	last_sector *= conf->chunk_sectors;
5075
5076	for (; logical_sector < last_sector;
5077	     logical_sector += STRIPE_SECTORS) {
5078		DEFINE_WAIT(w);
5079		int d;
5080	again:
5081		sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
5082		prepare_to_wait(&conf->wait_for_overlap, &w,
5083				TASK_UNINTERRUPTIBLE);
5084		set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5085		if (test_bit(STRIPE_SYNCING, &sh->state)) {
5086			raid5_release_stripe(sh);
5087			schedule();
5088			goto again;
5089		}
5090		clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5091		spin_lock_irq(&sh->stripe_lock);
5092		for (d = 0; d < conf->raid_disks; d++) {
5093			if (d == sh->pd_idx || d == sh->qd_idx)
5094				continue;
5095			if (sh->dev[d].towrite || sh->dev[d].toread) {
5096				set_bit(R5_Overlap, &sh->dev[d].flags);
5097				spin_unlock_irq(&sh->stripe_lock);
5098				raid5_release_stripe(sh);
5099				schedule();
5100				goto again;
5101			}
5102		}
5103		set_bit(STRIPE_DISCARD, &sh->state);
5104		finish_wait(&conf->wait_for_overlap, &w);
5105		sh->overwrite_disks = 0;
5106		for (d = 0; d < conf->raid_disks; d++) {
5107			if (d == sh->pd_idx || d == sh->qd_idx)
5108				continue;
5109			sh->dev[d].towrite = bi;
5110			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
5111			raid5_inc_bi_active_stripes(bi);
5112			sh->overwrite_disks++;
5113		}
5114		spin_unlock_irq(&sh->stripe_lock);
5115		if (conf->mddev->bitmap) {
5116			for (d = 0;
5117			     d < conf->raid_disks - conf->max_degraded;
5118			     d++)
5119				bitmap_startwrite(mddev->bitmap,
5120						  sh->sector,
5121						  STRIPE_SECTORS,
5122						  0);
5123			sh->bm_seq = conf->seq_flush + 1;
5124			set_bit(STRIPE_BIT_DELAY, &sh->state);
5125		}
5126
5127		set_bit(STRIPE_HANDLE, &sh->state);
5128		clear_bit(STRIPE_DELAYED, &sh->state);
5129		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5130			atomic_inc(&conf->preread_active_stripes);
5131		release_stripe_plug(mddev, sh);
5132	}
5133
5134	remaining = raid5_dec_bi_active_stripes(bi);
5135	if (remaining == 0) {
5136		md_write_end(mddev);
5137		bio_endio(bi);
5138	}
5139}
5140
5141static void raid5_make_request(struct mddev *mddev, struct bio * bi)
5142{
5143	struct r5conf *conf = mddev->private;
5144	int dd_idx;
5145	sector_t new_sector;
5146	sector_t logical_sector, last_sector;
5147	struct stripe_head *sh;
5148	const int rw = bio_data_dir(bi);
5149	int remaining;
5150	DEFINE_WAIT(w);
5151	bool do_prepare;
5152
5153	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
5154		int ret = r5l_handle_flush_request(conf->log, bi);
5155
5156		if (ret == 0)
5157			return;
5158		if (ret == -ENODEV) {
5159			md_flush_request(mddev, bi);
5160			return;
5161		}
5162		/* ret == -EAGAIN, fallback */
5163	}
5164
5165	md_write_start(mddev, bi);
5166
5167	/*
5168	 * If array is degraded, better not do chunk aligned read because
5169	 * later we might have to read it again in order to reconstruct
5170	 * data on failed drives.
5171	 */
5172	if (rw == READ && mddev->degraded == 0 &&
5173	    mddev->reshape_position == MaxSector) {
5174		bi = chunk_aligned_read(mddev, bi);
5175		if (!bi)
5176			return;
5177	}
5178
5179	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
5180		make_discard_request(mddev, bi);
5181		return;
5182	}
5183
5184	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
5185	last_sector = bio_end_sector(bi);
5186	bi->bi_next = NULL;
5187	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
5188
5189	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
5190	for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
5191		int previous;
5192		int seq;
5193
5194		do_prepare = false;
5195	retry:
5196		seq = read_seqcount_begin(&conf->gen_lock);
5197		previous = 0;
5198		if (do_prepare)
5199			prepare_to_wait(&conf->wait_for_overlap, &w,
5200				TASK_UNINTERRUPTIBLE);
5201		if (unlikely(conf->reshape_progress != MaxSector)) {
5202			/* spinlock is needed as reshape_progress may be
5203			 * 64bit on a 32bit platform, and so it might be
5204			 * possible to see a half-updated value
5205			 * Of course reshape_progress could change after
5206			 * the lock is dropped, so once we get a reference
5207			 * to the stripe that we think it is, we will have
5208			 * to check again.
5209			 */
5210			spin_lock_irq(&conf->device_lock);
5211			if (mddev->reshape_backwards
5212			    ? logical_sector < conf->reshape_progress
5213			    : logical_sector >= conf->reshape_progress) {
 
5214				previous = 1;
5215			} else {
5216				if (mddev->reshape_backwards
5217				    ? logical_sector < conf->reshape_safe
5218				    : logical_sector >= conf->reshape_safe) {
5219					spin_unlock_irq(&conf->device_lock);
5220					schedule();
5221					do_prepare = true;
5222					goto retry;
5223				}
5224			}
5225			spin_unlock_irq(&conf->device_lock);
5226		}
 
5227
5228		new_sector = raid5_compute_sector(conf, logical_sector,
5229						  previous,
5230						  &dd_idx, NULL);
5231		pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
5232			(unsigned long long)new_sector,
5233			(unsigned long long)logical_sector);
5234
5235		sh = raid5_get_active_stripe(conf, new_sector, previous,
5236				       (bi->bi_rw&RWA_MASK), 0);
5237		if (sh) {
5238			if (unlikely(previous)) {
5239				/* expansion might have moved on while waiting for a
5240				 * stripe, so we must do the range check again.
5241				 * Expansion could still move past after this
5242				 * test, but as we are holding a reference to
5243				 * 'sh', we know that if that happens,
5244				 *  STRIPE_EXPANDING will get set and the expansion
5245				 * won't proceed until we finish with the stripe.
5246				 */
5247				int must_retry = 0;
5248				spin_lock_irq(&conf->device_lock);
5249				if (mddev->reshape_backwards
5250				    ? logical_sector >= conf->reshape_progress
5251				    : logical_sector < conf->reshape_progress)
5252					/* mismatch, need to try again */
5253					must_retry = 1;
5254				spin_unlock_irq(&conf->device_lock);
5255				if (must_retry) {
5256					raid5_release_stripe(sh);
5257					schedule();
5258					do_prepare = true;
5259					goto retry;
5260				}
5261			}
5262			if (read_seqcount_retry(&conf->gen_lock, seq)) {
5263				/* Might have got the wrong stripe_head
5264				 * by accident
5265				 */
5266				raid5_release_stripe(sh);
5267				goto retry;
5268			}
5269
5270			if (rw == WRITE &&
5271			    logical_sector >= mddev->suspend_lo &&
5272			    logical_sector < mddev->suspend_hi) {
5273				raid5_release_stripe(sh);
5274				/* As the suspend_* range is controlled by
5275				 * userspace, we want an interruptible
5276				 * wait.
5277				 */
5278				flush_signals(current);
5279				prepare_to_wait(&conf->wait_for_overlap,
5280						&w, TASK_INTERRUPTIBLE);
5281				if (logical_sector >= mddev->suspend_lo &&
5282				    logical_sector < mddev->suspend_hi) {
5283					schedule();
5284					do_prepare = true;
5285				}
5286				goto retry;
5287			}
5288
5289			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
5290			    !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
5291				/* Stripe is busy expanding or
5292				 * add failed due to overlap.  Flush everything
5293				 * and wait a while
5294				 */
5295				md_wakeup_thread(mddev->thread);
5296				raid5_release_stripe(sh);
5297				schedule();
5298				do_prepare = true;
5299				goto retry;
5300			}
 
5301			set_bit(STRIPE_HANDLE, &sh->state);
5302			clear_bit(STRIPE_DELAYED, &sh->state);
5303			if ((!sh->batch_head || sh == sh->batch_head) &&
5304			    (bi->bi_rw & REQ_SYNC) &&
5305			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5306				atomic_inc(&conf->preread_active_stripes);
5307			release_stripe_plug(mddev, sh);
5308		} else {
5309			/* cannot get stripe for read-ahead, just give-up */
5310			bi->bi_error = -EIO;
 
5311			break;
5312		}
 
5313	}
5314	finish_wait(&conf->wait_for_overlap, &w);
 
5315
5316	remaining = raid5_dec_bi_active_stripes(bi);
5317	if (remaining == 0) {
5318
5319		if (rw == WRITE)
5320			md_write_end(mddev);
5321
5322		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
5323					 bi, 0);
5324		bio_endio(bi);
5325	}
5326}
5327
5328static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5329
5330static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5331{
5332	/* reshaping is quite different to recovery/resync so it is
5333	 * handled quite separately ... here.
5334	 *
5335	 * On each call to sync_request, we gather one chunk worth of
5336	 * destination stripes and flag them as expanding.
5337	 * Then we find all the source stripes and request reads.
5338	 * As the reads complete, handle_stripe will copy the data
5339	 * into the destination stripe and release that stripe.
5340	 */
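	/*
	 * Illustrative walk-through (annotation, with assumed numbers): take
	 * a 4-drive RAID5 being grown to 5 drives, 512-sector chunks in both
	 * geometries.  reshape_sectors is then 512, so one call gathers
	 * 512/STRIPE_SECTORS destination stripe_heads in the new (4 data
	 * disk) layout and marks them STRIPE_EXPANDING, then requests reads
	 * of the source stripes holding the same logical blocks in the old
	 * (3 data disk) layout; handle_stripe() later copies the data across
	 * and the destination stripes are released.
	 */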
5341	struct r5conf *conf = mddev->private;
5342	struct stripe_head *sh;
5343	sector_t first_sector, last_sector;
5344	int raid_disks = conf->previous_raid_disks;
5345	int data_disks = raid_disks - conf->max_degraded;
5346	int new_data_disks = conf->raid_disks - conf->max_degraded;
5347	int i;
5348	int dd_idx;
5349	sector_t writepos, readpos, safepos;
5350	sector_t stripe_addr;
5351	int reshape_sectors;
5352	struct list_head stripes;
5353	sector_t retn;
5354
5355	if (sector_nr == 0) {
5356		/* If restarting in the middle, skip the initial sectors */
5357		if (mddev->reshape_backwards &&
5358		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
5359			sector_nr = raid5_size(mddev, 0, 0)
5360				- conf->reshape_progress;
5361		} else if (mddev->reshape_backwards &&
5362			   conf->reshape_progress == MaxSector) {
5363			/* shouldn't happen, but just in case, finish up.*/
5364			sector_nr = MaxSector;
5365		} else if (!mddev->reshape_backwards &&
5366			   conf->reshape_progress > 0)
5367			sector_nr = conf->reshape_progress;
5368		sector_div(sector_nr, new_data_disks);
5369		if (sector_nr) {
5370			mddev->curr_resync_completed = sector_nr;
5371			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5372			*skipped = 1;
5373			retn = sector_nr;
5374			goto finish;
5375		}
5376	}
5377
5378	/* We need to process a full chunk at a time.
5379	 * If old and new chunk sizes differ, we need to process the
5380	 * largest of these
5381	 */
5382
5383	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
5384
5385	/* We update the metadata at least every 10 seconds, or when
5386	 * the data about to be copied would over-write the source of
5387	 * the data at the front of the range, i.e. when the point one
5388	 * new_stripe along from reshape_progress maps (in the new layout)
5389	 * to after where reshape_safe maps (in the old layout).
5390	 */
5391	writepos = conf->reshape_progress;
5392	sector_div(writepos, new_data_disks);
5393	readpos = conf->reshape_progress;
5394	sector_div(readpos, data_disks);
5395	safepos = conf->reshape_safe;
5396	sector_div(safepos, data_disks);
5397	if (mddev->reshape_backwards) {
5398		BUG_ON(writepos < reshape_sectors);
5399		writepos -= reshape_sectors;
5400		readpos += reshape_sectors;
5401		safepos += reshape_sectors;
5402	} else {
5403		writepos += reshape_sectors;
5404		/* readpos and safepos are worst-case calculations.
5405		 * A negative number is overly pessimistic, and causes
5406		 * obvious problems for unsigned storage.  So clip to 0.
5407		 */
5408		readpos -= min_t(sector_t, reshape_sectors, readpos);
5409		safepos -= min_t(sector_t, reshape_sectors, safepos);
5410	}
5411
5412	/* Having calculated the 'writepos' possibly use it
5413	 * to set 'stripe_addr' which is where we will write to.
5414	 */
5415	if (mddev->reshape_backwards) {
5416		BUG_ON(conf->reshape_progress == 0);
5417		stripe_addr = writepos;
5418		BUG_ON((mddev->dev_sectors &
5419			~((sector_t)reshape_sectors - 1))
5420		       - reshape_sectors - stripe_addr
5421		       != sector_nr);
5422	} else {
5423		BUG_ON(writepos != sector_nr + reshape_sectors);
5424		stripe_addr = sector_nr;
5425	}
5426
5427	/* 'writepos' is the most advanced device address we might write.
5428	 * 'readpos' is the least advanced device address we might read.
5429	 * 'safepos' is the least address recorded in the metadata as having
5430	 *     been reshaped.
5431	 * If there is a min_offset_diff, these are adjusted either by
5432	 * increasing the safepos/readpos if diff is negative, or
5433	 * increasing writepos if diff is positive.
5434	 * If 'readpos' is then behind 'writepos', there is no way that we can
5435	 * ensure safety in the face of a crash - that must be done by userspace
5436	 * making a backup of the data.  So in that case there is no particular
5437	 * rush to update metadata.
5438	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
5439	 * update the metadata to advance 'safepos' to match 'readpos' so that
5440	 * we can be safe in the event of a crash.
5441	 * So we insist on updating metadata if safepos is behind writepos and
5442	 * readpos is beyond writepos.
5443	 * In any case, update the metadata every 10 seconds.
5444	 * Maybe that number should be configurable, but I'm not sure it is
5445	 * worth it.... maybe it could be a multiple of safemode_delay???
5446	 */
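	/*
	 * Worked example (annotation, assuming a forward reshape with
	 * min_offset_diff == 0): with reshape_progress == 10240,
	 * reshape_safe == 8192, 4 new data disks, 3 old data disks and
	 * reshape_sectors == 512, the code above gives
	 *   writepos = 10240/4 + 512 = 3072
	 *   readpos  = 10240/3 - 512 = 2901
	 *   safepos  =  8192/3 - 512 = 2218
	 * so readpos is not beyond writepos and the checkpoint below is
	 * only forced by the 10 second timer.
	 */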
5447	if (conf->min_offset_diff < 0) {
5448		safepos += -conf->min_offset_diff;
5449		readpos += -conf->min_offset_diff;
5450	} else
5451		writepos += conf->min_offset_diff;
5452
5453	if ((mddev->reshape_backwards
5454	     ? (safepos > writepos && readpos < writepos)
5455	     : (safepos < writepos && readpos > writepos)) ||
5456	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
5457		/* Cannot proceed until we've updated the superblock... */
5458		wait_event(conf->wait_for_overlap,
5459			   atomic_read(&conf->reshape_stripes)==0
5460			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5461		if (atomic_read(&conf->reshape_stripes) != 0)
5462			return 0;
5463		mddev->reshape_position = conf->reshape_progress;
5464		mddev->curr_resync_completed = sector_nr;
5465		conf->reshape_checkpoint = jiffies;
5466		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5467		md_wakeup_thread(mddev->thread);
5468		wait_event(mddev->sb_wait, mddev->flags == 0 ||
5469			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5470		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5471			return 0;
5472		spin_lock_irq(&conf->device_lock);
5473		conf->reshape_safe = mddev->reshape_position;
5474		spin_unlock_irq(&conf->device_lock);
5475		wake_up(&conf->wait_for_overlap);
5476		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5477	}
5478
5479	INIT_LIST_HEAD(&stripes);
5480	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
5481		int j;
5482		int skipped_disk = 0;
5483		sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
5484		set_bit(STRIPE_EXPANDING, &sh->state);
5485		atomic_inc(&conf->reshape_stripes);
5486		/* If any of this stripe is beyond the end of the old
5487		 * array, then we need to zero those blocks
5488		 */
5489		for (j=sh->disks; j--;) {
5490			sector_t s;
5491			if (j == sh->pd_idx)
5492				continue;
5493			if (conf->level == 6 &&
5494			    j == sh->qd_idx)
5495				continue;
5496			s = raid5_compute_blocknr(sh, j, 0);
5497			if (s < raid5_size(mddev, 0, 0)) {
5498				skipped_disk = 1;
5499				continue;
5500			}
5501			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
5502			set_bit(R5_Expanded, &sh->dev[j].flags);
5503			set_bit(R5_UPTODATE, &sh->dev[j].flags);
5504		}
5505		if (!skipped_disk) {
5506			set_bit(STRIPE_EXPAND_READY, &sh->state);
5507			set_bit(STRIPE_HANDLE, &sh->state);
5508		}
5509		list_add(&sh->lru, &stripes);
5510	}
5511	spin_lock_irq(&conf->device_lock);
5512	if (mddev->reshape_backwards)
5513		conf->reshape_progress -= reshape_sectors * new_data_disks;
5514	else
5515		conf->reshape_progress += reshape_sectors * new_data_disks;
5516	spin_unlock_irq(&conf->device_lock);
5517	/* Ok, those stripes are ready. We can start scheduling
5518	 * reads on the source stripes.
5519	 * The source stripes are determined by mapping the first and last
5520	 * block on the destination stripes.
5521	 */
5522	first_sector =
5523		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
5524				     1, &dd_idx, NULL);
5525	last_sector =
5526		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
5527					    * new_data_disks - 1),
5528				     1, &dd_idx, NULL);
5529	if (last_sector >= mddev->dev_sectors)
5530		last_sector = mddev->dev_sectors - 1;
5531	while (first_sector <= last_sector) {
5532		sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
5533		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
5534		set_bit(STRIPE_HANDLE, &sh->state);
5535		raid5_release_stripe(sh);
5536		first_sector += STRIPE_SECTORS;
5537	}
5538	/* Now that the sources are clearly marked, we can release
5539	 * the destination stripes
5540	 */
5541	while (!list_empty(&stripes)) {
5542		sh = list_entry(stripes.next, struct stripe_head, lru);
5543		list_del_init(&sh->lru);
5544		raid5_release_stripe(sh);
5545	}
5546	/* If this takes us to the resync_max point where we have to pause,
5547	 * then we need to write out the superblock.
5548	 */
5549	sector_nr += reshape_sectors;
5550	retn = reshape_sectors;
5551finish:
5552	if (mddev->curr_resync_completed > mddev->resync_max ||
5553	    (sector_nr - mddev->curr_resync_completed) * 2
5554	    >= mddev->resync_max - mddev->curr_resync_completed) {
5555		/* Cannot proceed until we've updated the superblock... */
5556		wait_event(conf->wait_for_overlap,
5557			   atomic_read(&conf->reshape_stripes) == 0
5558			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5559		if (atomic_read(&conf->reshape_stripes) != 0)
5560			goto ret;
5561		mddev->reshape_position = conf->reshape_progress;
5562		mddev->curr_resync_completed = sector_nr;
5563		conf->reshape_checkpoint = jiffies;
5564		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5565		md_wakeup_thread(mddev->thread);
5566		wait_event(mddev->sb_wait,
5567			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
5568			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
5569		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5570			goto ret;
5571		spin_lock_irq(&conf->device_lock);
5572		conf->reshape_safe = mddev->reshape_position;
5573		spin_unlock_irq(&conf->device_lock);
5574		wake_up(&conf->wait_for_overlap);
5575		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5576	}
5577ret:
5578	return retn;
5579}
5580
5581static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
5582					  int *skipped)
5583{
5584	struct r5conf *conf = mddev->private;
5585	struct stripe_head *sh;
5586	sector_t max_sector = mddev->dev_sectors;
5587	sector_t sync_blocks;
5588	int still_degraded = 0;
5589	int i;
5590
5591	if (sector_nr >= max_sector) {
5592		/* just being told to finish up .. nothing much to do */
5593
5594		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
5595			end_reshape(conf);
5596			return 0;
5597		}
5598
5599		if (mddev->curr_resync < max_sector) /* aborted */
5600			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
5601					&sync_blocks, 1);
5602		else /* completed sync */
5603			conf->fullsync = 0;
5604		bitmap_close_sync(mddev->bitmap);
5605
5606		return 0;
5607	}
5608
5609	/* Allow raid5_quiesce to complete */
5610	wait_event(conf->wait_for_overlap, conf->quiesce != 2);
5611
5612	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5613		return reshape_request(mddev, sector_nr, skipped);
5614
5615	/* No need to check resync_max as we never do more than one
5616	 * stripe, and as resync_max will always be on a chunk boundary,
5617	 * if the check in md_do_sync didn't fire, there is no chance
5618	 * of overstepping resync_max here
5619	 */
5620
5621	/* if there are too many failed drives and we are trying
5622	 * to resync, then assert that we are finished, because there is
5623	 * nothing we can do.
5624	 */
5625	if (mddev->degraded >= conf->max_degraded &&
5626	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5627		sector_t rv = mddev->dev_sectors - sector_nr;
5628		*skipped = 1;
5629		return rv;
5630	}
5631	if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
5632	    !conf->fullsync &&
5633	    !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
5634	    sync_blocks >= STRIPE_SECTORS) {
5635		/* we can skip this block, and probably more */
5636		sync_blocks /= STRIPE_SECTORS;
5637		*skipped = 1;
5638		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
5639	}
5640
5641	bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
5642
5643	sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
5644	if (sh == NULL) {
5645		sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
5646		/* make sure we don't swamp the stripe cache if someone else
5647		 * is trying to get access
5648		 */
5649		schedule_timeout_uninterruptible(1);
5650	}
5651	/* Need to check if array will still be degraded after recovery/resync
5652	 * Note that in case of > 1 drive failures it's possible we're rebuilding
5653	 * one drive while leaving another faulty drive in the array.
5654	 */
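	/*
	 * Annotation: for example, on a RAID6 that lost two drives and is
	 * currently rebuilding only one of them, the loop below still finds
	 * a missing/Faulty rdev, so still_degraded stays 1 and (roughly)
	 * bitmap_start_sync() will leave the "needed" bits set so the same
	 * regions get resynced again once the array is no longer degraded.
	 */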
5655	rcu_read_lock();
5656	for (i = 0; i < conf->raid_disks; i++) {
5657		struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
5658
5659		if (rdev == NULL || test_bit(Faulty, &rdev->flags))
5660			still_degraded = 1;
5661	}
5662	rcu_read_unlock();
5663
5664	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
5665
5666	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
5667	set_bit(STRIPE_HANDLE, &sh->state);
5668
5669	raid5_release_stripe(sh);
 
5670
5671	return STRIPE_SECTORS;
5672}
5673
5674static int  retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5675{
5676	/* We may not be able to submit a whole bio at once as there
5677	 * may not be enough stripe_heads available.
5678	 * We cannot pre-allocate enough stripe_heads as we may need
5679	 * more than exist in the cache (if we allow ever larger chunks).
5680	 * So we do one stripe head at a time and record in
5681	 * ->bi_phys_segments (via raid5_set_bi_processed_stripes()) how many have been done.
5682	 *
5683	 * We *know* that this entire raid_bio is in one chunk, so
5684	 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
5685	 */
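	/*
	 * Annotation -- the loop below is, roughly:
	 *
	 *	for each STRIPE_SECTORS-sized piece of the bio:
	 *		skip it if already counted as processed;
	 *		try to get the stripe_head without blocking;
	 *		on failure, record how far we got and park the bio
	 *		on conf->retry_read_aligned for raid5d to retry;
	 *		otherwise attach the bio and handle the stripe.
	 */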
5686	struct stripe_head *sh;
5687	int dd_idx;
5688	sector_t sector, logical_sector, last_sector;
5689	int scnt = 0;
5690	int remaining;
5691	int handled = 0;
5692
5693	logical_sector = raid_bio->bi_iter.bi_sector &
5694		~((sector_t)STRIPE_SECTORS-1);
5695	sector = raid5_compute_sector(conf, logical_sector,
5696				      0, &dd_idx, NULL);
5697	last_sector = bio_end_sector(raid_bio);
5698
5699	for (; logical_sector < last_sector;
5700	     logical_sector += STRIPE_SECTORS,
5701		     sector += STRIPE_SECTORS,
5702		     scnt++) {
5703
5704		if (scnt < raid5_bi_processed_stripes(raid_bio))
5705			/* already done this stripe */
5706			continue;
5707
5708		sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
5709
5710		if (!sh) {
5711			/* failed to get a stripe - must wait */
5712			raid5_set_bi_processed_stripes(raid_bio, scnt);
5713			conf->retry_read_aligned = raid_bio;
5714			return handled;
5715		}
5716
5717		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
5718			raid5_release_stripe(sh);
5719			raid5_set_bi_processed_stripes(raid_bio, scnt);
 
5720			conf->retry_read_aligned = raid_bio;
5721			return handled;
5722		}
5723
5724		set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
5725		handle_stripe(sh);
5726		raid5_release_stripe(sh);
5727		handled++;
5728	}
5729	remaining = raid5_dec_bi_active_stripes(raid_bio);
5730	if (remaining == 0) {
5731		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
5732					 raid_bio, 0);
5733		bio_endio(raid_bio);
5734	}
5735	if (atomic_dec_and_test(&conf->active_aligned_reads))
5736		wake_up(&conf->wait_for_quiescent);
5737	return handled;
5738}
5739
5740static int handle_active_stripes(struct r5conf *conf, int group,
5741				 struct r5worker *worker,
5742				 struct list_head *temp_inactive_list)
5743{
5744	struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
5745	int i, batch_size = 0, hash;
5746	bool release_inactive = false;
5747
5748	while (batch_size < MAX_STRIPE_BATCH &&
5749			(sh = __get_priority_stripe(conf, group)) != NULL)
5750		batch[batch_size++] = sh;
5751
5752	if (batch_size == 0) {
5753		for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5754			if (!list_empty(temp_inactive_list + i))
5755				break;
5756		if (i == NR_STRIPE_HASH_LOCKS) {
5757			spin_unlock_irq(&conf->device_lock);
5758			r5l_flush_stripe_to_raid(conf->log);
5759			spin_lock_irq(&conf->device_lock);
5760			return batch_size;
5761		}
5762		release_inactive = true;
5763	}
5764	spin_unlock_irq(&conf->device_lock);
5765
5766	release_inactive_stripe_list(conf, temp_inactive_list,
5767				     NR_STRIPE_HASH_LOCKS);
5768
5769	r5l_flush_stripe_to_raid(conf->log);
5770	if (release_inactive) {
5771		spin_lock_irq(&conf->device_lock);
5772		return 0;
5773	}
5774
5775	for (i = 0; i < batch_size; i++)
5776		handle_stripe(batch[i]);
5777	r5l_write_stripe_run(conf->log);
5778
5779	cond_resched();
5780
5781	spin_lock_irq(&conf->device_lock);
5782	for (i = 0; i < batch_size; i++) {
5783		hash = batch[i]->hash_lock_index;
5784		__release_stripe(conf, batch[i], &temp_inactive_list[hash]);
5785	}
5786	return batch_size;
5787}
5788
5789static void raid5_do_work(struct work_struct *work)
5790{
5791	struct r5worker *worker = container_of(work, struct r5worker, work);
5792	struct r5worker_group *group = worker->group;
5793	struct r5conf *conf = group->conf;
5794	int group_id = group - conf->worker_groups;
5795	int handled;
5796	struct blk_plug plug;
5797
5798	pr_debug("+++ raid5worker active\n");
5799
5800	blk_start_plug(&plug);
5801	handled = 0;
5802	spin_lock_irq(&conf->device_lock);
5803	while (1) {
5804		int batch_size, released;
5805
5806		released = release_stripe_list(conf, worker->temp_inactive_list);
5807
5808		batch_size = handle_active_stripes(conf, group_id, worker,
5809						   worker->temp_inactive_list);
5810		worker->working = false;
5811		if (!batch_size && !released)
5812			break;
5813		handled += batch_size;
5814	}
5815	pr_debug("%d stripes handled\n", handled);
5816
5817	spin_unlock_irq(&conf->device_lock);
5818	blk_finish_plug(&plug);
5819
5820	pr_debug("--- raid5worker inactive\n");
5821}
5822
5823/*
5824 * This is our raid5 kernel thread.
5825 *
5826 * We scan the hash table for stripes which can be handled now.
5827 * During the scan, completed stripes are saved for us by the interrupt
5828 * handler, so that they will not have to wait for our next wakeup.
5829 */
5830static void raid5d(struct md_thread *thread)
5831{
5832	struct mddev *mddev = thread->mddev;
5833	struct r5conf *conf = mddev->private;
5834	int handled;
5835	struct blk_plug plug;
5836
5837	pr_debug("+++ raid5d active\n");
5838
5839	md_check_recovery(mddev);
5840
5841	if (!bio_list_empty(&conf->return_bi) &&
5842	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
5843		struct bio_list tmp = BIO_EMPTY_LIST;
5844		spin_lock_irq(&conf->device_lock);
5845		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
5846			bio_list_merge(&tmp, &conf->return_bi);
5847			bio_list_init(&conf->return_bi);
5848		}
5849		spin_unlock_irq(&conf->device_lock);
5850		return_io(&tmp);
5851	}
5852
5853	blk_start_plug(&plug);
5854	handled = 0;
5855	spin_lock_irq(&conf->device_lock);
5856	while (1) {
5857		struct bio *bio;
5858		int batch_size, released;
5859
5860		released = release_stripe_list(conf, conf->temp_inactive_list);
5861		if (released)
5862			clear_bit(R5_DID_ALLOC, &conf->cache_state);
5863
5864		if (!list_empty(&conf->bitmap_list)) {
5866			/* Now is a good time to flush some bitmap updates */
5867			conf->seq_flush++;
5868			spin_unlock_irq(&conf->device_lock);
5869			bitmap_unplug(mddev->bitmap);
5870			spin_lock_irq(&conf->device_lock);
5871			conf->seq_write = conf->seq_flush;
5872			activate_bit_delay(conf, conf->temp_inactive_list);
5873		}
5874		raid5_activate_delayed(conf);
 
5875
5876		while ((bio = remove_bio_from_retry(conf))) {
5877			int ok;
5878			spin_unlock_irq(&conf->device_lock);
5879			ok = retry_aligned_read(conf, bio);
5880			spin_lock_irq(&conf->device_lock);
5881			if (!ok)
5882				break;
5883			handled++;
5884		}
5885
5886		batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
5887						   conf->temp_inactive_list);
5888		if (!batch_size && !released)
5889			break;
5890		handled += batch_size;
5891
5892		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
5893			spin_unlock_irq(&conf->device_lock);
5894			md_check_recovery(mddev);
5895			spin_lock_irq(&conf->device_lock);
5896		}
5897	}
5898	pr_debug("%d stripes handled\n", handled);
5899
5900	spin_unlock_irq(&conf->device_lock);
5901	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5902	    mutex_trylock(&conf->cache_size_mutex)) {
5903		grow_one_stripe(conf, __GFP_NOWARN);
5904		/* Set flag even if allocation failed.  This helps
5905		 * slow down allocation requests when mem is short
5906		 */
5907		set_bit(R5_DID_ALLOC, &conf->cache_state);
5908		mutex_unlock(&conf->cache_size_mutex);
5909	}
5910
5911	r5l_flush_stripe_to_raid(conf->log);
5912
5913	async_tx_issue_pending_all();
5914	blk_finish_plug(&plug);
5915
5916	pr_debug("--- raid5d inactive\n");
5917}
5918
5919static ssize_t
5920raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
5921{
5922	struct r5conf *conf;
5923	int ret = 0;
5924	spin_lock(&mddev->lock);
5925	conf = mddev->private;
5926	if (conf)
5927		ret = sprintf(page, "%d\n", conf->min_nr_stripes);
5928	spin_unlock(&mddev->lock);
5929	return ret;
5930}
5931
5932int
5933raid5_set_cache_size(struct mddev *mddev, int size)
5934{
5935	struct r5conf *conf = mddev->private;
5936	int err;
5937
5938	if (size <= 16 || size > 32768)
5939		return -EINVAL;
5940
5941	conf->min_nr_stripes = size;
5942	mutex_lock(&conf->cache_size_mutex);
5943	while (size < conf->max_nr_stripes &&
5944	       drop_one_stripe(conf))
5945		;
5946	mutex_unlock(&conf->cache_size_mutex);
5947
5948
5949	err = md_allow_write(mddev);
5950	if (err)
5951		return err;
5952
5953	mutex_lock(&conf->cache_size_mutex);
5954	while (size > conf->max_nr_stripes)
5955		if (!grow_one_stripe(conf, GFP_KERNEL))
5956			break;
5957	mutex_unlock(&conf->cache_size_mutex);
5958
5959	return 0;
5960}
5961EXPORT_SYMBOL(raid5_set_cache_size);
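/*
 * Usage note (annotation): the cache size is normally tuned through the
 * stripe_cache_size sysfs attribute defined below, e.g.
 *
 *	echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * (md0 standing in for the array), which lands in
 * raid5_store_stripe_cache_size() and ends up calling this function.
 */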
5962
5963static ssize_t
5964raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
5965{
5966	struct r5conf *conf;
5967	unsigned long new;
5968	int err;
5969
5970	if (len >= PAGE_SIZE)
5971		return -EINVAL;
5972	if (kstrtoul(page, 10, &new))
5973		return -EINVAL;
5974	err = mddev_lock(mddev);
5975	if (err)
5976		return err;
5977	conf = mddev->private;
5978	if (!conf)
5979		err = -ENODEV;
5980	else
5981		err = raid5_set_cache_size(mddev, new);
5982	mddev_unlock(mddev);
5983
5984	return err ?: len;
5985}
5986
5987static struct md_sysfs_entry
5988raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
5989				raid5_show_stripe_cache_size,
5990				raid5_store_stripe_cache_size);
5991
5992static ssize_t
5993raid5_show_rmw_level(struct mddev  *mddev, char *page)
5994{
5995	struct r5conf *conf = mddev->private;
5996	if (conf)
5997		return sprintf(page, "%d\n", conf->rmw_level);
5998	else
5999		return 0;
6000}
6001
6002static ssize_t
6003raid5_store_rmw_level(struct mddev  *mddev, const char *page, size_t len)
6004{
6005	struct r5conf *conf = mddev->private;
6006	unsigned long new;
6007
6008	if (!conf)
6009		return -ENODEV;
6010
6011	if (len >= PAGE_SIZE)
6012		return -EINVAL;
6013
6014	if (kstrtoul(page, 10, &new))
6015		return -EINVAL;
6016
6017	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
6018		return -EINVAL;
6019
6020	if (new != PARITY_DISABLE_RMW &&
6021	    new != PARITY_ENABLE_RMW &&
6022	    new != PARITY_PREFER_RMW)
6023		return -EINVAL;
6024
6025	conf->rmw_level = new;
6026	return len;
6027}
6028
6029static struct md_sysfs_entry
6030raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
6031			 raid5_show_rmw_level,
6032			 raid5_store_rmw_level);
6033
6034
6035static ssize_t
6036raid5_show_preread_threshold(struct mddev *mddev, char *page)
6037{
6038	struct r5conf *conf;
6039	int ret = 0;
6040	spin_lock(&mddev->lock);
6041	conf = mddev->private;
6042	if (conf)
6043		ret = sprintf(page, "%d\n", conf->bypass_threshold);
6044	spin_unlock(&mddev->lock);
6045	return ret;
6046}
6047
6048static ssize_t
6049raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
6050{
6051	struct r5conf *conf;
6052	unsigned long new;
6053	int err;
6054
6055	if (len >= PAGE_SIZE)
6056		return -EINVAL;
6057	if (kstrtoul(page, 10, &new))
6058		return -EINVAL;
6059
6060	err = mddev_lock(mddev);
6061	if (err)
6062		return err;
6063	conf = mddev->private;
6064	if (!conf)
6065		err = -ENODEV;
6066	else if (new > conf->min_nr_stripes)
6067		err = -EINVAL;
6068	else
6069		conf->bypass_threshold = new;
6070	mddev_unlock(mddev);
6071	return err ?: len;
6072}
6073
6074static struct md_sysfs_entry
6075raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
6076					S_IRUGO | S_IWUSR,
6077					raid5_show_preread_threshold,
6078					raid5_store_preread_threshold);
6079
6080static ssize_t
6081raid5_show_skip_copy(struct mddev *mddev, char *page)
6082{
6083	struct r5conf *conf;
6084	int ret = 0;
6085	spin_lock(&mddev->lock);
6086	conf = mddev->private;
6087	if (conf)
6088		ret = sprintf(page, "%d\n", conf->skip_copy);
6089	spin_unlock(&mddev->lock);
6090	return ret;
6091}
6092
6093static ssize_t
6094raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6095{
6096	struct r5conf *conf;
6097	unsigned long new;
6098	int err;
6099
6100	if (len >= PAGE_SIZE)
6101		return -EINVAL;
6102	if (kstrtoul(page, 10, &new))
6103		return -EINVAL;
6104	new = !!new;
6105
6106	err = mddev_lock(mddev);
6107	if (err)
6108		return err;
6109	conf = mddev->private;
6110	if (!conf)
6111		err = -ENODEV;
6112	else if (new != conf->skip_copy) {
6113		mddev_suspend(mddev);
6114		conf->skip_copy = new;
6115		if (new)
6116			mddev->queue->backing_dev_info.capabilities |=
6117				BDI_CAP_STABLE_WRITES;
6118		else
6119			mddev->queue->backing_dev_info.capabilities &=
6120				~BDI_CAP_STABLE_WRITES;
6121		mddev_resume(mddev);
6122	}
6123	mddev_unlock(mddev);
6124	return err ?: len;
6125}
6126
6127static struct md_sysfs_entry
6128raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
6129					raid5_show_skip_copy,
6130					raid5_store_skip_copy);
6131
6132static ssize_t
6133stripe_cache_active_show(struct mddev *mddev, char *page)
6134{
6135	struct r5conf *conf = mddev->private;
6136	if (conf)
6137		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
6138	else
6139		return 0;
6140}
6141
6142static struct md_sysfs_entry
6143raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
6144
6145static ssize_t
6146raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6147{
6148	struct r5conf *conf;
6149	int ret = 0;
6150	spin_lock(&mddev->lock);
6151	conf = mddev->private;
6152	if (conf)
6153		ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
6154	spin_unlock(&mddev->lock);
6155	return ret;
6156}
6157
6158static int alloc_thread_groups(struct r5conf *conf, int cnt,
6159			       int *group_cnt,
6160			       int *worker_cnt_per_group,
6161			       struct r5worker_group **worker_groups);
6162static ssize_t
6163raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6164{
6165	struct r5conf *conf;
6166	unsigned long new;
6167	int err;
6168	struct r5worker_group *new_groups, *old_groups;
6169	int group_cnt, worker_cnt_per_group;
6170
6171	if (len >= PAGE_SIZE)
6172		return -EINVAL;
6173	if (kstrtoul(page, 10, &new))
6174		return -EINVAL;
6175
6176	err = mddev_lock(mddev);
6177	if (err)
6178		return err;
6179	conf = mddev->private;
6180	if (!conf)
6181		err = -ENODEV;
6182	else if (new != conf->worker_cnt_per_group) {
6183		mddev_suspend(mddev);
6184
6185		old_groups = conf->worker_groups;
6186		if (old_groups)
6187			flush_workqueue(raid5_wq);
6188
6189		err = alloc_thread_groups(conf, new,
6190					  &group_cnt, &worker_cnt_per_group,
6191					  &new_groups);
6192		if (!err) {
6193			spin_lock_irq(&conf->device_lock);
6194			conf->group_cnt = group_cnt;
6195			conf->worker_cnt_per_group = worker_cnt_per_group;
6196			conf->worker_groups = new_groups;
6197			spin_unlock_irq(&conf->device_lock);
6198
6199			if (old_groups)
6200				kfree(old_groups[0].workers);
6201			kfree(old_groups);
6202		}
6203		mddev_resume(mddev);
6204	}
6205	mddev_unlock(mddev);
6206
6207	return err ?: len;
6208}
6209
6210static struct md_sysfs_entry
6211raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
6212				raid5_show_group_thread_cnt,
6213				raid5_store_group_thread_cnt);
6214
6215static struct attribute *raid5_attrs[] =  {
6216	&raid5_stripecache_size.attr,
6217	&raid5_stripecache_active.attr,
6218	&raid5_preread_bypass_threshold.attr,
6219	&raid5_group_thread_cnt.attr,
6220	&raid5_skip_copy.attr,
6221	&raid5_rmw_level.attr,
6222	NULL,
6223};
6224static struct attribute_group raid5_attrs_group = {
6225	.name = NULL,
6226	.attrs = raid5_attrs,
6227};
6228
6229static int alloc_thread_groups(struct r5conf *conf, int cnt,
6230			       int *group_cnt,
6231			       int *worker_cnt_per_group,
6232			       struct r5worker_group **worker_groups)
6233{
6234	int i, j, k;
6235	ssize_t size;
6236	struct r5worker *workers;
6237
6238	*worker_cnt_per_group = cnt;
6239	if (cnt == 0) {
6240		*group_cnt = 0;
6241		*worker_groups = NULL;
6242		return 0;
6243	}
6244	*group_cnt = num_possible_nodes();
6245	size = sizeof(struct r5worker) * cnt;
6246	workers = kzalloc(size * *group_cnt, GFP_NOIO);
6247	*worker_groups = kzalloc(sizeof(struct r5worker_group) *
6248				*group_cnt, GFP_NOIO);
6249	if (!*worker_groups || !workers) {
6250		kfree(workers);
6251		kfree(*worker_groups);
6252		return -ENOMEM;
6253	}
6254
6255	for (i = 0; i < *group_cnt; i++) {
6256		struct r5worker_group *group;
6257
6258		group = &(*worker_groups)[i];
6259		INIT_LIST_HEAD(&group->handle_list);
6260		group->conf = conf;
6261		group->workers = workers + i * cnt;
6262
6263		for (j = 0; j < cnt; j++) {
6264			struct r5worker *worker = group->workers + j;
6265			worker->group = group;
6266			INIT_WORK(&worker->work, raid5_do_work);
6267
6268			for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
6269				INIT_LIST_HEAD(worker->temp_inactive_list + k);
6270		}
6271	}
6272
6273	return 0;
6274}
6275
6276static void free_thread_groups(struct r5conf *conf)
6277{
6278	if (conf->worker_groups)
6279		kfree(conf->worker_groups[0].workers);
6280	kfree(conf->worker_groups);
6281	conf->worker_groups = NULL;
6282}
6283
6284static sector_t
6285raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
6286{
6287	struct r5conf *conf = mddev->private;
6288
6289	if (!sectors)
6290		sectors = mddev->dev_sectors;
6291	if (!raid_disks)
6292		/* size is defined by the smallest of previous and new size */
6293		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
6294
6295	sectors &= ~((sector_t)conf->chunk_sectors - 1);
6296	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
6297	return sectors * (raid_disks - conf->max_degraded);
6298}
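/*
 * Worked example (annotation): for a 5-device RAID6 (max_degraded == 2)
 * whose old and new chunk sizes are both 1024 sectors and whose members
 * are 10000000 sectors, dev_sectors is rounded down to a whole number of
 * chunks (9999360) and the array size is 9999360 * (5 - 2) = 29998080
 * sectors.
 */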
6299
6300static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6301{
6302	safe_put_page(percpu->spare_page);
6303	if (percpu->scribble)
6304		flex_array_free(percpu->scribble);
6305	percpu->spare_page = NULL;
6306	percpu->scribble = NULL;
6307}
6308
6309static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
6310{
6311	if (conf->level == 6 && !percpu->spare_page)
6312		percpu->spare_page = alloc_page(GFP_KERNEL);
6313	if (!percpu->scribble)
6314		percpu->scribble = scribble_alloc(max(conf->raid_disks,
6315						      conf->previous_raid_disks),
6316						  max(conf->chunk_sectors,
6317						      conf->prev_chunk_sectors)
6318						   / STRIPE_SECTORS,
6319						  GFP_KERNEL);
6320
6321	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
6322		free_scratch_buffer(conf, percpu);
6323		return -ENOMEM;
6324	}
6325
6326	return 0;
6327}
6328
6329static void raid5_free_percpu(struct r5conf *conf)
6330{
 
6331	unsigned long cpu;
6332
6333	if (!conf->percpu)
6334		return;
6335
6336#ifdef CONFIG_HOTPLUG_CPU
6337	unregister_cpu_notifier(&conf->cpu_notify);
6338#endif
6339
6340	get_online_cpus();
6341	for_each_possible_cpu(cpu)
6342		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6343	put_online_cpus();
6344
6345	free_percpu(conf->percpu);
6346}
6347
6348static void free_conf(struct r5conf *conf)
6349{
6350	if (conf->log)
6351		r5l_exit_log(conf->log);
6352	if (conf->shrinker.seeks)
6353		unregister_shrinker(&conf->shrinker);
6354
6355	free_thread_groups(conf);
6356	shrink_stripes(conf);
6357	raid5_free_percpu(conf);
6358	kfree(conf->disks);
6359	kfree(conf->stripe_hashtbl);
6360	kfree(conf);
6361}
6362
6363#ifdef CONFIG_HOTPLUG_CPU
6364static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
6365			      void *hcpu)
6366{
6367	struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
6368	long cpu = (long)hcpu;
6369	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
6370
6371	switch (action) {
6372	case CPU_UP_PREPARE:
6373	case CPU_UP_PREPARE_FROZEN:
6374		if (alloc_scratch_buffer(conf, percpu)) {
6375			pr_err("%s: failed memory allocation for cpu%ld\n",
6376			       __func__, cpu);
6377			return notifier_from_errno(-ENOMEM);
6378		}
6379		break;
6380	case CPU_DEAD:
6381	case CPU_DEAD_FROZEN:
6382	case CPU_UP_CANCELED:
6383	case CPU_UP_CANCELED_FROZEN:
6384		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 
6385		break;
6386	default:
6387		break;
6388	}
6389	return NOTIFY_OK;
6390}
6391#endif
6392
6393static int raid5_alloc_percpu(struct r5conf *conf)
6394{
6395	unsigned long cpu;
6396	int err = 0;
6397
6398	conf->percpu = alloc_percpu(struct raid5_percpu);
6399	if (!conf->percpu)
6400		return -ENOMEM;
6401
6402#ifdef CONFIG_HOTPLUG_CPU
6403	conf->cpu_notify.notifier_call = raid456_cpu_notify;
6404	conf->cpu_notify.priority = 0;
6405	err = register_cpu_notifier(&conf->cpu_notify);
6406	if (err)
6407		return err;
6408#endif
6409
6410	get_online_cpus();
 
6411	for_each_present_cpu(cpu) {
6412		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
6413		if (err) {
6414			pr_err("%s: failed memory allocation for cpu%ld\n",
6415			       __func__, cpu);
6416			break;
6417		}
 
6418	}
6419	put_online_cpus();
6420
6421	if (!err) {
6422		conf->scribble_disks = max(conf->raid_disks,
6423			conf->previous_raid_disks);
6424		conf->scribble_sectors = max(conf->chunk_sectors,
6425			conf->prev_chunk_sectors);
6426	}
6427	return err;
6428}
6429
6430static unsigned long raid5_cache_scan(struct shrinker *shrink,
6431				      struct shrink_control *sc)
6432{
6433	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6434	unsigned long ret = SHRINK_STOP;
6435
6436	if (mutex_trylock(&conf->cache_size_mutex)) {
6437		ret = 0;
6438		while (ret < sc->nr_to_scan &&
6439		       conf->max_nr_stripes > conf->min_nr_stripes) {
6440			if (drop_one_stripe(conf) == 0) {
6441				ret = SHRINK_STOP;
6442				break;
6443			}
6444			ret++;
6445		}
6446		mutex_unlock(&conf->cache_size_mutex);
6447	}
6448	return ret;
6449}
6450
6451static unsigned long raid5_cache_count(struct shrinker *shrink,
6452				       struct shrink_control *sc)
6453{
6454	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6455
6456	if (conf->max_nr_stripes < conf->min_nr_stripes)
6457		/* unlikely, but not impossible */
6458		return 0;
6459	return conf->max_nr_stripes - conf->min_nr_stripes;
6460}
6461
6462static struct r5conf *setup_conf(struct mddev *mddev)
6463{
6464	struct r5conf *conf;
6465	int raid_disk, memory, max_disks;
6466	struct md_rdev *rdev;
6467	struct disk_info *disk;
6468	char pers_name[6];
6469	int i;
6470	int group_cnt, worker_cnt_per_group;
6471	struct r5worker_group *new_group;
6472
6473	if (mddev->new_level != 5
6474	    && mddev->new_level != 4
6475	    && mddev->new_level != 6) {
6476		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
6477		       mdname(mddev), mddev->new_level);
6478		return ERR_PTR(-EIO);
6479	}
6480	if ((mddev->new_level == 5
6481	     && !algorithm_valid_raid5(mddev->new_layout)) ||
6482	    (mddev->new_level == 6
6483	     && !algorithm_valid_raid6(mddev->new_layout))) {
6484		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
6485		       mdname(mddev), mddev->new_layout);
6486		return ERR_PTR(-EIO);
6487	}
6488	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
6489		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
6490		       mdname(mddev), mddev->raid_disks);
6491		return ERR_PTR(-EINVAL);
6492	}
6493
6494	if (!mddev->new_chunk_sectors ||
6495	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
6496	    !is_power_of_2(mddev->new_chunk_sectors)) {
6497		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
6498		       mdname(mddev), mddev->new_chunk_sectors << 9);
6499		return ERR_PTR(-EINVAL);
6500	}
6501
6502	conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
6503	if (conf == NULL)
6504		goto abort;
6505	/* Don't enable multi-threading by default */
6506	if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
6507				 &new_group)) {
6508		conf->group_cnt = group_cnt;
6509		conf->worker_cnt_per_group = worker_cnt_per_group;
6510		conf->worker_groups = new_group;
6511	} else
6512		goto abort;
6513	spin_lock_init(&conf->device_lock);
6514	seqcount_init(&conf->gen_lock);
6515	mutex_init(&conf->cache_size_mutex);
6516	init_waitqueue_head(&conf->wait_for_quiescent);
6517	init_waitqueue_head(&conf->wait_for_stripe);
6518	init_waitqueue_head(&conf->wait_for_overlap);
6519	INIT_LIST_HEAD(&conf->handle_list);
6520	INIT_LIST_HEAD(&conf->hold_list);
6521	INIT_LIST_HEAD(&conf->delayed_list);
6522	INIT_LIST_HEAD(&conf->bitmap_list);
6523	bio_list_init(&conf->return_bi);
6524	init_llist_head(&conf->released_stripes);
6525	atomic_set(&conf->active_stripes, 0);
6526	atomic_set(&conf->preread_active_stripes, 0);
6527	atomic_set(&conf->active_aligned_reads, 0);
6528	conf->bypass_threshold = BYPASS_THRESHOLD;
6529	conf->recovery_disabled = mddev->recovery_disabled - 1;
6530
6531	conf->raid_disks = mddev->raid_disks;
6532	if (mddev->reshape_position == MaxSector)
6533		conf->previous_raid_disks = mddev->raid_disks;
6534	else
6535		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
6536	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
 
6537
6538	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
6539			      GFP_KERNEL);
6540	if (!conf->disks)
6541		goto abort;
6542
6543	conf->mddev = mddev;
6544
6545	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
6546		goto abort;
6547
6548	/* We init hash_locks[0] separately so that it can be used
6549	 * as the reference lock in the spin_lock_nest_lock() call
6550	 * in lock_all_device_hash_locks_irq in order to convince
6551	 * lockdep that we know what we are doing.
6552	 */
6553	spin_lock_init(conf->hash_locks);
6554	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
6555		spin_lock_init(conf->hash_locks + i);
6556
6557	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6558		INIT_LIST_HEAD(conf->inactive_list + i);
6559
6560	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6561		INIT_LIST_HEAD(conf->temp_inactive_list + i);
6562
6563	conf->level = mddev->new_level;
6564	conf->chunk_sectors = mddev->new_chunk_sectors;
6565	if (raid5_alloc_percpu(conf) != 0)
6566		goto abort;
6567
6568	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
6569
6570	rdev_for_each(rdev, mddev) {
6571		raid_disk = rdev->raid_disk;
6572		if (raid_disk >= max_disks
6573		    || raid_disk < 0 || test_bit(Journal, &rdev->flags))
6574			continue;
6575		disk = conf->disks + raid_disk;
6576
6577		if (test_bit(Replacement, &rdev->flags)) {
6578			if (disk->replacement)
6579				goto abort;
6580			disk->replacement = rdev;
6581		} else {
6582			if (disk->rdev)
6583				goto abort;
6584			disk->rdev = rdev;
6585		}
6586
6587		if (test_bit(In_sync, &rdev->flags)) {
6588			char b[BDEVNAME_SIZE];
6589			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
6590			       " disk %d\n",
6591			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
6592		} else if (rdev->saved_raid_disk != raid_disk)
6593			/* Cannot rely on bitmap to complete recovery */
6594			conf->fullsync = 1;
6595	}
6596
 
6597	conf->level = mddev->new_level;
6598	if (conf->level == 6) {
6599		conf->max_degraded = 2;
6600		if (raid6_call.xor_syndrome)
6601			conf->rmw_level = PARITY_ENABLE_RMW;
6602		else
6603			conf->rmw_level = PARITY_DISABLE_RMW;
6604	} else {
6605		conf->max_degraded = 1;
6606		conf->rmw_level = PARITY_ENABLE_RMW;
6607	}
6608	conf->algorithm = mddev->new_layout;
 
6609	conf->reshape_progress = mddev->reshape_position;
6610	if (conf->reshape_progress != MaxSector) {
6611		conf->prev_chunk_sectors = mddev->chunk_sectors;
6612		conf->prev_algo = mddev->layout;
6613	} else {
6614		conf->prev_chunk_sectors = conf->chunk_sectors;
6615		conf->prev_algo = conf->algorithm;
6616	}
6617
6618	conf->min_nr_stripes = NR_STRIPES;
6619	memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
6620		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
6621	atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
6622	if (grow_stripes(conf, conf->min_nr_stripes)) {
6623		printk(KERN_ERR
6624		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
6625		       mdname(mddev), memory);
6626		goto abort;
6627	} else
6628		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
6629		       mdname(mddev), memory);
6630	/*
6631	 * Losing a stripe head costs more than the time to refill it,
6632	 * it reduces the queue depth and so can hurt throughput.
6633	 * So set it rather large, scaled by number of devices.
6634	 */
6635	conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
6636	conf->shrinker.scan_objects = raid5_cache_scan;
6637	conf->shrinker.count_objects = raid5_cache_count;
6638	conf->shrinker.batch = 128;
6639	conf->shrinker.flags = 0;
6640	register_shrinker(&conf->shrinker);
6641
6642	sprintf(pers_name, "raid%d", mddev->new_level);
6643	conf->thread = md_register_thread(raid5d, mddev, pers_name);
6644	if (!conf->thread) {
6645		printk(KERN_ERR
6646		       "md/raid:%s: couldn't allocate thread.\n",
6647		       mdname(mddev));
6648		goto abort;
6649	}
6650
6651	return conf;
6652
6653 abort:
6654	if (conf) {
6655		free_conf(conf);
6656		return ERR_PTR(-EIO);
6657	} else
6658		return ERR_PTR(-ENOMEM);
6659}
6660
 
6661static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
6662{
6663	switch (algo) {
6664	case ALGORITHM_PARITY_0:
6665		if (raid_disk < max_degraded)
6666			return 1;
6667		break;
6668	case ALGORITHM_PARITY_N:
6669		if (raid_disk >= raid_disks - max_degraded)
6670			return 1;
6671		break;
6672	case ALGORITHM_PARITY_0_6:
6673		if (raid_disk == 0 ||
6674		    raid_disk == raid_disks - 1)
6675			return 1;
6676		break;
6677	case ALGORITHM_LEFT_ASYMMETRIC_6:
6678	case ALGORITHM_RIGHT_ASYMMETRIC_6:
6679	case ALGORITHM_LEFT_SYMMETRIC_6:
6680	case ALGORITHM_RIGHT_SYMMETRIC_6:
6681		if (raid_disk == raid_disks - 1)
6682			return 1;
6683	}
6684	return 0;
6685}
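/*
 * Annotation: for example, with ALGORITHM_PARITY_0 on RAID5
 * (max_degraded == 1) only raid_disk 0 holds nothing but parity, with
 * ALGORITHM_PARITY_N it is the last device, and the *_6 layouts keep the
 * RAID6 Q syndrome on the last device.
 */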
6686
6687static int raid5_run(struct mddev *mddev)
6688{
6689	struct r5conf *conf;
6690	int working_disks = 0;
6691	int dirty_parity_disks = 0;
6692	struct md_rdev *rdev;
6693	struct md_rdev *journal_dev = NULL;
6694	sector_t reshape_offset = 0;
6695	int i;
6696	long long min_offset_diff = 0;
6697	int first = 1;
6698
6699	if (mddev->recovery_cp != MaxSector)
6700		printk(KERN_NOTICE "md/raid:%s: not clean"
6701		       " -- starting background reconstruction\n",
6702		       mdname(mddev));
6703
6704	rdev_for_each(rdev, mddev) {
6705		long long diff;
6706
6707		if (test_bit(Journal, &rdev->flags)) {
6708			journal_dev = rdev;
6709			continue;
6710		}
6711		if (rdev->raid_disk < 0)
6712			continue;
6713		diff = (rdev->new_data_offset - rdev->data_offset);
6714		if (first) {
6715			min_offset_diff = diff;
6716			first = 0;
6717		} else if (mddev->reshape_backwards &&
6718			 diff < min_offset_diff)
6719			min_offset_diff = diff;
6720		else if (!mddev->reshape_backwards &&
6721			 diff > min_offset_diff)
6722			min_offset_diff = diff;
6723	}
6724
6725	if (mddev->reshape_position != MaxSector) {
6726		/* Check that we can continue the reshape.
6727		 * Difficulties arise if the stripe we would write to
6728		 * next is at or after the stripe we would read from next.
6729		 * For a reshape that changes the number of devices, this
6730		 * is only possible for a very short time, and mdadm makes
6731		 * sure that time appears to have passed before assembling
6732		 * the array.  So we fail if that time hasn't passed.
6733		 * For a reshape that keeps the number of devices the same
6734		 * mdadm must be monitoring the reshape and keeping the
6735		 * critical areas read-only and backed up.  It will start
6736		 * the array in read-only mode, so we check for that.
6737		 */
6738		sector_t here_new, here_old;
6739		int old_disks;
6740		int max_degraded = (mddev->level == 6 ? 2 : 1);
6741		int chunk_sectors;
6742		int new_data_disks;
6743
6744		if (journal_dev) {
6745			printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
6746			       mdname(mddev));
6747			return -EINVAL;
6748		}
6749
6750		if (mddev->new_level != mddev->level) {
6751			printk(KERN_ERR "md/raid:%s: unsupported reshape "
6752			       "required - aborting.\n",
6753			       mdname(mddev));
6754			return -EINVAL;
6755		}
6756		old_disks = mddev->raid_disks - mddev->delta_disks;
6757		/* reshape_position must be on a new-stripe boundary, and one
6758		 * further up in new geometry must map after here in old
6759		 * geometry.
6760		 * If the chunk sizes are different, then as we perform reshape
6761		 * in units of the largest of the two, reshape_position needs to
6762		 * be a multiple of the largest chunk size times new data disks.
6763		 */
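		/*
		 * Annotation: e.g. growing a 4-drive RAID5 to 5 drives with
		 * 512-sector chunks gives new_data_disks == 4, so
		 * reshape_position must be a multiple of 512 * 4 == 2048
		 * sectors for the sector_div() below to leave no remainder.
		 */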
6764		here_new = mddev->reshape_position;
6765		chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
6766		new_data_disks = mddev->raid_disks - max_degraded;
6767		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
6768			printk(KERN_ERR "md/raid:%s: reshape_position not "
6769			       "on a stripe boundary\n", mdname(mddev));
6770			return -EINVAL;
6771		}
6772		reshape_offset = here_new * chunk_sectors;
6773		/* here_new is the stripe we will write to */
6774		here_old = mddev->reshape_position;
6775		sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
 
6776		/* here_old is the first stripe that we might need to read
6777		 * from */
6778		if (mddev->delta_disks == 0) {
6779			/* We cannot be sure it is safe to start an in-place
6780			 * reshape.  It is only safe if user-space is monitoring
6781			 * and taking constant backups.
6782			 * mdadm always starts a situation like this in
6783			 * readonly mode so it can take control before
6784			 * allowing any writes.  So just check for that.
6785			 */
6786			if (abs(min_offset_diff) >= mddev->chunk_sectors &&
6787			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
6788				/* not really in-place - so OK */;
6789			else if (mddev->ro == 0) {
6790				printk(KERN_ERR "md/raid:%s: in-place reshape "
6791				       "must be started in read-only mode "
6792				       "- aborting\n",
6793				       mdname(mddev));
6794				return -EINVAL;
6795			}
6796		} else if (mddev->reshape_backwards
6797		    ? (here_new * chunk_sectors + min_offset_diff <=
6798		       here_old * chunk_sectors)
6799		    : (here_new * chunk_sectors >=
6800		       here_old * chunk_sectors + (-min_offset_diff))) {
6801			/* Reading from the same stripe as writing to - bad */
6802			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
6803			       "auto-recovery - aborting.\n",
6804			       mdname(mddev));
6805			return -EINVAL;
6806		}
6807		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
6808		       mdname(mddev));
6809		/* OK, we should be able to continue; */
6810	} else {
6811		BUG_ON(mddev->level != mddev->new_level);
6812		BUG_ON(mddev->layout != mddev->new_layout);
6813		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
6814		BUG_ON(mddev->delta_disks != 0);
6815	}
6816
6817	if (mddev->private == NULL)
6818		conf = setup_conf(mddev);
6819	else
6820		conf = mddev->private;
6821
6822	if (IS_ERR(conf))
6823		return PTR_ERR(conf);
6824
6825	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
6826		printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
6827		       mdname(mddev));
6828		mddev->ro = 1;
6829		set_disk_ro(mddev->gendisk, 1);
6830	}
6831
6832	conf->min_offset_diff = min_offset_diff;
6833	mddev->thread = conf->thread;
6834	conf->thread = NULL;
6835	mddev->private = conf;
6836
6837	for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
6838	     i++) {
6839		rdev = conf->disks[i].rdev;
6840		if (!rdev && conf->disks[i].replacement) {
6841			/* The replacement is all we have yet */
6842			rdev = conf->disks[i].replacement;
6843			conf->disks[i].replacement = NULL;
6844			clear_bit(Replacement, &rdev->flags);
6845			conf->disks[i].rdev = rdev;
6846		}
6847		if (!rdev)
6848			continue;
6849		if (conf->disks[i].replacement &&
6850		    conf->reshape_progress != MaxSector) {
6851			/* replacements and reshape simply do not mix. */
6852			printk(KERN_ERR "md: cannot handle concurrent "
6853			       "replacement and reshape.\n");
6854			goto abort;
6855		}
6856		if (test_bit(In_sync, &rdev->flags)) {
6857			working_disks++;
6858			continue;
6859		}
6860		/* This disk is not fully in-sync.  However, if it
6861		 * just stored parity (beyond the recovery_offset),
6862		 * then we don't need to be concerned about the
6863		 * array being dirty.
6864		 * When reshape goes 'backwards', we never have
6865		 * partially completed devices, so we only need
6866		 * to worry about reshape going forwards.
6867		 */
6868		/* Hack because v0.91 doesn't store recovery_offset properly. */
6869		if (mddev->major_version == 0 &&
6870		    mddev->minor_version > 90)
6871			rdev->recovery_offset = reshape_offset;
6872
6873		if (rdev->recovery_offset < reshape_offset) {
6874			/* We need to check old and new layout */
6875			if (!only_parity(rdev->raid_disk,
6876					 conf->algorithm,
6877					 conf->raid_disks,
6878					 conf->max_degraded))
6879				continue;
6880		}
6881		if (!only_parity(rdev->raid_disk,
6882				 conf->prev_algo,
6883				 conf->previous_raid_disks,
6884				 conf->max_degraded))
6885			continue;
6886		dirty_parity_disks++;
6887	}
6888
6889	/*
6890	 * 0 for a fully functional array, 1 or 2 for a degraded array.
6891	 */
6892	mddev->degraded = calc_degraded(conf);
6893
6894	if (has_failed(conf)) {
6895		printk(KERN_ERR "md/raid:%s: not enough operational devices"
6896			" (%d/%d failed)\n",
6897			mdname(mddev), mddev->degraded, conf->raid_disks);
6898		goto abort;
6899	}
6900
6901	/* device size must be a multiple of chunk size */
6902	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
6903	mddev->resync_max_sectors = mddev->dev_sectors;
6904
6905	if (mddev->degraded > dirty_parity_disks &&
6906	    mddev->recovery_cp != MaxSector) {
6907		if (mddev->ok_start_degraded)
6908			printk(KERN_WARNING
6909			       "md/raid:%s: starting dirty degraded array"
6910			       " - data corruption possible.\n",
6911			       mdname(mddev));
6912		else {
6913			printk(KERN_ERR
6914			       "md/raid:%s: cannot start dirty degraded array.\n",
6915			       mdname(mddev));
6916			goto abort;
6917		}
6918	}
6919
6920	if (mddev->degraded == 0)
6921		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
6922		       " devices, algorithm %d\n", mdname(mddev), conf->level,
6923		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
6924		       mddev->new_layout);
6925	else
6926		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
6927		       " out of %d devices, algorithm %d\n",
6928		       mdname(mddev), conf->level,
6929		       mddev->raid_disks - mddev->degraded,
6930		       mddev->raid_disks, mddev->new_layout);
6931
6932	print_raid5_conf(conf);
6933
6934	if (conf->reshape_progress != MaxSector) {
6935		conf->reshape_safe = conf->reshape_progress;
6936		atomic_set(&conf->reshape_stripes, 0);
6937		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6938		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6939		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6940		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6941		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
6942							"reshape");
6943	}
6944
6945	/* Ok, everything is just fine now */
6946	if (mddev->to_remove == &raid5_attrs_group)
6947		mddev->to_remove = NULL;
6948	else if (mddev->kobj.sd &&
6949	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
6950		printk(KERN_WARNING
6951		       "raid5: failed to create sysfs attributes for %s\n",
6952		       mdname(mddev));
6953	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
6954
6955	if (mddev->queue) {
6956		int chunk_size;
6957		bool discard_supported = true;
6958		/* read-ahead size must cover two whole stripes, which
6959		 * is 2 * (datadisks) * chunksize, where 'datadisks' is the
6960		 * number of raid devices minus the parity devices
6961		 */
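		/* e.g. 4 data disks with a 512K chunk and 4K pages give
		 * 4 * 128 = 512 pages per full stripe, so read-ahead is raised
		 * to at least 1024 pages (4MB)
		 */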
6962		int data_disks = conf->previous_raid_disks - conf->max_degraded;
6963		int stripe = data_disks *
6964			((mddev->chunk_sectors << 9) / PAGE_SIZE);
6965		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
6966			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
6967
6968		chunk_size = mddev->chunk_sectors << 9;
6969		blk_queue_io_min(mddev->queue, chunk_size);
6970		blk_queue_io_opt(mddev->queue, chunk_size *
6971				 (conf->raid_disks - conf->max_degraded));
6972		mddev->queue->limits.raid_partial_stripes_expensive = 1;
6973		/*
6974		 * We can only discard a whole stripe. It doesn't make sense to
6975		 * discard the data disks but still write the parity disk
6976		 */
6977		stripe = stripe * PAGE_SIZE;
6978		/* Round up to power of 2, as discard handling
6979		 * currently assumes that */
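		/* each pass sets every bit below the lowest set bit and the +1
		 * carries past it, so the loop ends with stripe rounded up to
		 * the next power of two, e.g. 0xa000 -> 0xc000 -> 0x10000
		 */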
6980		while ((stripe-1) & stripe)
6981			stripe = (stripe | (stripe-1)) + 1;
6982		mddev->queue->limits.discard_alignment = stripe;
6983		mddev->queue->limits.discard_granularity = stripe;
6984		/*
6985		 * the unaligned part of a discard request will be ignored, so we
6986		 * can't guarantee discard_zeroes_data
6987		 */
6988		mddev->queue->limits.discard_zeroes_data = 0;
6989
6990		blk_queue_max_write_same_sectors(mddev->queue, 0);
6991
6992		rdev_for_each(rdev, mddev) {
6993			disk_stack_limits(mddev->gendisk, rdev->bdev,
6994					  rdev->data_offset << 9);
6995			disk_stack_limits(mddev->gendisk, rdev->bdev,
6996					  rdev->new_data_offset << 9);
6997			/*
6998			 * discard_zeroes_data is required, otherwise data
6999			 * could be lost. Consider a scenario: discard a stripe
7000			 * (the stripe could be inconsistent if
7001			 * discard_zeroes_data is 0); write one disk of the
7002			 * stripe (the stripe could be inconsistent again
7003			 * depending on which disks are used to calculate
7004			 * parity); the disk is broken; the stripe data of this
7005			 * disk is lost.
7006			 */
7007			if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
7008			    !bdev_get_queue(rdev->bdev)->
7009						limits.discard_zeroes_data)
7010				discard_supported = false;
7011			/* Unfortunately, discard_zeroes_data is not currently
7012			 * a guarantee - just a hint.  So we only allow DISCARD
7013			 * if the sysadmin has confirmed that only safe devices
7014			 * are in use by setting a module parameter.
7015			 */
7016			if (!devices_handle_discard_safely) {
7017				if (discard_supported) {
7018					pr_info("md/raid456: discard support disabled due to uncertainty.\n");
7019					pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
7020				}
7021				discard_supported = false;
7022			}
7023		}
7024
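		/* enable DISCARD only when the members are trusted to zero
		 * discarded blocks (see devices_handle_discard_safely above)
		 * and the stacked limits still allow discards of at least one
		 * full (power-of-2 rounded) stripe
		 */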
7025		if (discard_supported &&
7026		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
7027		    mddev->queue->limits.discard_granularity >= stripe)
7028			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
7029						mddev->queue);
7030		else
7031			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
7032						mddev->queue);
7033	}
7034
7035	if (journal_dev) {
7036		char b[BDEVNAME_SIZE];
7037
7038		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
7039		       mdname(mddev), bdevname(journal_dev->bdev, b));
7040		r5l_init_log(conf, journal_dev);
7041	}
7042
7043	return 0;
7044abort:
7045	md_unregister_thread(&mddev->thread);
7046	print_raid5_conf(conf);
7047	free_conf(conf);
7048	mddev->private = NULL;
7049	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
7050	return -EIO;
7051}
7052
7053static void raid5_free(struct mddev *mddev, void *priv)
7054{
7055	struct r5conf *conf = priv;
7056
7057	free_conf(conf);
7058	mddev->to_remove = &raid5_attrs_group;
7059}
7060
7061static void raid5_status(struct seq_file *seq, struct mddev *mddev)
7062{
7063	struct r5conf *conf = mddev->private;
7064	int i;
7065
7066	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
7067		conf->chunk_sectors / 2, mddev->layout);
7068	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
7069	for (i = 0; i < conf->raid_disks; i++)
7070		seq_printf (seq, "%s",
7071			       conf->disks[i].rdev &&
7072			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
7073	seq_printf (seq, "]");
7074}
7075
7076static void print_raid5_conf (struct r5conf *conf)
7077{
7078	int i;
7079	struct disk_info *tmp;
7080
7081	printk(KERN_DEBUG "RAID conf printout:\n");
7082	if (!conf) {
7083		printk("(conf==NULL)\n");
7084		return;
7085	}
7086	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
7087	       conf->raid_disks,
7088	       conf->raid_disks - conf->mddev->degraded);
7089
7090	for (i = 0; i < conf->raid_disks; i++) {
7091		char b[BDEVNAME_SIZE];
7092		tmp = conf->disks + i;
7093		if (tmp->rdev)
7094			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
7095			       i, !test_bit(Faulty, &tmp->rdev->flags),
7096			       bdevname(tmp->rdev->bdev, b));
7097	}
7098}
7099
7100static int raid5_spare_active(struct mddev *mddev)
7101{
7102	int i;
7103	struct r5conf *conf = mddev->private;
7104	struct disk_info *tmp;
7105	int count = 0;
7106	unsigned long flags;
7107
7108	for (i = 0; i < conf->raid_disks; i++) {
7109		tmp = conf->disks + i;
7110		if (tmp->replacement
7111		    && tmp->replacement->recovery_offset == MaxSector
7112		    && !test_bit(Faulty, &tmp->replacement->flags)
7113		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
7114			/* Replacement has just become active. */
7115			if (!tmp->rdev
7116			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
7117				count++;
7118			if (tmp->rdev) {
7119				/* Replaced device not technically faulty,
7120				 * but we need to be sure it gets removed
7121				 * and never re-added.
7122				 */
7123				set_bit(Faulty, &tmp->rdev->flags);
7124				sysfs_notify_dirent_safe(
7125					tmp->rdev->sysfs_state);
7126			}
7127			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
7128		} else if (tmp->rdev
7129		    && tmp->rdev->recovery_offset == MaxSector
7130		    && !test_bit(Faulty, &tmp->rdev->flags)
7131		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
7132			count++;
7133			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
7134		}
7135	}
7136	spin_lock_irqsave(&conf->device_lock, flags);
7137	mddev->degraded = calc_degraded(conf);
7138	spin_unlock_irqrestore(&conf->device_lock, flags);
7139	print_raid5_conf(conf);
7140	return count;
7141}
7142
7143static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7144{
7145	struct r5conf *conf = mddev->private;
7146	int err = 0;
7147	int number = rdev->raid_disk;
7148	struct md_rdev **rdevp;
7149	struct disk_info *p = conf->disks + number;
7150
7151	print_raid5_conf(conf);
7152	if (test_bit(Journal, &rdev->flags) && conf->log) {
7153		struct r5l_log *log;
7154		/*
7155		 * we can't wait for pending writes here, as this is called
7156		 * from raid5d; waiting would deadlock.
7157		 */
7158		if (atomic_read(&mddev->writes_pending))
7159			return -EBUSY;
7160		log = conf->log;
7161		conf->log = NULL;
7162		synchronize_rcu();
7163		r5l_exit_log(log);
7164		return 0;
7165	}
7166	if (rdev == p->rdev)
7167		rdevp = &p->rdev;
7168	else if (rdev == p->replacement)
7169		rdevp = &p->replacement;
7170	else
7171		return 0;
7172
7173	if (number >= conf->raid_disks &&
7174	    conf->reshape_progress == MaxSector)
7175		clear_bit(In_sync, &rdev->flags);
7176
7177	if (test_bit(In_sync, &rdev->flags) ||
7178	    atomic_read(&rdev->nr_pending)) {
7179		err = -EBUSY;
7180		goto abort;
7181	}
7182	/* Only remove non-faulty devices if recovery
7183	 * isn't possible.
7184	 */
7185	if (!test_bit(Faulty, &rdev->flags) &&
7186	    mddev->recovery_disabled != conf->recovery_disabled &&
7187	    !has_failed(conf) &&
7188	    (!p->replacement || p->replacement == rdev) &&
7189	    number < conf->raid_disks) {
7190		err = -EBUSY;
7191		goto abort;
7192	}
7193	*rdevp = NULL;
7194	synchronize_rcu();
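	/* after synchronize_rcu() no reader can still pick up the cleared
	 * pointer; a non-zero nr_pending means a request raced in first,
	 * so the pointer is restored and -EBUSY returned below
	 */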
7195	if (atomic_read(&rdev->nr_pending)) {
7196		/* lost the race, try later */
7197		err = -EBUSY;
7198		*rdevp = rdev;
7199	} else if (p->replacement) {
7200		/* We must have just cleared 'rdev' */
7201		p->rdev = p->replacement;
7202		clear_bit(Replacement, &p->replacement->flags);
7203		smp_mb(); /* Ensure other CPUs may see both as identical, but
7204			   * never see neither, provided they order their reads
7205			   */
7206		p->replacement = NULL;
7207		clear_bit(WantReplacement, &rdev->flags);
7208	} else
7209		/* We might have just removed the Replacement as faulty;
7210		 * clear the bit just in case
7211		 */
7212		clear_bit(WantReplacement, &rdev->flags);
7213abort:
7214
7215	print_raid5_conf(conf);
7216	return err;
7217}
7218
7219static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7220{
7221	struct r5conf *conf = mddev->private;
7222	int err = -EEXIST;
7223	int disk;
7224	struct disk_info *p;
7225	int first = 0;
7226	int last = conf->raid_disks - 1;
7227
7228	if (test_bit(Journal, &rdev->flags)) {
7229		char b[BDEVNAME_SIZE];
7230		if (conf->log)
7231			return -EBUSY;
7232
7233		rdev->raid_disk = 0;
7234		/*
7235		 * The array is in read-only mode if the journal is missing, so
7236		 * no write requests are running. We should be safe.
7237		 */
7238		r5l_init_log(conf, rdev);
7239		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
7240		       mdname(mddev), bdevname(rdev->bdev, b));
7241		return 0;
7242	}
7243	if (mddev->recovery_disabled == conf->recovery_disabled)
7244		return -EBUSY;
7245
7246	if (rdev->saved_raid_disk < 0 && has_failed(conf))
7247		/* no point adding a device */
7248		return -EINVAL;
7249
7250	if (rdev->raid_disk >= 0)
7251		first = last = rdev->raid_disk;
7252
7253	/*
7254	 * find the disk ... but prefer rdev->saved_raid_disk
7255	 * if possible.
7256	 */
7257	if (rdev->saved_raid_disk >= 0 &&
7258	    rdev->saved_raid_disk >= first &&
7259	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
7260		first = rdev->saved_raid_disk;
7261
7262	for (disk = first; disk <= last; disk++) {
7263		p = conf->disks + disk;
7264		if (p->rdev == NULL) {
7265			clear_bit(In_sync, &rdev->flags);
7266			rdev->raid_disk = disk;
7267			err = 0;
7268			if (rdev->saved_raid_disk != disk)
7269				conf->fullsync = 1;
7270			rcu_assign_pointer(p->rdev, rdev);
7271			goto out;
7272		}
7273	}
7274	for (disk = first; disk <= last; disk++) {
7275		p = conf->disks + disk;
7276		if (test_bit(WantReplacement, &p->rdev->flags) &&
7277		    p->replacement == NULL) {
7278			clear_bit(In_sync, &rdev->flags);
7279			set_bit(Replacement, &rdev->flags);
7280			rdev->raid_disk = disk;
7281			err = 0;
7282			conf->fullsync = 1;
7283			rcu_assign_pointer(p->replacement, rdev);
7284			break;
7285		}
7286	}
7287out:
7288	print_raid5_conf(conf);
7289	return err;
7290}
7291
7292static int raid5_resize(struct mddev *mddev, sector_t sectors)
7293{
7294	/* no resync is happening, and there is enough space
7295	 * on all devices, so we can resize.
7296	 * We need to make sure resync covers any new space.
7297	 * If the array is shrinking we should possibly wait until
7298	 * any io in the removed space completes, but it hardly seems
7299	 * worth it.
7300	 */
7301	sector_t newsize;
7302	struct r5conf *conf = mddev->private;
7303
7304	if (conf->log)
7305		return -EINVAL;
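	/* round the requested per-device size down to a whole number of
	 * chunks; raid4/5/6 chunk sizes are powers of two, so masking works
	 */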
7306	sectors &= ~((sector_t)conf->chunk_sectors - 1);
7307	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
7308	if (mddev->external_size &&
7309	    mddev->array_sectors > newsize)
7310		return -EINVAL;
7311	if (mddev->bitmap) {
7312		int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
7313		if (ret)
7314			return ret;
7315	}
7316	md_set_array_sectors(mddev, newsize);
7317	set_capacity(mddev->gendisk, mddev->array_sectors);
7318	revalidate_disk(mddev->gendisk);
7319	if (sectors > mddev->dev_sectors &&
7320	    mddev->recovery_cp > mddev->dev_sectors) {
7321		mddev->recovery_cp = mddev->dev_sectors;
7322		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7323	}
7324	mddev->dev_sectors = sectors;
7325	mddev->resync_max_sectors = sectors;
7326	return 0;
7327}
7328
7329static int check_stripe_cache(struct mddev *mddev)
7330{
7331	/* Can only proceed if there are plenty of stripe_heads.
7332	 * We need a minimum of one full stripe, and for sensible progress
7333	 * it is best to have about 4 times that.
7334	 * If we require 4 times, then the default 256 4K stripe_heads will
7335	 * allow for chunk sizes up to 256K, which is probably OK.
7336	 * If the chunk size is greater, user-space should request more
7337	 * stripe_heads first.
7338	 */
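	/* e.g. with 4K pages the default 256 stripe_heads cover exactly a
	 * 256K chunk ((256K / 4K) * 4 = 256); a 512K chunk needs 512 and the
	 * reshape is refused until the stripe cache is grown
	 */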
7339	struct r5conf *conf = mddev->private;
7340	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
7341	    > conf->min_nr_stripes ||
7342	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
7343	    > conf->min_nr_stripes) {
7344		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
7345		       mdname(mddev),
7346		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
7347			/ STRIPE_SIZE)*4);
7348		return 0;
7349	}
7350	return 1;
7351}
7352
7353static int check_reshape(struct mddev *mddev)
7354{
7355	struct r5conf *conf = mddev->private;
7356
7357	if (conf->log)
7358		return -EINVAL;
7359	if (mddev->delta_disks == 0 &&
7360	    mddev->new_layout == mddev->layout &&
7361	    mddev->new_chunk_sectors == mddev->chunk_sectors)
7362		return 0; /* nothing to do */
7363	if (has_failed(conf))
7364		return -EINVAL;
7365	if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
7366		/* We might be able to shrink, but the devices must
7367		 * be made bigger first.
7368		 * For raid6, 4 is the minimum size.
7369		 * Otherwise 2 is the minimum
7370		 */
7371		int min = 2;
7372		if (mddev->level == 6)
7373			min = 4;
7374		if (mddev->raid_disks + mddev->delta_disks < min)
7375			return -EINVAL;
7376	}
7377
7378	if (!check_stripe_cache(mddev))
7379		return -ENOSPC;
7380
7381	if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
7382	    mddev->delta_disks > 0)
7383		if (resize_chunks(conf,
7384				  conf->previous_raid_disks
7385				  + max(0, mddev->delta_disks),
7386				  max(mddev->new_chunk_sectors,
7387				      mddev->chunk_sectors)
7388			    ) < 0)
7389			return -ENOMEM;
7390	return resize_stripes(conf, (conf->previous_raid_disks
7391				     + mddev->delta_disks));
7392}
7393
7394static int raid5_start_reshape(struct mddev *mddev)
7395{
7396	struct r5conf *conf = mddev->private;
7397	struct md_rdev *rdev;
7398	int spares = 0;
7399	unsigned long flags;
7400
7401	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
7402		return -EBUSY;
7403
7404	if (!check_stripe_cache(mddev))
7405		return -ENOSPC;
7406
7407	if (has_failed(conf))
7408		return -EINVAL;
7409
7410	rdev_for_each(rdev, mddev) {
7411		if (!test_bit(In_sync, &rdev->flags)
7412		    && !test_bit(Faulty, &rdev->flags))
7413			spares++;
7414	}
7415
7416	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
7417		/* Not enough devices even to make a degraded array
7418		 * of that size
7419		 */
7420		return -EINVAL;
7421
7422	/* Refuse to reduce size of the array.  Any reductions in
7423	 * array size must be through explicit setting of array_size
7424	 * attribute.
7425	 */
7426	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
7427	    < mddev->array_sectors) {
7428		printk(KERN_ERR "md/raid:%s: array size must be reduced "
7429		       "before number of disks\n", mdname(mddev));
7430		return -EINVAL;
7431	}
7432
7433	atomic_set(&conf->reshape_stripes, 0);
7434	spin_lock_irq(&conf->device_lock);
7435	write_seqcount_begin(&conf->gen_lock);
7436	conf->previous_raid_disks = conf->raid_disks;
7437	conf->raid_disks += mddev->delta_disks;
7438	conf->prev_chunk_sectors = conf->chunk_sectors;
7439	conf->chunk_sectors = mddev->new_chunk_sectors;
7440	conf->prev_algo = conf->algorithm;
7441	conf->algorithm = mddev->new_layout;
7442	conf->generation++;
7443	/* Code that selects data_offset needs to see the generation update
7444	 * if reshape_progress has been set - so a memory barrier is needed.
7445	 */
7446	smp_mb();
7447	if (mddev->reshape_backwards)
7448		conf->reshape_progress = raid5_size(mddev, 0, 0);
7449	else
7450		conf->reshape_progress = 0;
7451	conf->reshape_safe = conf->reshape_progress;
7452	write_seqcount_end(&conf->gen_lock);
7453	spin_unlock_irq(&conf->device_lock);
7454
7455	/* Now make sure any requests that proceeded on the assumption
7456	 * the reshape wasn't running - like Discard or Read - have
7457	 * completed.
7458	 */
7459	mddev_suspend(mddev);
7460	mddev_resume(mddev);
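	/* suspend followed by an immediate resume waits for all in-flight
	 * requests to drain, so nothing issued under the old geometry is
	 * still outstanding when the reshape starts
	 */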
7461
7462	/* Add some new drives, as many as will fit.
7463	 * We know there are enough to make the newly sized array work.
7464	 * Don't add devices if we are reducing the number of
7465	 * devices in the array.  This is because it is not possible
7466	 * to correctly record the "partially reconstructed" state of
7467	 * such devices during the reshape and confusion could result.
7468	 */
7469	if (mddev->delta_disks >= 0) {
7470		rdev_for_each(rdev, mddev)
7471			if (rdev->raid_disk < 0 &&
7472			    !test_bit(Faulty, &rdev->flags)) {
7473				if (raid5_add_disk(mddev, rdev) == 0) {
7474					if (rdev->raid_disk
7475					    >= conf->previous_raid_disks)
7476						set_bit(In_sync, &rdev->flags);
7477					else
7478						rdev->recovery_offset = 0;
7479
7480					if (sysfs_link_rdev(mddev, rdev))
7481						/* Failure here is OK */;
7482				}
7483			} else if (rdev->raid_disk >= conf->previous_raid_disks
7484				   && !test_bit(Faulty, &rdev->flags)) {
7485				/* This is a spare that was manually added */
7486				set_bit(In_sync, &rdev->flags);
7487			}
7488
7489		/* When a reshape changes the number of devices,
7490		 * ->degraded is measured against the larger of the
7491		 * pre and post number of devices.
7492		 */
7493		spin_lock_irqsave(&conf->device_lock, flags);
7494		mddev->degraded = calc_degraded(conf);
7495		spin_unlock_irqrestore(&conf->device_lock, flags);
7496	}
7497	mddev->raid_disks = conf->raid_disks;
7498	mddev->reshape_position = conf->reshape_progress;
7499	set_bit(MD_CHANGE_DEVS, &mddev->flags);
7500
7501	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7502	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7503	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7504	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7505	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7506	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7507						"reshape");
7508	if (!mddev->sync_thread) {
7509		mddev->recovery = 0;
7510		spin_lock_irq(&conf->device_lock);
7511		write_seqcount_begin(&conf->gen_lock);
7512		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
7513		mddev->new_chunk_sectors =
7514			conf->chunk_sectors = conf->prev_chunk_sectors;
7515		mddev->new_layout = conf->algorithm = conf->prev_algo;
7516		rdev_for_each(rdev, mddev)
7517			rdev->new_data_offset = rdev->data_offset;
7518		smp_wmb();
7519		conf->generation --;
7520		conf->reshape_progress = MaxSector;
7521		mddev->reshape_position = MaxSector;
7522		write_seqcount_end(&conf->gen_lock);
7523		spin_unlock_irq(&conf->device_lock);
7524		return -EAGAIN;
7525	}
7526	conf->reshape_checkpoint = jiffies;
7527	md_wakeup_thread(mddev->sync_thread);
7528	md_new_event(mddev);
7529	return 0;
7530}
7531
7532/* This is called from the reshape thread and should make any
7533 * changes needed in 'conf'
7534 */
7535static void end_reshape(struct r5conf *conf)
7536{
7537
7538	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
7539		struct md_rdev *rdev;
7540
7541		spin_lock_irq(&conf->device_lock);
7542		conf->previous_raid_disks = conf->raid_disks;
7543		rdev_for_each(rdev, conf->mddev)
7544			rdev->data_offset = rdev->new_data_offset;
7545		smp_wmb();
7546		conf->reshape_progress = MaxSector;
7547		conf->mddev->reshape_position = MaxSector;
7548		spin_unlock_irq(&conf->device_lock);
7549		wake_up(&conf->wait_for_overlap);
7550
7551		/* read-ahead size must cover two whole stripes, which is
7552		 * 2 * (datadisks) * chunksize, where datadisks is the number of raid devices minus parity
7553		 */
7554		if (conf->mddev->queue) {
7555			int data_disks = conf->raid_disks - conf->max_degraded;
7556			int stripe = data_disks * ((conf->chunk_sectors << 9)
7557						   / PAGE_SIZE);
7558			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
7559				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
7560		}
7561	}
7562}
7563
7564/* This is called from the raid5d thread with mddev_lock held.
7565 * It makes config changes to the device.
7566 */
7567static void raid5_finish_reshape(struct mddev *mddev)
7568{
7569	struct r5conf *conf = mddev->private;
7570
7571	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7572
7573		if (mddev->delta_disks > 0) {
7574			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7575			set_capacity(mddev->gendisk, mddev->array_sectors);
7576			revalidate_disk(mddev->gendisk);
7577		} else {
7578			int d;
7579			spin_lock_irq(&conf->device_lock);
7580			mddev->degraded = calc_degraded(conf);
7581			spin_unlock_irq(&conf->device_lock);
7582			for (d = conf->raid_disks ;
7583			     d < conf->raid_disks - mddev->delta_disks;
7584			     d++) {
7585				struct md_rdev *rdev = conf->disks[d].rdev;
7586				if (rdev)
7587					clear_bit(In_sync, &rdev->flags);
7588				rdev = conf->disks[d].replacement;
7589				if (rdev)
7590					clear_bit(In_sync, &rdev->flags);
7591			}
7592		}
7593		mddev->layout = conf->algorithm;
7594		mddev->chunk_sectors = conf->chunk_sectors;
7595		mddev->reshape_position = MaxSector;
7596		mddev->delta_disks = 0;
7597		mddev->reshape_backwards = 0;
7598	}
7599}
7600
7601static void raid5_quiesce(struct mddev *mddev, int state)
7602{
7603	struct r5conf *conf = mddev->private;
7604
7605	switch(state) {
7606	case 2: /* resume for a suspend */
7607		wake_up(&conf->wait_for_overlap);
7608		break;
7609
7610	case 1: /* stop all writes */
7611		lock_all_device_hash_locks_irq(conf);
7612		/* '2' tells resync/reshape to pause so that all
7613		 * active stripes can drain
7614		 */
7615		conf->quiesce = 2;
7616		wait_event_cmd(conf->wait_for_quiescent,
7617				    atomic_read(&conf->active_stripes) == 0 &&
7618				    atomic_read(&conf->active_aligned_reads) == 0,
7619				    unlock_all_device_hash_locks_irq(conf),
7620				    lock_all_device_hash_locks_irq(conf));
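		/* wait_event_cmd() above drops the hash locks before each
		 * sleep and retakes them before re-testing the condition
		 */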
7621		conf->quiesce = 1;
7622		unlock_all_device_hash_locks_irq(conf);
7623		/* allow reshape to continue */
7624		wake_up(&conf->wait_for_overlap);
7625		break;
7626
7627	case 0: /* re-enable writes */
7628		lock_all_device_hash_locks_irq(conf);
7629		conf->quiesce = 0;
7630		wake_up(&conf->wait_for_quiescent);
7631		wake_up(&conf->wait_for_overlap);
7632		unlock_all_device_hash_locks_irq(conf);
7633		break;
7634	}
7635	r5l_quiesce(conf->log, state);
7636}
7637
7638static void *raid45_takeover_raid0(struct mddev *mddev, int level)
7639{
7640	struct r0conf *raid0_conf = mddev->private;
7641	sector_t sectors;
7642
7643	/* for raid0 takeover only one zone is supported */
7644	if (raid0_conf->nr_strip_zones > 1) {
7645		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
7646		       mdname(mddev));
7647		return ERR_PTR(-EINVAL);
7648	}
7649
7650	sectors = raid0_conf->strip_zone[0].zone_end;
7651	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
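	/* the single zone spans the whole array, so its end divided by the
	 * number of member devices gives the per-device size
	 */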
7652	mddev->dev_sectors = sectors;
7653	mddev->new_level = level;
7654	mddev->new_layout = ALGORITHM_PARITY_N;
7655	mddev->new_chunk_sectors = mddev->chunk_sectors;
7656	mddev->raid_disks += 1;
7657	mddev->delta_disks = 1;
7658	/* make sure it will not be marked as dirty */
7659	mddev->recovery_cp = MaxSector;
7660
7661	return setup_conf(mddev);
7662}
7663
7664static void *raid5_takeover_raid1(struct mddev *mddev)
7665{
7666	int chunksect;
7667
7668	if (mddev->raid_disks != 2 ||
7669	    mddev->degraded > 1)
7670		return ERR_PTR(-EINVAL);
7671
7672	/* Should check if there are write-behind devices? */
7673
7674	chunksect = 64*2; /* 64K by default */
7675
7676	/* The array must be an exact multiple of chunksize */
7677	while (chunksect && (mddev->array_sectors & (chunksect-1)))
7678		chunksect >>= 1;
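	/* e.g. a 2000-sector array is not a multiple of 128 or 64 sectors,
	 * but is a multiple of 16 (8K), so chunksect settles at 16
	 */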
7679
7680	if ((chunksect<<9) < STRIPE_SIZE)
7681		/* array size does not allow a suitable chunk size */
7682		return ERR_PTR(-EINVAL);
7683
7684	mddev->new_level = 5;
7685	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
7686	mddev->new_chunk_sectors = chunksect;
7687
7688	return setup_conf(mddev);
7689}
7690
7691static void *raid5_takeover_raid6(struct mddev *mddev)
7692{
7693	int new_layout;
7694
7695	switch (mddev->layout) {
7696	case ALGORITHM_LEFT_ASYMMETRIC_6:
7697		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
7698		break;
7699	case ALGORITHM_RIGHT_ASYMMETRIC_6:
7700		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
7701		break;
7702	case ALGORITHM_LEFT_SYMMETRIC_6:
7703		new_layout = ALGORITHM_LEFT_SYMMETRIC;
7704		break;
7705	case ALGORITHM_RIGHT_SYMMETRIC_6:
7706		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
7707		break;
7708	case ALGORITHM_PARITY_0_6:
7709		new_layout = ALGORITHM_PARITY_0;
7710		break;
7711	case ALGORITHM_PARITY_N:
7712		new_layout = ALGORITHM_PARITY_N;
7713		break;
7714	default:
7715		return ERR_PTR(-EINVAL);
7716	}
7717	mddev->new_level = 5;
7718	mddev->new_layout = new_layout;
7719	mddev->delta_disks = -1;
7720	mddev->raid_disks -= 1;
7721	return setup_conf(mddev);
7722}
7723
7724static int raid5_check_reshape(struct mddev *mddev)
7725{
7726	/* For a 2-drive array, the layout and chunk size can be changed
7727	 * immediately as no restriping is needed.
7728	 * For larger arrays we record the new value - after validation
7729	 * to be used by a reshape pass.
7730	 */
7731	struct r5conf *conf = mddev->private;
7732	int new_chunk = mddev->new_chunk_sectors;
7733
7734	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
7735		return -EINVAL;
7736	if (new_chunk > 0) {
7737		if (!is_power_of_2(new_chunk))
7738			return -EINVAL;
7739		if (new_chunk < (PAGE_SIZE>>9))
7740			return -EINVAL;
7741		if (mddev->array_sectors & (new_chunk-1))
7742			/* not factor of array size */
7743			return -EINVAL;
7744	}
7745
7746	/* They look valid */
7747
7748	if (mddev->raid_disks == 2) {
7749		/* can make the change immediately */
7750		if (mddev->new_layout >= 0) {
7751			conf->algorithm = mddev->new_layout;
7752			mddev->layout = mddev->new_layout;
7753		}
7754		if (new_chunk > 0) {
7755			conf->chunk_sectors = new_chunk ;
7756			mddev->chunk_sectors = new_chunk;
7757		}
7758		set_bit(MD_CHANGE_DEVS, &mddev->flags);
7759		md_wakeup_thread(mddev->thread);
7760	}
7761	return check_reshape(mddev);
7762}
7763
7764static int raid6_check_reshape(struct mddev *mddev)
7765{
7766	int new_chunk = mddev->new_chunk_sectors;
7767
7768	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
7769		return -EINVAL;
7770	if (new_chunk > 0) {
7771		if (!is_power_of_2(new_chunk))
7772			return -EINVAL;
7773		if (new_chunk < (PAGE_SIZE >> 9))
7774			return -EINVAL;
7775		if (mddev->array_sectors & (new_chunk-1))
7776			/* not factor of array size */
7777			return -EINVAL;
7778	}
7779
7780	/* They look valid */
7781	return check_reshape(mddev);
7782}
7783
7784static void *raid5_takeover(struct mddev *mddev)
7785{
7786	/* raid5 can take over:
7787	 *  raid0 - if there is only one strip zone - make it a raid4 layout
7788	 *  raid1 - if there are two drives.  We need to know the chunk size
7789	 *  raid4 - trivial - just use a raid4 layout.
7790	 *  raid6 - Providing it is a *_6 layout
7791	 */
7792	if (mddev->level == 0)
7793		return raid45_takeover_raid0(mddev, 5);
7794	if (mddev->level == 1)
7795		return raid5_takeover_raid1(mddev);
7796	if (mddev->level == 4) {
7797		mddev->new_layout = ALGORITHM_PARITY_N;
7798		mddev->new_level = 5;
7799		return setup_conf(mddev);
7800	}
7801	if (mddev->level == 6)
7802		return raid5_takeover_raid6(mddev);
7803
7804	return ERR_PTR(-EINVAL);
7805}
7806
7807static void *raid4_takeover(struct mddev *mddev)
7808{
7809	/* raid4 can take over:
7810	 *  raid0 - if there is only one strip zone
7811	 *  raid5 - if layout is right
7812	 */
7813	if (mddev->level == 0)
7814		return raid45_takeover_raid0(mddev, 4);
7815	if (mddev->level == 5 &&
7816	    mddev->layout == ALGORITHM_PARITY_N) {
7817		mddev->new_layout = 0;
7818		mddev->new_level = 4;
7819		return setup_conf(mddev);
7820	}
7821	return ERR_PTR(-EINVAL);
7822}
7823
7824static struct md_personality raid5_personality;
7825
7826static void *raid6_takeover(struct mddev *mddev)
7827{
7828	/* Currently can only take over a raid5.  We map the
7829	 * personality to an equivalent raid6 personality
7830	 * with the Q block at the end.
7831	 */
7832	int new_layout;
7833
7834	if (mddev->pers != &raid5_personality)
7835		return ERR_PTR(-EINVAL);
7836	if (mddev->degraded > 1)
7837		return ERR_PTR(-EINVAL);
7838	if (mddev->raid_disks > 253)
7839		return ERR_PTR(-EINVAL);
7840	if (mddev->raid_disks < 3)
7841		return ERR_PTR(-EINVAL);
7842
7843	switch (mddev->layout) {
7844	case ALGORITHM_LEFT_ASYMMETRIC:
7845		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
7846		break;
7847	case ALGORITHM_RIGHT_ASYMMETRIC:
7848		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
7849		break;
7850	case ALGORITHM_LEFT_SYMMETRIC:
7851		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
7852		break;
7853	case ALGORITHM_RIGHT_SYMMETRIC:
7854		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
7855		break;
7856	case ALGORITHM_PARITY_0:
7857		new_layout = ALGORITHM_PARITY_0_6;
7858		break;
7859	case ALGORITHM_PARITY_N:
7860		new_layout = ALGORITHM_PARITY_N;
7861		break;
7862	default:
7863		return ERR_PTR(-EINVAL);
7864	}
7865	mddev->new_level = 6;
7866	mddev->new_layout = new_layout;
7867	mddev->delta_disks = 1;
7868	mddev->raid_disks += 1;
7869	return setup_conf(mddev);
7870}
7871
7872static struct md_personality raid6_personality =
7873{
7874	.name		= "raid6",
7875	.level		= 6,
7876	.owner		= THIS_MODULE,
7877	.make_request	= raid5_make_request,
7878	.run		= raid5_run,
7879	.free		= raid5_free,
7880	.status		= raid5_status,
7881	.error_handler	= raid5_error,
7882	.hot_add_disk	= raid5_add_disk,
7883	.hot_remove_disk= raid5_remove_disk,
7884	.spare_active	= raid5_spare_active,
7885	.sync_request	= raid5_sync_request,
7886	.resize		= raid5_resize,
7887	.size		= raid5_size,
7888	.check_reshape	= raid6_check_reshape,
7889	.start_reshape  = raid5_start_reshape,
7890	.finish_reshape = raid5_finish_reshape,
7891	.quiesce	= raid5_quiesce,
7892	.takeover	= raid6_takeover,
7893	.congested	= raid5_congested,
7894};
7895static struct md_personality raid5_personality =
7896{
7897	.name		= "raid5",
7898	.level		= 5,
7899	.owner		= THIS_MODULE,
7900	.make_request	= raid5_make_request,
7901	.run		= raid5_run,
7902	.free		= raid5_free,
7903	.status		= raid5_status,
7904	.error_handler	= raid5_error,
7905	.hot_add_disk	= raid5_add_disk,
7906	.hot_remove_disk= raid5_remove_disk,
7907	.spare_active	= raid5_spare_active,
7908	.sync_request	= raid5_sync_request,
7909	.resize		= raid5_resize,
7910	.size		= raid5_size,
7911	.check_reshape	= raid5_check_reshape,
7912	.start_reshape  = raid5_start_reshape,
7913	.finish_reshape = raid5_finish_reshape,
7914	.quiesce	= raid5_quiesce,
7915	.takeover	= raid5_takeover,
7916	.congested	= raid5_congested,
7917};
7918
7919static struct md_personality raid4_personality =
7920{
7921	.name		= "raid4",
7922	.level		= 4,
7923	.owner		= THIS_MODULE,
7924	.make_request	= raid5_make_request,
7925	.run		= raid5_run,
7926	.free		= raid5_free,
7927	.status		= raid5_status,
7928	.error_handler	= raid5_error,
7929	.hot_add_disk	= raid5_add_disk,
7930	.hot_remove_disk= raid5_remove_disk,
7931	.spare_active	= raid5_spare_active,
7932	.sync_request	= raid5_sync_request,
7933	.resize		= raid5_resize,
7934	.size		= raid5_size,
7935	.check_reshape	= raid5_check_reshape,
7936	.start_reshape  = raid5_start_reshape,
7937	.finish_reshape = raid5_finish_reshape,
7938	.quiesce	= raid5_quiesce,
7939	.takeover	= raid4_takeover,
7940	.congested	= raid5_congested,
7941};
7942
7943static int __init raid5_init(void)
7944{
7945	raid5_wq = alloc_workqueue("raid5wq",
7946		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
7947	if (!raid5_wq)
7948		return -ENOMEM;
7949	register_md_personality(&raid6_personality);
7950	register_md_personality(&raid5_personality);
7951	register_md_personality(&raid4_personality);
7952	return 0;
7953}
7954
7955static void raid5_exit(void)
7956{
7957	unregister_md_personality(&raid6_personality);
7958	unregister_md_personality(&raid5_personality);
7959	unregister_md_personality(&raid4_personality);
7960	destroy_workqueue(raid5_wq);
7961}
7962
7963module_init(raid5_init);
7964module_exit(raid5_exit);
7965MODULE_LICENSE("GPL");
7966MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
7967MODULE_ALIAS("md-personality-4"); /* RAID5 */
7968MODULE_ALIAS("md-raid5");
7969MODULE_ALIAS("md-raid4");
7970MODULE_ALIAS("md-level-5");
7971MODULE_ALIAS("md-level-4");
7972MODULE_ALIAS("md-personality-8"); /* RAID6 */
7973MODULE_ALIAS("md-raid6");
7974MODULE_ALIAS("md-level-6");
7975
7976/* This used to be two separate modules, they were: */
7977MODULE_ALIAS("raid5");
7978MODULE_ALIAS("raid6");
v3.1
   1/*
   2 * raid5.c : Multiple Devices driver for Linux
   3 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   4 *	   Copyright (C) 1999, 2000 Ingo Molnar
   5 *	   Copyright (C) 2002, 2003 H. Peter Anvin
   6 *
   7 * RAID-4/5/6 management functions.
   8 * Thanks to Penguin Computing for making the RAID-6 development possible
   9 * by donating a test server!
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21/*
  22 * BITMAP UNPLUGGING:
  23 *
  24 * The sequencing for updating the bitmap reliably is a little
  25 * subtle (and I got it wrong the first time) so it deserves some
  26 * explanation.
  27 *
  28 * We group bitmap updates into batches.  Each batch has a number.
  29 * We may write out several batches at once, but that isn't very important.
  30 * conf->seq_write is the number of the last batch successfully written.
  31 * conf->seq_flush is the number of the last batch that was closed to
  32 *    new additions.
  33 * When we discover that we will need to write to any block in a stripe
  34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  35 * the number of the batch it will be in. This is seq_flush+1.
  36 * When we are ready to do a write, if that batch hasn't been written yet,
  37 *   we plug the array and queue the stripe for later.
  38 * When an unplug happens, we increment bm_flush, thus closing the current
  39 *   batch.
  40 * When we notice that bm_flush > bm_write, we write out all pending updates
  41 * to the bitmap, and advance bm_write to where bm_flush was.
  42 * This may occasionally write a bit out twice, but is sure never to
  43 * miss any bits.
  44 */
  45
  46#include <linux/blkdev.h>
  47#include <linux/kthread.h>
  48#include <linux/raid/pq.h>
  49#include <linux/async_tx.h>
 
  50#include <linux/async.h>
  51#include <linux/seq_file.h>
  52#include <linux/cpu.h>
  53#include <linux/slab.h>
  54#include <linux/ratelimit.h>
 
 
 
 
  55#include "md.h"
  56#include "raid5.h"
  57#include "raid0.h"
  58#include "bitmap.h"
  59
 
 
 
 
 
 
 
 
  60/*
  61 * Stripe cache
  62 */
  63
  64#define NR_STRIPES		256
  65#define STRIPE_SIZE		PAGE_SIZE
  66#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
  67#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
  68#define	IO_THRESHOLD		1
  69#define BYPASS_THRESHOLD	1
  70#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
  71#define HASH_MASK		(NR_HASH - 1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72
  73#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  74
  75/* bio's attached to a stripe+device for I/O are linked together in bi_sector
  76 * order without overlap.  There may be several bio's per stripe+device, and
  77 * a bio could span several devices.
  78 * When walking this list for a particular stripe+device, we must never proceed
  79 * beyond a bio that extends past this device, as the next bio might no longer
  80 * be valid.
  81 * This macro is used to determine the 'next' bio in the list, given the sector
  82 * of the current stripe+device
  83 */
  84#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
  85/*
  86 * The following can be used to debug the driver
  87 */
  88#define RAID5_PARANOIA	1
  89#if RAID5_PARANOIA && defined(CONFIG_SMP)
  90# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
  91#else
  92# define CHECK_DEVLOCK()
  93#endif
  94
  95#ifdef DEBUG
  96#define inline
  97#define __inline__
  98#endif
  99
 100/*
 101 * We maintain a biased count of active stripes in the bottom 16 bits of
 102 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 103 */
 104static inline int raid5_bi_phys_segments(struct bio *bio)
 105{
 106	return bio->bi_phys_segments & 0xffff;
 
 107}
 108
 109static inline int raid5_bi_hw_segments(struct bio *bio)
 110{
 111	return (bio->bi_phys_segments >> 16) & 0xffff;
 
 112}
 113
 114static inline int raid5_dec_bi_phys_segments(struct bio *bio)
 115{
 116	--bio->bi_phys_segments;
 117	return raid5_bi_phys_segments(bio);
 118}
 119
 120static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 
 121{
 122	unsigned short val = raid5_bi_hw_segments(bio);
 
 123
 124	--val;
 125	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
 126	return val;
 
 127}
 128
 129static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 130{
 131	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 
 132}
 133
 134/* Find first data disk in a raid6 stripe */
 135static inline int raid6_d0(struct stripe_head *sh)
 136{
 137	if (sh->ddf_layout)
 138		/* ddf always start from first device */
 139		return 0;
 140	/* md starts just after Q block */
 141	if (sh->qd_idx == sh->disks - 1)
 142		return 0;
 143	else
 144		return sh->qd_idx + 1;
 145}
 146static inline int raid6_next_disk(int disk, int raid_disks)
 147{
 148	disk++;
 149	return (disk < raid_disks) ? disk : 0;
 150}
 151
 152/* When walking through the disks in a raid5, starting at raid6_d0,
 153 * We need to map each disk to a 'slot', where the data disks are slot
 154 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 155 * is raid_disks-1.  This help does that mapping.
 156 */
 157static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 158			     int *count, int syndrome_disks)
 159{
 160	int slot = *count;
 161
 162	if (sh->ddf_layout)
 163		(*count)++;
 164	if (idx == sh->pd_idx)
 165		return syndrome_disks;
 166	if (idx == sh->qd_idx)
 167		return syndrome_disks + 1;
 168	if (!sh->ddf_layout)
 169		(*count)++;
 170	return slot;
 171}
 172
 173static void return_io(struct bio *return_bi)
 174{
 175	struct bio *bi = return_bi;
 176	while (bi) {
 177
 178		return_bi = bi->bi_next;
 179		bi->bi_next = NULL;
 180		bi->bi_size = 0;
 181		bio_endio(bi, 0);
 182		bi = return_bi;
 183	}
 184}
 185
 186static void print_raid5_conf (raid5_conf_t *conf);
 187
 188static int stripe_operations_active(struct stripe_head *sh)
 189{
 190	return sh->check_state || sh->reconstruct_state ||
 191	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
 192	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 193}
 194
 195static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 196{
 197	if (atomic_dec_and_test(&sh->count)) {
 198		BUG_ON(!list_empty(&sh->lru));
 199		BUG_ON(atomic_read(&conf->active_stripes)==0);
 200		if (test_bit(STRIPE_HANDLE, &sh->state)) {
 201			if (test_bit(STRIPE_DELAYED, &sh->state))
 202				list_add_tail(&sh->lru, &conf->delayed_list);
 203			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 204				   sh->bm_seq - conf->seq_write > 0)
 205				list_add_tail(&sh->lru, &conf->bitmap_list);
 206			else {
 207				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 208				list_add_tail(&sh->lru, &conf->handle_list);
 
 
 
 209			}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 210			md_wakeup_thread(conf->mddev->thread);
 211		} else {
 212			BUG_ON(stripe_operations_active(sh));
 213			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 214				atomic_dec(&conf->preread_active_stripes);
 215				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 216					md_wakeup_thread(conf->mddev->thread);
 217			}
 218			atomic_dec(&conf->active_stripes);
 219			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 220				list_add_tail(&sh->lru, &conf->inactive_list);
 221				wake_up(&conf->wait_for_stripe);
 222				if (conf->retry_read_aligned)
 223					md_wakeup_thread(conf->mddev->thread);
 224			}
 225		}
 226	}
 227}
 228
 229static void release_stripe(struct stripe_head *sh)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 230{
 231	raid5_conf_t *conf = sh->raid_conf;
 232	unsigned long flags;
 
 
 
 233
 234	spin_lock_irqsave(&conf->device_lock, flags);
 235	__release_stripe(conf, sh);
 236	spin_unlock_irqrestore(&conf->device_lock, flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 237}
 238
 239static inline void remove_hash(struct stripe_head *sh)
 240{
 241	pr_debug("remove_hash(), stripe %llu\n",
 242		(unsigned long long)sh->sector);
 243
 244	hlist_del_init(&sh->hash);
 245}
 246
 247static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 248{
 249	struct hlist_head *hp = stripe_hash(conf, sh->sector);
 250
 251	pr_debug("insert_hash(), stripe %llu\n",
 252		(unsigned long long)sh->sector);
 253
 254	CHECK_DEVLOCK();
 255	hlist_add_head(&sh->hash, hp);
 256}
 257
 258
 259/* find an idle stripe, make sure it is unhashed, and return it. */
 260static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
 261{
 262	struct stripe_head *sh = NULL;
 263	struct list_head *first;
 264
 265	CHECK_DEVLOCK();
 266	if (list_empty(&conf->inactive_list))
 267		goto out;
 268	first = conf->inactive_list.next;
 269	sh = list_entry(first, struct stripe_head, lru);
 270	list_del_init(first);
 271	remove_hash(sh);
 272	atomic_inc(&conf->active_stripes);
 
 
 
 273out:
 274	return sh;
 275}
 276
 277static void shrink_buffers(struct stripe_head *sh)
 278{
 279	struct page *p;
 280	int i;
 281	int num = sh->raid_conf->pool_size;
 282
 283	for (i = 0; i < num ; i++) {
 
 284		p = sh->dev[i].page;
 285		if (!p)
 286			continue;
 287		sh->dev[i].page = NULL;
 288		put_page(p);
 289	}
 290}
 291
 292static int grow_buffers(struct stripe_head *sh)
 293{
 294	int i;
 295	int num = sh->raid_conf->pool_size;
 296
 297	for (i = 0; i < num; i++) {
 298		struct page *page;
 299
 300		if (!(page = alloc_page(GFP_KERNEL))) {
 301			return 1;
 302		}
 303		sh->dev[i].page = page;
 
 304	}
 305	return 0;
 306}
 307
 308static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 309static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
 310			    struct stripe_head *sh);
 311
 312static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 313{
 314	raid5_conf_t *conf = sh->raid_conf;
 315	int i;
 316
 317	BUG_ON(atomic_read(&sh->count) != 0);
 318	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 319	BUG_ON(stripe_operations_active(sh));
 
 320
 321	CHECK_DEVLOCK();
 322	pr_debug("init_stripe called, stripe %llu\n",
 323		(unsigned long long)sh->sector);
 324
 325	remove_hash(sh);
 326
 327	sh->generation = conf->generation - previous;
 328	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 329	sh->sector = sector;
 330	stripe_set_idx(sector, conf, previous, sh);
 331	sh->state = 0;
 332
 333
 334	for (i = sh->disks; i--; ) {
 335		struct r5dev *dev = &sh->dev[i];
 336
 337		if (dev->toread || dev->read || dev->towrite || dev->written ||
 338		    test_bit(R5_LOCKED, &dev->flags)) {
 339			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
 340			       (unsigned long long)sh->sector, i, dev->toread,
 341			       dev->read, dev->towrite, dev->written,
 342			       test_bit(R5_LOCKED, &dev->flags));
 343			WARN_ON(1);
 344		}
 345		dev->flags = 0;
 346		raid5_build_block(sh, i, previous);
 347	}
 
 
 
 348	insert_hash(conf, sh);
 
 
 349}
 350
 351static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
 352					 short generation)
 353{
 354	struct stripe_head *sh;
 355	struct hlist_node *hn;
 356
 357	CHECK_DEVLOCK();
 358	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 359	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 360		if (sh->sector == sector && sh->generation == generation)
 361			return sh;
 362	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 363	return NULL;
 364}
 365
 366/*
 367 * Need to check if array has failed when deciding whether to:
 368 *  - start an array
 369 *  - remove non-faulty devices
 370 *  - add a spare
 371 *  - allow a reshape
 372 * This determination is simple when no reshape is happening.
 373 * However if there is a reshape, we need to carefully check
 374 * both the before and after sections.
 375 * This is because some failed devices may only affect one
 376 * of the two sections, and some non-in_sync devices may
 377 * be insync in the section most affected by failed devices.
 378 */
 379static int has_failed(raid5_conf_t *conf)
 380{
 381	int degraded;
 382	int i;
 383	if (conf->mddev->reshape_position == MaxSector)
 384		return conf->mddev->degraded > conf->max_degraded;
 385
 386	rcu_read_lock();
 387	degraded = 0;
 388	for (i = 0; i < conf->previous_raid_disks; i++) {
 389		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 
 
 390		if (!rdev || test_bit(Faulty, &rdev->flags))
 391			degraded++;
 392		else if (test_bit(In_sync, &rdev->flags))
 393			;
 394		else
 395			/* not in-sync or faulty.
 396			 * If the reshape increases the number of devices,
 397			 * this is being recovered by the reshape, so
 398			 * this 'previous' section is not in_sync.
 399			 * If the number of devices is being reduced however,
 400			 * the device can only be part of the array if
 401			 * we are reverting a reshape, so this section will
 402			 * be in-sync.
 403			 */
 404			if (conf->raid_disks >= conf->previous_raid_disks)
 405				degraded++;
 406	}
 407	rcu_read_unlock();
 408	if (degraded > conf->max_degraded)
 409		return 1;
 410	rcu_read_lock();
 411	degraded = 0;
 412	for (i = 0; i < conf->raid_disks; i++) {
 413		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 
 
 414		if (!rdev || test_bit(Faulty, &rdev->flags))
 415			degraded++;
 416		else if (test_bit(In_sync, &rdev->flags))
 417			;
 418		else
 419			/* not in-sync or faulty.
 420			 * If reshape increases the number of devices, this
 421			 * section has already been recovered, else it
 422			 * almost certainly hasn't.
 423			 */
 424			if (conf->raid_disks <= conf->previous_raid_disks)
 425				degraded++;
 426	}
 427	rcu_read_unlock();
 
 
 
 
 
 
 
 
 
 
 
 
 
 428	if (degraded > conf->max_degraded)
 429		return 1;
 430	return 0;
 431}
 432
 433static struct stripe_head *
 434get_active_stripe(raid5_conf_t *conf, sector_t sector,
 435		  int previous, int noblock, int noquiesce)
 436{
 437	struct stripe_head *sh;
 
 438
 439	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 440
 441	spin_lock_irq(&conf->device_lock);
 442
 443	do {
 444		wait_event_lock_irq(conf->wait_for_stripe,
 445				    conf->quiesce == 0 || noquiesce,
 446				    conf->device_lock, /* nothing */);
 447		sh = __find_stripe(conf, sector, conf->generation - previous);
 448		if (!sh) {
 449			if (!conf->inactive_blocked)
 450				sh = get_free_stripe(conf);
 
 
 
 
 
 451			if (noblock && sh == NULL)
 452				break;
 453			if (!sh) {
 454				conf->inactive_blocked = 1;
 455				wait_event_lock_irq(conf->wait_for_stripe,
 456						    !list_empty(&conf->inactive_list) &&
 457						    (atomic_read(&conf->active_stripes)
 458						     < (conf->max_nr_stripes *3/4)
 459						     || !conf->inactive_blocked),
 460						    conf->device_lock,
 461						    );
 462				conf->inactive_blocked = 0;
 463			} else
 
 
 
 464				init_stripe(sh, sector, previous);
 465		} else {
 466			if (atomic_read(&sh->count)) {
 467				BUG_ON(!list_empty(&sh->lru)
 468				    && !test_bit(STRIPE_EXPANDING, &sh->state));
 469			} else {
 470				if (!test_bit(STRIPE_HANDLE, &sh->state))
 471					atomic_inc(&conf->active_stripes);
 472				if (list_empty(&sh->lru) &&
 473				    !test_bit(STRIPE_EXPANDING, &sh->state))
 474					BUG();
 475				list_del_init(&sh->lru);
 
 
 
 
 476			}
 
 
 477		}
 478	} while (sh == NULL);
 479
 480	if (sh)
 481		atomic_inc(&sh->count);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 482
 483	spin_unlock_irq(&conf->device_lock);
 484	return sh;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 485}
 486
 487static void
 488raid5_end_read_request(struct bio *bi, int error);
 489static void
 490raid5_end_write_request(struct bio *bi, int error);
 491
 492static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 493{
 494	raid5_conf_t *conf = sh->raid_conf;
 495	int i, disks = sh->disks;
 
 496
 497	might_sleep();
 498
 
 
 499	for (i = disks; i--; ) {
 500		int rw;
 501		struct bio *bi;
 502		mdk_rdev_t *rdev;
 
 
 
 503		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 504			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 505				rw = WRITE_FUA;
 506			else
 507				rw = WRITE;
 
 
 508		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 509			rw = READ;
 510		else
 
 
 
 
 511			continue;
 
 
 512
 
 513		bi = &sh->dev[i].req;
 514
 515		bi->bi_rw = rw;
 516		if (rw & WRITE)
 517			bi->bi_end_io = raid5_end_write_request;
 518		else
 519			bi->bi_end_io = raid5_end_read_request;
 520
 521		rcu_read_lock();
 
 
 522		rdev = rcu_dereference(conf->disks[i].rdev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 523		if (rdev && test_bit(Faulty, &rdev->flags))
 524			rdev = NULL;
 525		if (rdev)
 526			atomic_inc(&rdev->nr_pending);
 
 
 
 
 527		rcu_read_unlock();
 528
 529		/* We have already checked bad blocks for reads.  Now
 530		 * need to check for writes.
 
 531		 */
 532		while ((rw & WRITE) && rdev &&
 533		       test_bit(WriteErrorSeen, &rdev->flags)) {
 534			sector_t first_bad;
 535			int bad_sectors;
 536			int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
 537					      &first_bad, &bad_sectors);
 538			if (!bad)
 539				break;
 540
 541			if (bad < 0) {
 542				set_bit(BlockedBadBlocks, &rdev->flags);
 543				if (!conf->mddev->external &&
 544				    conf->mddev->flags) {
 545					/* It is very unlikely, but we might
 546					 * still need to write out the
 547					 * bad block log - better give it
 548					 * a chance*/
 549					md_check_recovery(conf->mddev);
 550				}
 
 
 
 
 
 
 551				md_wait_for_blocked_rdev(rdev, conf->mddev);
 552			} else {
 553				/* Acknowledged bad block - skip the write */
 554				rdev_dec_pending(rdev, conf->mddev);
 555				rdev = NULL;
 556			}
 557		}
 558
 559		if (rdev) {
 560			if (s->syncing || s->expanding || s->expanded)
 
 561				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 562
 563			set_bit(STRIPE_IO_STARTED, &sh->state);
 564
 
 565			bi->bi_bdev = rdev->bdev;
 
 
 
 
 
 
 566			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 567				__func__, (unsigned long long)sh->sector,
 568				bi->bi_rw, i);
 569			atomic_inc(&sh->count);
 570			bi->bi_sector = sh->sector + rdev->data_offset;
 571			bi->bi_flags = 1 << BIO_UPTODATE;
 
 
 
 
 
 
 
 
 
 
 
 
 572			bi->bi_vcnt = 1;
 573			bi->bi_max_vecs = 1;
 574			bi->bi_idx = 0;
 575			bi->bi_io_vec = &sh->dev[i].vec;
 576			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 577			bi->bi_io_vec[0].bv_offset = 0;
 578			bi->bi_size = STRIPE_SIZE;
 579			bi->bi_next = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 580			generic_make_request(bi);
 581		} else {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 582			if (rw & WRITE)
 583				set_bit(STRIPE_DEGRADED, &sh->state);
 584			pr_debug("skip op %ld on disc %d for sector %llu\n",
 585				bi->bi_rw, i, (unsigned long long)sh->sector);
 586			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 587			set_bit(STRIPE_HANDLE, &sh->state);
 588		}
 
 
 
 
 
 
 
 589	}
 590}
 591
 592static struct dma_async_tx_descriptor *
 593async_copy_data(int frombio, struct bio *bio, struct page *page,
 594	sector_t sector, struct dma_async_tx_descriptor *tx)
 
 595{
 596	struct bio_vec *bvl;
 
 597	struct page *bio_page;
 598	int i;
 599	int page_offset;
 600	struct async_submit_ctl submit;
 601	enum async_tx_flags flags = 0;
 602
 603	if (bio->bi_sector >= sector)
 604		page_offset = (signed)(bio->bi_sector - sector) * 512;
 605	else
 606		page_offset = (signed)(sector - bio->bi_sector) * -512;
 607
 608	if (frombio)
 609		flags |= ASYNC_TX_FENCE;
 610	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 611
 612	bio_for_each_segment(bvl, bio, i) {
 613		int len = bvl->bv_len;
 614		int clen;
 615		int b_offset = 0;
 616
 617		if (page_offset < 0) {
 618			b_offset = -page_offset;
 619			page_offset += b_offset;
 620			len -= b_offset;
 621		}
 622
 623		if (len > 0 && page_offset + len > STRIPE_SIZE)
 624			clen = STRIPE_SIZE - page_offset;
 625		else
 626			clen = len;
 627
 628		if (clen > 0) {
 629			b_offset += bvl->bv_offset;
 630			bio_page = bvl->bv_page;
 631			if (frombio)
 632				tx = async_memcpy(page, bio_page, page_offset,
 
 
 
 
 
 633						  b_offset, clen, &submit);
 634			else
 635				tx = async_memcpy(bio_page, page, b_offset,
 636						  page_offset, clen, &submit);
 637		}
 638		/* chain the operations */
 639		submit.depend_tx = tx;
 640
 641		if (clen < len) /* hit end of page */
 642			break;
 643		page_offset +=  len;
 644	}
 645
 646	return tx;
 647}
 648
 649static void ops_complete_biofill(void *stripe_head_ref)
 650{
 651	struct stripe_head *sh = stripe_head_ref;
 652	struct bio *return_bi = NULL;
 653	raid5_conf_t *conf = sh->raid_conf;
 654	int i;
 655
 656	pr_debug("%s: stripe %llu\n", __func__,
 657		(unsigned long long)sh->sector);
 658
 659	/* clear completed biofills */
 660	spin_lock_irq(&conf->device_lock);
 661	for (i = sh->disks; i--; ) {
 662		struct r5dev *dev = &sh->dev[i];
 663
 664		/* acknowledge completion of a biofill operation */
 665		/* and check if we need to reply to a read request,
 666		 * new R5_Wantfill requests are held off until
 667		 * !STRIPE_BIOFILL_RUN
 668		 */
 669		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 670			struct bio *rbi, *rbi2;
 671
 672			BUG_ON(!dev->read);
 673			rbi = dev->read;
 674			dev->read = NULL;
 675			while (rbi && rbi->bi_sector <
 676				dev->sector + STRIPE_SECTORS) {
 677				rbi2 = r5_next_bio(rbi, dev->sector);
 678				if (!raid5_dec_bi_phys_segments(rbi)) {
 679					rbi->bi_next = return_bi;
 680					return_bi = rbi;
 681				}
 682				rbi = rbi2;
 683			}
 684		}
 685	}
 686	spin_unlock_irq(&conf->device_lock);
 687	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 688
 689	return_io(return_bi);
 690
 691	set_bit(STRIPE_HANDLE, &sh->state);
 692	release_stripe(sh);
 693}
 694
 695static void ops_run_biofill(struct stripe_head *sh)
 696{
 697	struct dma_async_tx_descriptor *tx = NULL;
 698	raid5_conf_t *conf = sh->raid_conf;
 699	struct async_submit_ctl submit;
 700	int i;
 701
 
 702	pr_debug("%s: stripe %llu\n", __func__,
 703		(unsigned long long)sh->sector);
 704
 705	for (i = sh->disks; i--; ) {
 706		struct r5dev *dev = &sh->dev[i];
 707		if (test_bit(R5_Wantfill, &dev->flags)) {
 708			struct bio *rbi;
 709			spin_lock_irq(&conf->device_lock);
 710			dev->read = rbi = dev->toread;
 711			dev->toread = NULL;
 712			spin_unlock_irq(&conf->device_lock);
 713			while (rbi && rbi->bi_sector <
 714				dev->sector + STRIPE_SECTORS) {
 715				tx = async_copy_data(0, rbi, dev->page,
 716					dev->sector, tx);
 717				rbi = r5_next_bio(rbi, dev->sector);
 718			}
 719		}
 720	}
 721
 722	atomic_inc(&sh->count);
 723	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
 724	async_trigger_callback(&submit);
 725}
 726
 727static void mark_target_uptodate(struct stripe_head *sh, int target)
 728{
 729	struct r5dev *tgt;
 730
 731	if (target < 0)
 732		return;
 733
 734	tgt = &sh->dev[target];
 735	set_bit(R5_UPTODATE, &tgt->flags);
 736	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 737	clear_bit(R5_Wantcompute, &tgt->flags);
 738}
 739
 740static void ops_complete_compute(void *stripe_head_ref)
 741{
 742	struct stripe_head *sh = stripe_head_ref;
 743
 744	pr_debug("%s: stripe %llu\n", __func__,
 745		(unsigned long long)sh->sector);
 746
 747	/* mark the computed target(s) as uptodate */
 748	mark_target_uptodate(sh, sh->ops.target);
 749	mark_target_uptodate(sh, sh->ops.target2);
 750
 751	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
 752	if (sh->check_state == check_state_compute_run)
 753		sh->check_state = check_state_compute_result;
 754	set_bit(STRIPE_HANDLE, &sh->state);
 755	release_stripe(sh);
 756}
 757
 758/* return a pointer to the address conversion region of the scribble buffer */
 759static addr_conv_t *to_addr_conv(struct stripe_head *sh,
 760				 struct raid5_percpu *percpu)
 761{
 762	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
 763}
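    /*
     * Layout note: percpu->scribble holds two back-to-back arrays sized for
     * sh->disks + 2 entries - first the struct page pointers handed to the
     * async_tx routines, then the addr_conv_t slots used for address
     * conversion.  to_addr_conv() simply skips past the page-pointer array;
     * scribble_len() below sizes the matching allocation.
     */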
 764
 765static struct dma_async_tx_descriptor *
 766ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
 767{
 768	int disks = sh->disks;
 769	struct page **xor_srcs = percpu->scribble;
 770	int target = sh->ops.target;
 771	struct r5dev *tgt = &sh->dev[target];
 772	struct page *xor_dest = tgt->page;
 773	int count = 0;
 774	struct dma_async_tx_descriptor *tx;
 775	struct async_submit_ctl submit;
 776	int i;
 777
 
 
 778	pr_debug("%s: stripe %llu block: %d\n",
 779		__func__, (unsigned long long)sh->sector, target);
 780	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 781
 782	for (i = disks; i--; )
 783		if (i != target)
 784			xor_srcs[count++] = sh->dev[i].page;
 785
 786	atomic_inc(&sh->count);
 787
 788	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
 789			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
 790	if (unlikely(count == 1))
 791		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 792	else
 793		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 794
 795	return tx;
 796}
 797
 798/* set_syndrome_sources - populate source buffers for gen_syndrome
 799 * @srcs - (struct page *) array of size sh->disks
 800 * @sh - stripe_head to parse
 801 *
 802 * Populates srcs in proper layout order for the stripe and returns the
 803 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 804 * destination buffer is recorded in srcs[count] and the Q destination
 805 * is recorded in srcs[count+1].
 806 */
 807static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 
 
 808{
 809	int disks = sh->disks;
 810	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
 811	int d0_idx = raid6_d0(sh);
 812	int count;
 813	int i;
 814
 815	for (i = 0; i < disks; i++)
 816		srcs[i] = NULL;
 817
 818	count = 0;
 819	i = d0_idx;
 820	do {
 821		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 
 822
 823		srcs[slot] = sh->dev[i].page;
 824		i = raid6_next_disk(i, disks);
 825	} while (i != d0_idx);
 826
 827	return syndrome_disks;
 828}
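    /*
     * Example (hypothetical 6-device RAID-6, non-DDF layout): the loop above
     * walks the devices from raid6_d0(sh) and drops the four data pages into
     * srcs[0..3] in syndrome order, with P landing in srcs[4] and Q in
     * srcs[5]; the returned count is 4, so callers hand count+2 blocks to
     * async_gen_syndrome().
     */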
 829
 830static struct dma_async_tx_descriptor *
 831ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
 832{
 833	int disks = sh->disks;
 834	struct page **blocks = percpu->scribble;
 835	int target;
 836	int qd_idx = sh->qd_idx;
 837	struct dma_async_tx_descriptor *tx;
 838	struct async_submit_ctl submit;
 839	struct r5dev *tgt;
 840	struct page *dest;
 841	int i;
 842	int count;
 843
 
 844	if (sh->ops.target < 0)
 845		target = sh->ops.target2;
 846	else if (sh->ops.target2 < 0)
 847		target = sh->ops.target;
 848	else
 849		/* we should only have one valid target */
 850		BUG();
 851	BUG_ON(target < 0);
 852	pr_debug("%s: stripe %llu block: %d\n",
 853		__func__, (unsigned long long)sh->sector, target);
 854
 855	tgt = &sh->dev[target];
 856	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 857	dest = tgt->page;
 858
 859	atomic_inc(&sh->count);
 860
 861	if (target == qd_idx) {
 862		count = set_syndrome_sources(blocks, sh);
 863		blocks[count] = NULL; /* regenerating p is not necessary */
 864		BUG_ON(blocks[count+1] != dest); /* q should already be set */
 865		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 866				  ops_complete_compute, sh,
 867				  to_addr_conv(sh, percpu));
 868		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 869	} else {
 870		/* Compute any data- or p-drive using XOR */
 871		count = 0;
 872		for (i = disks; i-- ; ) {
 873			if (i == target || i == qd_idx)
 874				continue;
 875			blocks[count++] = sh->dev[i].page;
 876		}
 877
 878		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 879				  NULL, ops_complete_compute, sh,
 880				  to_addr_conv(sh, percpu));
 881		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
 882	}
 883
 884	return tx;
 885}
 886
 887static struct dma_async_tx_descriptor *
 888ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 889{
 890	int i, count, disks = sh->disks;
 891	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
 892	int d0_idx = raid6_d0(sh);
 893	int faila = -1, failb = -1;
 894	int target = sh->ops.target;
 895	int target2 = sh->ops.target2;
 896	struct r5dev *tgt = &sh->dev[target];
 897	struct r5dev *tgt2 = &sh->dev[target2];
 898	struct dma_async_tx_descriptor *tx;
 899	struct page **blocks = percpu->scribble;
 900	struct async_submit_ctl submit;
 901
 
 902	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
 903		 __func__, (unsigned long long)sh->sector, target, target2);
 904	BUG_ON(target < 0 || target2 < 0);
 905	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 906	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
 907
 908	/* we need to open-code set_syndrome_sources to handle the
 909	 * slot number conversion for 'faila' and 'failb'
 910	 */
 911	for (i = 0; i < disks ; i++)
 912		blocks[i] = NULL;
 913	count = 0;
 914	i = d0_idx;
 915	do {
 916		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 917
 918		blocks[slot] = sh->dev[i].page;
 919
 920		if (i == target)
 921			faila = slot;
 922		if (i == target2)
 923			failb = slot;
 924		i = raid6_next_disk(i, disks);
 925	} while (i != d0_idx);
 926
 927	BUG_ON(faila == failb);
 928	if (failb < faila)
 929		swap(faila, failb);
 930	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
 931		 __func__, (unsigned long long)sh->sector, faila, failb);
 932
 933	atomic_inc(&sh->count);
 934
 935	if (failb == syndrome_disks+1) {
 936		/* Q disk is one of the missing disks */
 937		if (faila == syndrome_disks) {
 938			/* Missing P+Q, just recompute */
 939			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 940					  ops_complete_compute, sh,
 941					  to_addr_conv(sh, percpu));
 942			return async_gen_syndrome(blocks, 0, syndrome_disks+2,
 943						  STRIPE_SIZE, &submit);
 944		} else {
 945			struct page *dest;
 946			int data_target;
 947			int qd_idx = sh->qd_idx;
 948
 949			/* Missing D+Q: recompute D from P, then recompute Q */
 950			if (target == qd_idx)
 951				data_target = target2;
 952			else
 953				data_target = target;
 954
 955			count = 0;
 956			for (i = disks; i-- ; ) {
 957				if (i == data_target || i == qd_idx)
 958					continue;
 959				blocks[count++] = sh->dev[i].page;
 960			}
 961			dest = sh->dev[data_target].page;
 962			init_async_submit(&submit,
 963					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 964					  NULL, NULL, NULL,
 965					  to_addr_conv(sh, percpu));
 966			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
 967				       &submit);
 968
 969			count = set_syndrome_sources(blocks, sh);
 970			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
 971					  ops_complete_compute, sh,
 972					  to_addr_conv(sh, percpu));
 973			return async_gen_syndrome(blocks, 0, count+2,
 974						  STRIPE_SIZE, &submit);
 975		}
 976	} else {
 977		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 978				  ops_complete_compute, sh,
 979				  to_addr_conv(sh, percpu));
 980		if (failb == syndrome_disks) {
 981			/* We're missing D+P. */
 982			return async_raid6_datap_recov(syndrome_disks+2,
 983						       STRIPE_SIZE, faila,
 984						       blocks, &submit);
 985		} else {
 986			/* We're missing D+D. */
 987			return async_raid6_2data_recov(syndrome_disks+2,
 988						       STRIPE_SIZE, faila, failb,
 989						       blocks, &submit);
 990		}
 991	}
 992}
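    /*
     * Summary of the cases above: P+Q missing regenerates the whole syndrome;
     * D+Q missing rebuilds the data block with a plain XOR against P and the
     * surviving data, then regenerates Q; D+P and D+D missing are handed to
     * the lib/raid6 recovery helpers (async_raid6_datap_recov /
     * async_raid6_2data_recov).
     */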
 993
 994
 995static void ops_complete_prexor(void *stripe_head_ref)
 996{
 997	struct stripe_head *sh = stripe_head_ref;
 998
 999	pr_debug("%s: stripe %llu\n", __func__,
1000		(unsigned long long)sh->sector);
1001}
1002
1003static struct dma_async_tx_descriptor *
1004ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1005	       struct dma_async_tx_descriptor *tx)
1006{
1007	int disks = sh->disks;
1008	struct page **xor_srcs = percpu->scribble;
1009	int count = 0, pd_idx = sh->pd_idx, i;
1010	struct async_submit_ctl submit;
1011
1012	/* existing parity data subtracted */
1013	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1014
 
1015	pr_debug("%s: stripe %llu\n", __func__,
1016		(unsigned long long)sh->sector);
1017
1018	for (i = disks; i--; ) {
1019		struct r5dev *dev = &sh->dev[i];
1020		/* Only process blocks that are known to be uptodate */
1021		if (test_bit(R5_Wantdrain, &dev->flags))
1022			xor_srcs[count++] = dev->page;
1023	}
1024
1025	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1026			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1027	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1028
1029	return tx;
1030}
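    /*
     * This is the first half of a read-modify-write: only blocks flagged
     * R5_Wantdrain (those about to be overwritten) are XORed into the old
     * parity here, and ops_run_reconstruct5() later XORs the new data back
     * in, so that P_new = P_old ^ old_data ^ new_data without reading the
     * blocks that are not being rewritten.
     */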
1031
1032static struct dma_async_tx_descriptor *
1033ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1034{
1035	int disks = sh->disks;
1036	int i;
 
1037
1038	pr_debug("%s: stripe %llu\n", __func__,
1039		(unsigned long long)sh->sector);
1040
1041	for (i = disks; i--; ) {
1042		struct r5dev *dev = &sh->dev[i];
1043		struct bio *chosen;
1044
1045		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
 
1046			struct bio *wbi;
1047
1048			spin_lock_irq(&sh->raid_conf->device_lock);
 
 
1049			chosen = dev->towrite;
1050			dev->towrite = NULL;
 
1051			BUG_ON(dev->written);
1052			wbi = dev->written = chosen;
1053			spin_unlock_irq(&sh->raid_conf->device_lock);
 
1054
1055			while (wbi && wbi->bi_sector <
1056				dev->sector + STRIPE_SECTORS) {
1057				if (wbi->bi_rw & REQ_FUA)
1058					set_bit(R5_WantFUA, &dev->flags);
1059				tx = async_copy_data(1, wbi, dev->page,
1060					dev->sector, tx);
1061				wbi = r5_next_bio(wbi, dev->sector);
1062			}
1063		}
1064	}
1065
1066	return tx;
1067}
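    /*
     * Note that a REQ_FUA bio marks its device with R5_WantFUA above; once
     * the drain and parity update finish, ops_complete_reconstruct()
     * propagates that flag to every written block and to the parity block(s),
     * so the FUA semantics cover the parity writes as well.
     */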
1068
1069static void ops_complete_reconstruct(void *stripe_head_ref)
1070{
1071	struct stripe_head *sh = stripe_head_ref;
1072	int disks = sh->disks;
1073	int pd_idx = sh->pd_idx;
1074	int qd_idx = sh->qd_idx;
1075	int i;
1076	bool fua = false;
1077
1078	pr_debug("%s: stripe %llu\n", __func__,
1079		(unsigned long long)sh->sector);
1080
1081	for (i = disks; i--; )
1082		fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1083
1084	for (i = disks; i--; ) {
1085		struct r5dev *dev = &sh->dev[i];
1086
1087		if (dev->written || i == pd_idx || i == qd_idx) {
1088			set_bit(R5_UPTODATE, &dev->flags);
 
1089			if (fua)
1090				set_bit(R5_WantFUA, &dev->flags);
 
 
1091		}
1092	}
1093
1094	if (sh->reconstruct_state == reconstruct_state_drain_run)
1095		sh->reconstruct_state = reconstruct_state_drain_result;
1096	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1097		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1098	else {
1099		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1100		sh->reconstruct_state = reconstruct_state_result;
1101	}
1102
1103	set_bit(STRIPE_HANDLE, &sh->state);
1104	release_stripe(sh);
1105}
1106
1107static void
1108ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1109		     struct dma_async_tx_descriptor *tx)
1110{
1111	int disks = sh->disks;
1112	struct page **xor_srcs = percpu->scribble;
1113	struct async_submit_ctl submit;
1114	int count = 0, pd_idx = sh->pd_idx, i;
1115	struct page *xor_dest;
1116	int prexor = 0;
1117	unsigned long flags;
1118
1119	pr_debug("%s: stripe %llu\n", __func__,
1120		(unsigned long long)sh->sector);
1121
1122	/* check if prexor is active which means only process blocks
1123	 * that are part of a read-modify-write (written)
1124	 */
1125	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1126		prexor = 1;
1127		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1128		for (i = disks; i--; ) {
1129			struct r5dev *dev = &sh->dev[i];
1130			if (dev->written)
1131				xor_srcs[count++] = dev->page;
1132		}
1133	} else {
1134		xor_dest = sh->dev[pd_idx].page;
1135		for (i = disks; i--; ) {
1136			struct r5dev *dev = &sh->dev[i];
1137			if (i != pd_idx)
1138				xor_srcs[count++] = dev->page;
1139		}
1140	}
1141
1142	/* 1/ if we prexor'd then the dest is reused as a source
1143	 * 2/ if we did not prexor then we are redoing the parity
1144	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1145	 * for the synchronous xor case
1146	 */
1147	flags = ASYNC_TX_ACK |
1148		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1149
1150	atomic_inc(&sh->count);
1151
1152	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1153			  to_addr_conv(sh, percpu));
1154	if (unlikely(count == 1))
1155		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1156	else
1157		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1158}
1159
1160static void
1161ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1162		     struct dma_async_tx_descriptor *tx)
1163{
1164	struct async_submit_ctl submit;
1165	struct page **blocks = percpu->scribble;
1166	int count;
1167
1168	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1169
1170	count = set_syndrome_sources(blocks, sh);
1171
1172	atomic_inc(&sh->count);
1173
1174	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1175			  sh, to_addr_conv(sh, percpu));
1176	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1177}
1178
1179static void ops_complete_check(void *stripe_head_ref)
1180{
1181	struct stripe_head *sh = stripe_head_ref;
1182
1183	pr_debug("%s: stripe %llu\n", __func__,
1184		(unsigned long long)sh->sector);
1185
1186	sh->check_state = check_state_check_result;
1187	set_bit(STRIPE_HANDLE, &sh->state);
1188	release_stripe(sh);
1189}
1190
1191static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1192{
1193	int disks = sh->disks;
1194	int pd_idx = sh->pd_idx;
1195	int qd_idx = sh->qd_idx;
1196	struct page *xor_dest;
1197	struct page **xor_srcs = percpu->scribble;
1198	struct dma_async_tx_descriptor *tx;
1199	struct async_submit_ctl submit;
1200	int count;
1201	int i;
1202
1203	pr_debug("%s: stripe %llu\n", __func__,
1204		(unsigned long long)sh->sector);
1205
 
1206	count = 0;
1207	xor_dest = sh->dev[pd_idx].page;
1208	xor_srcs[count++] = xor_dest;
1209	for (i = disks; i--; ) {
1210		if (i == pd_idx || i == qd_idx)
1211			continue;
1212		xor_srcs[count++] = sh->dev[i].page;
1213	}
1214
1215	init_async_submit(&submit, 0, NULL, NULL, NULL,
1216			  to_addr_conv(sh, percpu));
1217	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1218			   &sh->ops.zero_sum_result, &submit);
1219
1220	atomic_inc(&sh->count);
1221	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1222	tx = async_trigger_callback(&submit);
1223}
1224
1225static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1226{
1227	struct page **srcs = percpu->scribble;
1228	struct async_submit_ctl submit;
1229	int count;
1230
1231	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1232		(unsigned long long)sh->sector, checkp);
1233
1234	count = set_syndrome_sources(srcs, sh);
 
1235	if (!checkp)
1236		srcs[count] = NULL;
1237
1238	atomic_inc(&sh->count);
1239	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1240			  sh, to_addr_conv(sh, percpu));
1241	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1242			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1243}
1244
1245static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1246{
1247	int overlap_clear = 0, i, disks = sh->disks;
1248	struct dma_async_tx_descriptor *tx = NULL;
1249	raid5_conf_t *conf = sh->raid_conf;
1250	int level = conf->level;
1251	struct raid5_percpu *percpu;
1252	unsigned long cpu;
1253
1254	cpu = get_cpu();
1255	percpu = per_cpu_ptr(conf->percpu, cpu);
1256	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1257		ops_run_biofill(sh);
1258		overlap_clear++;
1259	}
1260
1261	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1262		if (level < 6)
1263			tx = ops_run_compute5(sh, percpu);
1264		else {
1265			if (sh->ops.target2 < 0 || sh->ops.target < 0)
1266				tx = ops_run_compute6_1(sh, percpu);
1267			else
1268				tx = ops_run_compute6_2(sh, percpu);
1269		}
1270		/* terminate the chain if reconstruct is not set to be run */
1271		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1272			async_tx_ack(tx);
1273	}
1274
1275	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1276		tx = ops_run_prexor(sh, percpu, tx);
1277
1278	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1279		tx = ops_run_biodrain(sh, tx);
1280		overlap_clear++;
1281	}
1282
1283	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1284		if (level < 6)
1285			ops_run_reconstruct5(sh, percpu, tx);
1286		else
1287			ops_run_reconstruct6(sh, percpu, tx);
1288	}
1289
1290	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1291		if (sh->check_state == check_state_run)
1292			ops_run_check_p(sh, percpu);
1293		else if (sh->check_state == check_state_run_q)
1294			ops_run_check_pq(sh, percpu, 0);
1295		else if (sh->check_state == check_state_run_pq)
1296			ops_run_check_pq(sh, percpu, 1);
1297		else
1298			BUG();
1299	}
1300
1301	if (overlap_clear)
1302		for (i = disks; i--; ) {
1303			struct r5dev *dev = &sh->dev[i];
1304			if (test_and_clear_bit(R5_Overlap, &dev->flags))
1305				wake_up(&sh->raid_conf->wait_for_overlap);
1306		}
1307	put_cpu();
1308}
1309
1310#ifdef CONFIG_MULTICORE_RAID456
1311static void async_run_ops(void *param, async_cookie_t cookie)
1312{
1313	struct stripe_head *sh = param;
1314	unsigned long ops_request = sh->ops.request;
1315
1316	clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1317	wake_up(&sh->ops.wait_for_ops);
1318
1319	__raid_run_ops(sh, ops_request);
1320	release_stripe(sh);
1321}
1322
1323static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1324{
1325	/* since handle_stripe can be called outside of raid5d context
1326	 * we need to ensure sh->ops.request is de-staged before another
1327	 * request arrives
1328	 */
1329	wait_event(sh->ops.wait_for_ops,
1330		   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1331	sh->ops.request = ops_request;
1332
1333	atomic_inc(&sh->count);
1334	async_schedule(async_run_ops, sh);
1335}
1336#else
1337#define raid_run_ops __raid_run_ops
1338#endif
1339
1340static int grow_one_stripe(raid5_conf_t *conf)
1341{
1342	struct stripe_head *sh;
1343	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1344	if (!sh)
1345		return 0;
1346
1347	sh->raid_conf = conf;
1348	#ifdef CONFIG_MULTICORE_RAID456
1349	init_waitqueue_head(&sh->ops.wait_for_ops);
1350	#endif
1351
1352	if (grow_buffers(sh)) {
1353		shrink_buffers(sh);
1354		kmem_cache_free(conf->slab_cache, sh);
1355		return 0;
1356	}
 
 
1357	/* we just created an active stripe so... */
1358	atomic_set(&sh->count, 1);
1359	atomic_inc(&conf->active_stripes);
1360	INIT_LIST_HEAD(&sh->lru);
1361	release_stripe(sh);
 
1362	return 1;
1363}
1364
1365static int grow_stripes(raid5_conf_t *conf, int num)
1366{
1367	struct kmem_cache *sc;
1368	int devs = max(conf->raid_disks, conf->previous_raid_disks);
1369
1370	if (conf->mddev->gendisk)
1371		sprintf(conf->cache_name[0],
1372			"raid%d-%s", conf->level, mdname(conf->mddev));
1373	else
1374		sprintf(conf->cache_name[0],
1375			"raid%d-%p", conf->level, conf->mddev);
1376	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1377
1378	conf->active_name = 0;
1379	sc = kmem_cache_create(conf->cache_name[conf->active_name],
1380			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1381			       0, 0, NULL);
1382	if (!sc)
1383		return 1;
1384	conf->slab_cache = sc;
1385	conf->pool_size = devs;
1386	while (num--)
1387		if (!grow_one_stripe(conf))
1388			return 1;
 
1389	return 0;
1390}
1391
1392/**
1393 * scribble_len - return the required size of the scribble region
1394 * @num - total number of disks in the array
1395 *
1396 * The size must be enough to contain:
1397 * 1/ a struct page pointer for each device in the array +2
1398 * 2/ room to convert each entry in (1) to its corresponding dma
1399 *    (dma_map_page()) or page (page_address()) address.
1400 *
1401 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1402 * calculate over all devices (not just the data blocks), using zeros in place
1403 * of the P and Q blocks.
1404 */
1405static size_t scribble_len(int num)
1406{
 
1407	size_t len;
1408
1409	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1410
1411	return len;
1412}
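    /*
     * i.e. len == (num + 2) * (sizeof(struct page *) + sizeof(addr_conv_t));
     * for a hypothetical 8-device array on a 64-bit build with 8-byte
     * addr_conv_t entries that comes to 10 * 16 == 160 bytes per CPU.
     */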
1413
1414static int resize_stripes(raid5_conf_t *conf, int newsize)
1415{
1416	/* Make all the stripes able to hold 'newsize' devices.
1417	 * New slots in each stripe get 'page' set to a new page.
1418	 *
1419	 * This happens in stages:
1420	 * 1/ create a new kmem_cache and allocate the required number of
1421	 *    stripe_heads.
1422	 * 2/ gather all the old stripe_heads and transfer the pages across
1423	 *    to the new stripe_heads.  This will have the side effect of
1424	 *    freezing the array as once all stripe_heads have been collected,
1425	 *    no IO will be possible.  Old stripe heads are freed once their
1426	 *    pages have been transferred over, and the old kmem_cache is
1427	 *    freed when all stripes are done.
1428	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1429	 *    we simply return a failure status - no need to clean anything up.
1430	 * 4/ allocate new pages for the new slots in the new stripe_heads.
1431	 *    If this fails, we don't bother trying to shrink the
1432	 *    stripe_heads down again, we just leave them as they are.
1433	 *    As each stripe_head is processed the new one is released into
1434	 *    active service.
1435	 *
1436	 * Once step2 is started, we cannot afford to wait for a write,
1437	 * so we use GFP_NOIO allocations.
1438	 */
1439	struct stripe_head *osh, *nsh;
1440	LIST_HEAD(newstripes);
1441	struct disk_info *ndisks;
1442	unsigned long cpu;
1443	int err;
1444	struct kmem_cache *sc;
1445	int i;
 
1446
1447	if (newsize <= conf->pool_size)
1448		return 0; /* never bother to shrink */
1449
1450	err = md_allow_write(conf->mddev);
1451	if (err)
1452		return err;
1453
1454	/* Step 1 */
1455	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1456			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1457			       0, 0, NULL);
1458	if (!sc)
1459		return -ENOMEM;
1460
1461	for (i = conf->max_nr_stripes; i; i--) {
1462		nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1463		if (!nsh)
1464			break;
1465
1466		nsh->raid_conf = conf;
1467		#ifdef CONFIG_MULTICORE_RAID456
1468		init_waitqueue_head(&nsh->ops.wait_for_ops);
1469		#endif
1470
1471		list_add(&nsh->lru, &newstripes);
1472	}
1473	if (i) {
1474		/* didn't get enough, give up */
1475		while (!list_empty(&newstripes)) {
1476			nsh = list_entry(newstripes.next, struct stripe_head, lru);
1477			list_del(&nsh->lru);
1478			kmem_cache_free(sc, nsh);
1479		}
1480		kmem_cache_destroy(sc);
 
1481		return -ENOMEM;
1482	}
1483	/* Step 2 - Must use GFP_NOIO now.
1484	 * OK, we have enough stripes, start collecting inactive
1485	 * stripes and copying them over
1486	 */
 
 
1487	list_for_each_entry(nsh, &newstripes, lru) {
1488		spin_lock_irq(&conf->device_lock);
1489		wait_event_lock_irq(conf->wait_for_stripe,
1490				    !list_empty(&conf->inactive_list),
1491				    conf->device_lock,
1492				    );
1493		osh = get_free_stripe(conf);
1494		spin_unlock_irq(&conf->device_lock);
1495		atomic_set(&nsh->count, 1);
1496		for(i=0; i<conf->pool_size; i++)
1497			nsh->dev[i].page = osh->dev[i].page;
1498		for( ; i<newsize; i++)
1499			nsh->dev[i].page = NULL;
 
1500		kmem_cache_free(conf->slab_cache, osh);
1501	}
1502	kmem_cache_destroy(conf->slab_cache);
1503
1504	/* Step 3.
1505	 * At this point, we are holding all the stripes so the array
1506	 * is completely stalled, so now is a good time to resize
1507	 * conf->disks and the scribble region
1508	 */
1509	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1510	if (ndisks) {
1511		for (i=0; i<conf->raid_disks; i++)
1512			ndisks[i] = conf->disks[i];
1513		kfree(conf->disks);
1514		conf->disks = ndisks;
1515	} else
1516		err = -ENOMEM;
1517
1518	get_online_cpus();
1519	conf->scribble_len = scribble_len(newsize);
1520	for_each_present_cpu(cpu) {
1521		struct raid5_percpu *percpu;
1522		void *scribble;
1523
1524		percpu = per_cpu_ptr(conf->percpu, cpu);
1525		scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1526
1527		if (scribble) {
1528			kfree(percpu->scribble);
1529			percpu->scribble = scribble;
1530		} else {
1531			err = -ENOMEM;
1532			break;
1533		}
1534	}
1535	put_online_cpus();
1536
1537	/* Step 4, return new stripes to service */
1538	while(!list_empty(&newstripes)) {
1539		nsh = list_entry(newstripes.next, struct stripe_head, lru);
1540		list_del_init(&nsh->lru);
1541
1542		for (i=conf->raid_disks; i < newsize; i++)
1543			if (nsh->dev[i].page == NULL) {
1544				struct page *p = alloc_page(GFP_NOIO);
1545				nsh->dev[i].page = p;
 
1546				if (!p)
1547					err = -ENOMEM;
1548			}
1549		release_stripe(nsh);
1550	}
1551	/* critical section passed, GFP_NOIO no longer needed */
1552
1553	conf->slab_cache = sc;
1554	conf->active_name = 1-conf->active_name;
1555	conf->pool_size = newsize;
 
1556	return err;
1557}
1558
1559static int drop_one_stripe(raid5_conf_t *conf)
1560{
1561	struct stripe_head *sh;
 
1562
1563	spin_lock_irq(&conf->device_lock);
1564	sh = get_free_stripe(conf);
1565	spin_unlock_irq(&conf->device_lock);
1566	if (!sh)
1567		return 0;
1568	BUG_ON(atomic_read(&sh->count));
1569	shrink_buffers(sh);
1570	kmem_cache_free(conf->slab_cache, sh);
1571	atomic_dec(&conf->active_stripes);
 
1572	return 1;
1573}
1574
1575static void shrink_stripes(raid5_conf_t *conf)
1576{
1577	while (drop_one_stripe(conf))
 
1578		;
1579
1580	if (conf->slab_cache)
1581		kmem_cache_destroy(conf->slab_cache);
1582	conf->slab_cache = NULL;
1583}
1584
1585static void raid5_end_read_request(struct bio * bi, int error)
1586{
1587	struct stripe_head *sh = bi->bi_private;
1588	raid5_conf_t *conf = sh->raid_conf;
1589	int disks = sh->disks, i;
1590	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1591	char b[BDEVNAME_SIZE];
1592	mdk_rdev_t *rdev;
1593
1594
1595	for (i=0 ; i<disks; i++)
1596		if (bi == &sh->dev[i].req)
1597			break;
1598
1599	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1600		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
1601		uptodate);
1602	if (i == disks) {
1603		BUG();
1604		return;
1605	}
1606
1607	if (uptodate) {
1608		set_bit(R5_UPTODATE, &sh->dev[i].flags);
1609		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1610			rdev = conf->disks[i].rdev;
1611			printk_ratelimited(
1612				KERN_INFO
1613				"md/raid:%s: read error corrected"
1614				" (%lu sectors at %llu on %s)\n",
1615				mdname(conf->mddev), STRIPE_SECTORS,
1616				(unsigned long long)(sh->sector
1617						     + rdev->data_offset),
1618				bdevname(rdev->bdev, b));
1619			atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1620			clear_bit(R5_ReadError, &sh->dev[i].flags);
1621			clear_bit(R5_ReWrite, &sh->dev[i].flags);
1622		}
1623		if (atomic_read(&conf->disks[i].rdev->read_errors))
1624			atomic_set(&conf->disks[i].rdev->read_errors, 0);
1625	} else {
1626		const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1627		int retry = 0;
1628		rdev = conf->disks[i].rdev;
1629
1630		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1631		atomic_inc(&rdev->read_errors);
1632		if (conf->mddev->degraded >= conf->max_degraded)
1633			printk_ratelimited(
1634				KERN_WARNING
1635				"md/raid:%s: read error not correctable "
1636				"(sector %llu on %s).\n",
1637				mdname(conf->mddev),
1638				(unsigned long long)(sh->sector
1639						     + rdev->data_offset),
1640				bdn);
1641		else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1642			/* Oh, no!!! */
 
1643			printk_ratelimited(
1644				KERN_WARNING
1645				"md/raid:%s: read error NOT corrected!! "
1646				"(sector %llu on %s).\n",
1647				mdname(conf->mddev),
1648				(unsigned long long)(sh->sector
1649						     + rdev->data_offset),
1650				bdn);
1651		else if (atomic_read(&rdev->read_errors)
1652			 > conf->max_nr_stripes)
1653			printk(KERN_WARNING
1654			       "md/raid:%s: Too many read errors, failing device %s.\n",
1655			       mdname(conf->mddev), bdn);
1656		else
1657			retry = 1;
1658		if (retry)
1659			set_bit(R5_ReadError, &sh->dev[i].flags);
1660		else {
1661			clear_bit(R5_ReadError, &sh->dev[i].flags);
1662			clear_bit(R5_ReWrite, &sh->dev[i].flags);
1663			md_error(conf->mddev, rdev);
1664		}
1665	}
1666	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1667	clear_bit(R5_LOCKED, &sh->dev[i].flags);
1668	set_bit(STRIPE_HANDLE, &sh->state);
1669	release_stripe(sh);
1670}
1671
1672static void raid5_end_write_request(struct bio *bi, int error)
1673{
1674	struct stripe_head *sh = bi->bi_private;
1675	raid5_conf_t *conf = sh->raid_conf;
1676	int disks = sh->disks, i;
1677	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1678	sector_t first_bad;
1679	int bad_sectors;
 
1680
1681	for (i=0 ; i<disks; i++)
1682		if (bi == &sh->dev[i].req)
1683			break;
1684
1685	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
 
1686		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
1687		uptodate);
1688	if (i == disks) {
1689		BUG();
1690		return;
1691	}
1692
1693	if (!uptodate) {
1694		set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
1695		set_bit(R5_WriteError, &sh->dev[i].flags);
1696	} else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
1697			       &first_bad, &bad_sectors))
1698		set_bit(R5_MadeGood, &sh->dev[i].flags);
1699
1700	rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1701	
1702	clear_bit(R5_LOCKED, &sh->dev[i].flags);
 
 
1703	set_bit(STRIPE_HANDLE, &sh->state);
1704	release_stripe(sh);
1705}
1706
1707
1708static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1709	
1710static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1711{
1712	struct r5dev *dev = &sh->dev[i];
1713
1714	bio_init(&dev->req);
1715	dev->req.bi_io_vec = &dev->vec;
1716	dev->req.bi_vcnt++;
1717	dev->req.bi_max_vecs++;
1718	dev->vec.bv_page = dev->page;
1719	dev->vec.bv_len = STRIPE_SIZE;
1720	dev->vec.bv_offset = 0;
1721
1722	dev->req.bi_sector = sh->sector;
1723	dev->req.bi_private = sh;
 
 
1724
1725	dev->flags = 0;
1726	dev->sector = compute_blocknr(sh, i, previous);
1727}
1728
1729static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1730{
1731	char b[BDEVNAME_SIZE];
1732	raid5_conf_t *conf = mddev->private;
 
1733	pr_debug("raid456: error called\n");
1734
1735	if (test_and_clear_bit(In_sync, &rdev->flags)) {
1736		unsigned long flags;
1737		spin_lock_irqsave(&conf->device_lock, flags);
1738		mddev->degraded++;
1739		spin_unlock_irqrestore(&conf->device_lock, flags);
1740		/*
1741		 * if recovery was running, make sure it aborts.
1742		 */
1743		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1744	}
1745	set_bit(Blocked, &rdev->flags);
1746	set_bit(Faulty, &rdev->flags);
1747	set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
1748	printk(KERN_ALERT
1749	       "md/raid:%s: Disk failure on %s, disabling device.\n"
1750	       "md/raid:%s: Operation continuing on %d devices.\n",
1751	       mdname(mddev),
1752	       bdevname(rdev->bdev, b),
1753	       mdname(mddev),
1754	       conf->raid_disks - mddev->degraded);
1755}
1756
1757/*
1758 * Input: a 'big' sector number,
1759 * Output: index of the data and parity disk, and the sector # in them.
1760 */
1761static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1762				     int previous, int *dd_idx,
1763				     struct stripe_head *sh)
1764{
1765	sector_t stripe, stripe2;
1766	sector_t chunk_number;
1767	unsigned int chunk_offset;
1768	int pd_idx, qd_idx;
1769	int ddf_layout = 0;
1770	sector_t new_sector;
1771	int algorithm = previous ? conf->prev_algo
1772				 : conf->algorithm;
1773	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1774					 : conf->chunk_sectors;
1775	int raid_disks = previous ? conf->previous_raid_disks
1776				  : conf->raid_disks;
1777	int data_disks = raid_disks - conf->max_degraded;
1778
1779	/* First compute the information on this sector */
1780
1781	/*
1782	 * Compute the chunk number and the sector offset inside the chunk
1783	 */
1784	chunk_offset = sector_div(r_sector, sectors_per_chunk);
1785	chunk_number = r_sector;
1786
1787	/*
1788	 * Compute the stripe number
1789	 */
1790	stripe = chunk_number;
1791	*dd_idx = sector_div(stripe, data_disks);
1792	stripe2 = stripe;
1793	/*
1794	 * Select the parity disk based on the user selected algorithm.
1795	 */
1796	pd_idx = qd_idx = -1;
1797	switch(conf->level) {
1798	case 4:
1799		pd_idx = data_disks;
1800		break;
1801	case 5:
1802		switch (algorithm) {
1803		case ALGORITHM_LEFT_ASYMMETRIC:
1804			pd_idx = data_disks - sector_div(stripe2, raid_disks);
1805			if (*dd_idx >= pd_idx)
1806				(*dd_idx)++;
1807			break;
1808		case ALGORITHM_RIGHT_ASYMMETRIC:
1809			pd_idx = sector_div(stripe2, raid_disks);
1810			if (*dd_idx >= pd_idx)
1811				(*dd_idx)++;
1812			break;
1813		case ALGORITHM_LEFT_SYMMETRIC:
1814			pd_idx = data_disks - sector_div(stripe2, raid_disks);
1815			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1816			break;
1817		case ALGORITHM_RIGHT_SYMMETRIC:
1818			pd_idx = sector_div(stripe2, raid_disks);
1819			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1820			break;
1821		case ALGORITHM_PARITY_0:
1822			pd_idx = 0;
1823			(*dd_idx)++;
1824			break;
1825		case ALGORITHM_PARITY_N:
1826			pd_idx = data_disks;
1827			break;
1828		default:
1829			BUG();
1830		}
1831		break;
1832	case 6:
1833
1834		switch (algorithm) {
1835		case ALGORITHM_LEFT_ASYMMETRIC:
1836			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1837			qd_idx = pd_idx + 1;
1838			if (pd_idx == raid_disks-1) {
1839				(*dd_idx)++;	/* Q D D D P */
1840				qd_idx = 0;
1841			} else if (*dd_idx >= pd_idx)
1842				(*dd_idx) += 2; /* D D P Q D */
1843			break;
1844		case ALGORITHM_RIGHT_ASYMMETRIC:
1845			pd_idx = sector_div(stripe2, raid_disks);
1846			qd_idx = pd_idx + 1;
1847			if (pd_idx == raid_disks-1) {
1848				(*dd_idx)++;	/* Q D D D P */
1849				qd_idx = 0;
1850			} else if (*dd_idx >= pd_idx)
1851				(*dd_idx) += 2; /* D D P Q D */
1852			break;
1853		case ALGORITHM_LEFT_SYMMETRIC:
1854			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1855			qd_idx = (pd_idx + 1) % raid_disks;
1856			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1857			break;
1858		case ALGORITHM_RIGHT_SYMMETRIC:
1859			pd_idx = sector_div(stripe2, raid_disks);
1860			qd_idx = (pd_idx + 1) % raid_disks;
1861			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1862			break;
1863
1864		case ALGORITHM_PARITY_0:
1865			pd_idx = 0;
1866			qd_idx = 1;
1867			(*dd_idx) += 2;
1868			break;
1869		case ALGORITHM_PARITY_N:
1870			pd_idx = data_disks;
1871			qd_idx = data_disks + 1;
1872			break;
1873
1874		case ALGORITHM_ROTATING_ZERO_RESTART:
1875			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
1876			 * of blocks for computing Q is different.
1877			 */
1878			pd_idx = sector_div(stripe2, raid_disks);
1879			qd_idx = pd_idx + 1;
1880			if (pd_idx == raid_disks-1) {
1881				(*dd_idx)++;	/* Q D D D P */
1882				qd_idx = 0;
1883			} else if (*dd_idx >= pd_idx)
1884				(*dd_idx) += 2; /* D D P Q D */
1885			ddf_layout = 1;
1886			break;
1887
1888		case ALGORITHM_ROTATING_N_RESTART:
1889			/* Same as left_asymmetric, but the first stripe is
1890			 * D D D P Q  rather than
1891			 * Q D D D P
1892			 */
1893			stripe2 += 1;
1894			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1895			qd_idx = pd_idx + 1;
1896			if (pd_idx == raid_disks-1) {
1897				(*dd_idx)++;	/* Q D D D P */
1898				qd_idx = 0;
1899			} else if (*dd_idx >= pd_idx)
1900				(*dd_idx) += 2; /* D D P Q D */
1901			ddf_layout = 1;
1902			break;
1903
1904		case ALGORITHM_ROTATING_N_CONTINUE:
1905			/* Same as left_symmetric but Q is before P */
1906			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1907			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1908			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1909			ddf_layout = 1;
1910			break;
1911
1912		case ALGORITHM_LEFT_ASYMMETRIC_6:
1913			/* RAID5 left_asymmetric, with Q on last device */
1914			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1915			if (*dd_idx >= pd_idx)
1916				(*dd_idx)++;
1917			qd_idx = raid_disks - 1;
1918			break;
1919
1920		case ALGORITHM_RIGHT_ASYMMETRIC_6:
1921			pd_idx = sector_div(stripe2, raid_disks-1);
1922			if (*dd_idx >= pd_idx)
1923				(*dd_idx)++;
1924			qd_idx = raid_disks - 1;
1925			break;
1926
1927		case ALGORITHM_LEFT_SYMMETRIC_6:
1928			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1929			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1930			qd_idx = raid_disks - 1;
1931			break;
1932
1933		case ALGORITHM_RIGHT_SYMMETRIC_6:
1934			pd_idx = sector_div(stripe2, raid_disks-1);
1935			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1936			qd_idx = raid_disks - 1;
1937			break;
1938
1939		case ALGORITHM_PARITY_0_6:
1940			pd_idx = 0;
1941			(*dd_idx)++;
1942			qd_idx = raid_disks - 1;
1943			break;
1944
1945		default:
1946			BUG();
1947		}
1948		break;
1949	}
1950
1951	if (sh) {
1952		sh->pd_idx = pd_idx;
1953		sh->qd_idx = qd_idx;
1954		sh->ddf_layout = ddf_layout;
1955	}
1956	/*
1957	 * Finally, compute the new sector number
1958	 */
1959	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1960	return new_sector;
1961}
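    /*
     * Worked example (hypothetical geometry): level 5, 4 disks,
     * ALGORITHM_LEFT_SYMMETRIC, 128-sector chunks, r_sector == 300.
     * chunk_offset == 44 and chunk_number == 2, so stripe == 0 and the raw
     * *dd_idx == 2 (2 % 3 data disks).  pd_idx == 3 - (0 % 4) == 3, the
     * rotated *dd_idx == (3 + 1 + 2) % 4 == 2, and new_sector == 44, i.e.
     * the block lives on device 2 at sector 44 with parity on device 3.
     */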
1962
1963
1964static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1965{
1966	raid5_conf_t *conf = sh->raid_conf;
1967	int raid_disks = sh->disks;
1968	int data_disks = raid_disks - conf->max_degraded;
1969	sector_t new_sector = sh->sector, check;
1970	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1971					 : conf->chunk_sectors;
1972	int algorithm = previous ? conf->prev_algo
1973				 : conf->algorithm;
1974	sector_t stripe;
1975	int chunk_offset;
1976	sector_t chunk_number;
1977	int dummy1, dd_idx = i;
1978	sector_t r_sector;
1979	struct stripe_head sh2;
1980
1981
1982	chunk_offset = sector_div(new_sector, sectors_per_chunk);
1983	stripe = new_sector;
1984
1985	if (i == sh->pd_idx)
1986		return 0;
1987	switch(conf->level) {
1988	case 4: break;
1989	case 5:
1990		switch (algorithm) {
1991		case ALGORITHM_LEFT_ASYMMETRIC:
1992		case ALGORITHM_RIGHT_ASYMMETRIC:
1993			if (i > sh->pd_idx)
1994				i--;
1995			break;
1996		case ALGORITHM_LEFT_SYMMETRIC:
1997		case ALGORITHM_RIGHT_SYMMETRIC:
1998			if (i < sh->pd_idx)
1999				i += raid_disks;
2000			i -= (sh->pd_idx + 1);
2001			break;
2002		case ALGORITHM_PARITY_0:
2003			i -= 1;
2004			break;
2005		case ALGORITHM_PARITY_N:
2006			break;
2007		default:
2008			BUG();
2009		}
2010		break;
2011	case 6:
2012		if (i == sh->qd_idx)
2013			return 0; /* It is the Q disk */
2014		switch (algorithm) {
2015		case ALGORITHM_LEFT_ASYMMETRIC:
2016		case ALGORITHM_RIGHT_ASYMMETRIC:
2017		case ALGORITHM_ROTATING_ZERO_RESTART:
2018		case ALGORITHM_ROTATING_N_RESTART:
2019			if (sh->pd_idx == raid_disks-1)
2020				i--;	/* Q D D D P */
2021			else if (i > sh->pd_idx)
2022				i -= 2; /* D D P Q D */
2023			break;
2024		case ALGORITHM_LEFT_SYMMETRIC:
2025		case ALGORITHM_RIGHT_SYMMETRIC:
2026			if (sh->pd_idx == raid_disks-1)
2027				i--; /* Q D D D P */
2028			else {
2029				/* D D P Q D */
2030				if (i < sh->pd_idx)
2031					i += raid_disks;
2032				i -= (sh->pd_idx + 2);
2033			}
2034			break;
2035		case ALGORITHM_PARITY_0:
2036			i -= 2;
2037			break;
2038		case ALGORITHM_PARITY_N:
2039			break;
2040		case ALGORITHM_ROTATING_N_CONTINUE:
2041			/* Like left_symmetric, but P is before Q */
2042			if (sh->pd_idx == 0)
2043				i--;	/* P D D D Q */
2044			else {
2045				/* D D Q P D */
2046				if (i < sh->pd_idx)
2047					i += raid_disks;
2048				i -= (sh->pd_idx + 1);
2049			}
2050			break;
2051		case ALGORITHM_LEFT_ASYMMETRIC_6:
2052		case ALGORITHM_RIGHT_ASYMMETRIC_6:
2053			if (i > sh->pd_idx)
2054				i--;
2055			break;
2056		case ALGORITHM_LEFT_SYMMETRIC_6:
2057		case ALGORITHM_RIGHT_SYMMETRIC_6:
2058			if (i < sh->pd_idx)
2059				i += data_disks + 1;
2060			i -= (sh->pd_idx + 1);
2061			break;
2062		case ALGORITHM_PARITY_0_6:
2063			i -= 1;
2064			break;
2065		default:
2066			BUG();
2067		}
2068		break;
2069	}
2070
2071	chunk_number = stripe * data_disks + i;
2072	r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2073
2074	check = raid5_compute_sector(conf, r_sector,
2075				     previous, &dummy1, &sh2);
2076	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2077		|| sh2.qd_idx != sh->qd_idx) {
2078		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2079		       mdname(conf->mddev));
2080		return 0;
2081	}
2082	return r_sector;
2083}
2084
2085
2086static void
2087schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2088			 int rcw, int expand)
2089{
2090	int i, pd_idx = sh->pd_idx, disks = sh->disks;
2091	raid5_conf_t *conf = sh->raid_conf;
2092	int level = conf->level;
2093
2094	if (rcw) {
2095		/* if we are not expanding this is a proper write request, and
2096		 * there will be bios with new data to be drained into the
2097		 * stripe cache
2098		 */
2099		if (!expand) {
2100			sh->reconstruct_state = reconstruct_state_drain_run;
2101			set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2102		} else
2103			sh->reconstruct_state = reconstruct_state_run;
2104
2105		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2106
2107		for (i = disks; i--; ) {
2108			struct r5dev *dev = &sh->dev[i];
2109
2110			if (dev->towrite) {
2111				set_bit(R5_LOCKED, &dev->flags);
2112				set_bit(R5_Wantdrain, &dev->flags);
2113				if (!expand)
2114					clear_bit(R5_UPTODATE, &dev->flags);
2115				s->locked++;
2116			}
2117		}
2118		if (s->locked + conf->max_degraded == disks)
2119			if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2120				atomic_inc(&conf->pending_full_writes);
2121	} else {
2122		BUG_ON(level == 6);
2123		BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2124			test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2125
2126		sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2127		set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2128		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2129		set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2130
2131		for (i = disks; i--; ) {
2132			struct r5dev *dev = &sh->dev[i];
2133			if (i == pd_idx)
2134				continue;
2135
2136			if (dev->towrite &&
2137			    (test_bit(R5_UPTODATE, &dev->flags) ||
2138			     test_bit(R5_Wantcompute, &dev->flags))) {
2139				set_bit(R5_Wantdrain, &dev->flags);
2140				set_bit(R5_LOCKED, &dev->flags);
2141				clear_bit(R5_UPTODATE, &dev->flags);
2142				s->locked++;
2143			}
2144		}
2145	}
2146
2147	/* keep the parity disk(s) locked while asynchronous operations
2148	 * are in flight
2149	 */
2150	set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2151	clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2152	s->locked++;
2153
2154	if (level == 6) {
2155		int qd_idx = sh->qd_idx;
2156		struct r5dev *dev = &sh->dev[qd_idx];
2157
2158		set_bit(R5_LOCKED, &dev->flags);
2159		clear_bit(R5_UPTODATE, &dev->flags);
2160		s->locked++;
2161	}
2162
2163	pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2164		__func__, (unsigned long long)sh->sector,
2165		s->locked, s->ops_request);
2166}
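    /*
     * In short: the rcw branch schedules a reconstruct-write (drain the new
     * data, then recompute parity from every block in the stripe), while the
     * else branch schedules prexor + drain + reconstruct so that only the
     * rewritten blocks and the parity are touched; the latter is only valid
     * for RAID-4/5, hence the BUG_ON(level == 6) above.
     */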
2167
2168/*
2169 * Each stripe/dev can have one or more bion attached.
2170 * toread/towrite point to the first in a chain.
2171 * The bi_next chain must be in order.
2172 */
2173static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
 
2174{
2175	struct bio **bip;
2176	raid5_conf_t *conf = sh->raid_conf;
2177	int firstwrite=0;
2178
2179	pr_debug("adding bi b#%llu to stripe s#%llu\n",
2180		(unsigned long long)bi->bi_sector,
2181		(unsigned long long)sh->sector);
2182
2183
2184	spin_lock_irq(&conf->device_lock);
2185	if (forwrite) {
2186		bip = &sh->dev[dd_idx].towrite;
2187		if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2188			firstwrite = 1;
2189	} else
2190		bip = &sh->dev[dd_idx].toread;
2191	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2192		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2193			goto overlap;
2194		bip = & (*bip)->bi_next;
2195	}
2196	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2197		goto overlap;
2198
2199	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2200	if (*bip)
2201		bi->bi_next = *bip;
2202	*bip = bi;
2203	bi->bi_phys_segments++;
2204
2205	if (forwrite) {
2206		/* check if page is covered */
2207		sector_t sector = sh->dev[dd_idx].sector;
2208		for (bi=sh->dev[dd_idx].towrite;
2209		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2210			     bi && bi->bi_sector <= sector;
2211		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2212			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2213				sector = bi->bi_sector + (bi->bi_size>>9);
2214		}
2215		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2216			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
 
2217	}
2218	spin_unlock_irq(&conf->device_lock);
2219
2220	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2221		(unsigned long long)(*bip)->bi_sector,
2222		(unsigned long long)sh->sector, dd_idx);
2223
2224	if (conf->mddev->bitmap && firstwrite) {
2225		bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2226				  STRIPE_SECTORS, 0);
2227		sh->bm_seq = conf->seq_flush+1;
2228		set_bit(STRIPE_BIT_DELAY, &sh->state);
2229	}
2230	return 1;
2231
2232 overlap:
2233	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2234	spin_unlock_irq(&conf->device_lock);
2235	return 0;
2236}
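    /*
     * bi->bi_phys_segments doubles here as a count of stripes that still
     * reference the bio: it is incremented above each time the bio is
     * attached to a stripe, dropped again via raid5_dec_bi_phys_segments()
     * when each stripe finishes with it, and the bio is only completed
     * (queued on *return_bi / return_io()) once that count reaches zero.
     */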
2237
2238static void end_reshape(raid5_conf_t *conf);
2239
2240static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2241			    struct stripe_head *sh)
2242{
2243	int sectors_per_chunk =
2244		previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2245	int dd_idx;
2246	int chunk_offset = sector_div(stripe, sectors_per_chunk);
2247	int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2248
2249	raid5_compute_sector(conf,
2250			     stripe * (disks - conf->max_degraded)
2251			     *sectors_per_chunk + chunk_offset,
2252			     previous,
2253			     &dd_idx, sh);
2254}
2255
2256static void
2257handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2258				struct stripe_head_state *s, int disks,
2259				struct bio **return_bi)
2260{
2261	int i;
 
2262	for (i = disks; i--; ) {
2263		struct bio *bi;
2264		int bitmap_end = 0;
2265
2266		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2267			mdk_rdev_t *rdev;
2268			rcu_read_lock();
2269			rdev = rcu_dereference(conf->disks[i].rdev);
2270			if (rdev && test_bit(In_sync, &rdev->flags))
2271				atomic_inc(&rdev->nr_pending);
2272			else
2273				rdev = NULL;
2274			rcu_read_unlock();
2275			if (rdev) {
2276				if (!rdev_set_badblocks(
2277					    rdev,
2278					    sh->sector,
2279					    STRIPE_SECTORS, 0))
2280					md_error(conf->mddev, rdev);
2281				rdev_dec_pending(rdev, conf->mddev);
2282			}
2283		}
2284		spin_lock_irq(&conf->device_lock);
2285		/* fail all writes first */
2286		bi = sh->dev[i].towrite;
2287		sh->dev[i].towrite = NULL;
2288		if (bi) {
2289			s->to_write--;
 
2290			bitmap_end = 1;
2291		}
 
2292
2293		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2294			wake_up(&conf->wait_for_overlap);
2295
2296		while (bi && bi->bi_sector <
2297			sh->dev[i].sector + STRIPE_SECTORS) {
2298			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2299			clear_bit(BIO_UPTODATE, &bi->bi_flags);
2300			if (!raid5_dec_bi_phys_segments(bi)) {
 
2301				md_write_end(conf->mddev);
2302				bi->bi_next = *return_bi;
2303				*return_bi = bi;
2304			}
2305			bi = nextbi;
2306		}
2307		/* and fail all 'written' */
2308		bi = sh->dev[i].written;
2309		sh->dev[i].written = NULL;
2310		if (bi) bitmap_end = 1;
2311		while (bi && bi->bi_sector <
2312		       sh->dev[i].sector + STRIPE_SECTORS) {
2313			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2314			clear_bit(BIO_UPTODATE, &bi->bi_flags);
2315			if (!raid5_dec_bi_phys_segments(bi)) {
 
2316				md_write_end(conf->mddev);
2317				bi->bi_next = *return_bi;
2318				*return_bi = bi;
2319			}
2320			bi = bi2;
2321		}
2322
2323		/* fail any reads if this device is non-operational and
2324		 * the data has not reached the cache yet.
2325		 */
2326		if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
 
2327		    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2328		      test_bit(R5_ReadError, &sh->dev[i].flags))) {
 
2329			bi = sh->dev[i].toread;
2330			sh->dev[i].toread = NULL;
 
2331			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2332				wake_up(&conf->wait_for_overlap);
2333			if (bi) s->to_read--;
2334			while (bi && bi->bi_sector <
 
2335			       sh->dev[i].sector + STRIPE_SECTORS) {
2336				struct bio *nextbi =
2337					r5_next_bio(bi, sh->dev[i].sector);
2338				clear_bit(BIO_UPTODATE, &bi->bi_flags);
2339				if (!raid5_dec_bi_phys_segments(bi)) {
2340					bi->bi_next = *return_bi;
2341					*return_bi = bi;
2342				}
2343				bi = nextbi;
2344			}
2345		}
2346		spin_unlock_irq(&conf->device_lock);
2347		if (bitmap_end)
2348			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2349					STRIPE_SECTORS, 0, 0);
2350		/* If we were in the middle of a write the parity block might
2351		 * still be locked - so just clear all R5_LOCKED flags
2352		 */
2353		clear_bit(R5_LOCKED, &sh->dev[i].flags);
2354	}
 
 
2355
2356	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2357		if (atomic_dec_and_test(&conf->pending_full_writes))
2358			md_wakeup_thread(conf->mddev->thread);
2359}
2360
2361static void
2362handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
2363		   struct stripe_head_state *s)
2364{
2365	int abort = 0;
2366	int i;
2367
2368	md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
2369	clear_bit(STRIPE_SYNCING, &sh->state);
 
 
2370	s->syncing = 0;
 
2371	/* There is nothing more to do for sync/check/repair.
2372	 * For recover we need to record a bad block on all
2373	 * non-sync devices, or abort the recovery
2374	 */
2375	if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
2376		return;
2377	/* During recovery devices cannot be removed, so locking and
2378	 * refcounting of rdevs is not needed
2379	 */
2380	for (i = 0; i < conf->raid_disks; i++) {
2381		mdk_rdev_t *rdev = conf->disks[i].rdev;
2382		if (!rdev
2383		    || test_bit(Faulty, &rdev->flags)
2384		    || test_bit(In_sync, &rdev->flags))
2385			continue;
2386		if (!rdev_set_badblocks(rdev, sh->sector,
2387					STRIPE_SECTORS, 0))
2388			abort = 1;
2389	}
2390	if (abort) {
2391		conf->recovery_disabled = conf->mddev->recovery_disabled;
2392		set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
2393	}
2394}
2395
2396/* fetch_block - checks the given member device to see if its data needs
2397 * to be read or computed to satisfy a request.
2398 *
2399 * Returns 1 when no more member devices need to be checked, otherwise returns
2400 * 0 to tell the loop in handle_stripe_fill to continue
2401 */
2402static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2403		       int disk_idx, int disks)
2404{
2405	struct r5dev *dev = &sh->dev[disk_idx];
2406	struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2407				  &sh->dev[s->failed_num[1]] };
2408
2409	/* is the data in this block needed, and can we get it? */
2410	if (!test_bit(R5_LOCKED, &dev->flags) &&
2411	    !test_bit(R5_UPTODATE, &dev->flags) &&
2412	    (dev->toread ||
2413	     (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2414	     s->syncing || s->expanding ||
2415	     (s->failed >= 1 && fdev[0]->toread) ||
2416	     (s->failed >= 2 && fdev[1]->toread) ||
2417	     (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2418	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2419	     (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2420		/* we would like to get this block, possibly by computing it,
2421		 * otherwise read it if the backing disk is insync
2422		 */
2423		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2424		BUG_ON(test_bit(R5_Wantread, &dev->flags));
 
2425		if ((s->uptodate == disks - 1) &&
2426		    (s->failed && (disk_idx == s->failed_num[0] ||
2427				   disk_idx == s->failed_num[1]))) {
2428			/* the disk has failed, and we're requested to fetch it,
2429			 * so compute it
2430			 */
2431			pr_debug("Computing stripe %llu block %d\n",
2432			       (unsigned long long)sh->sector, disk_idx);
2433			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2434			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2435			set_bit(R5_Wantcompute, &dev->flags);
2436			sh->ops.target = disk_idx;
2437			sh->ops.target2 = -1; /* no 2nd target */
2438			s->req_compute = 1;
2439			/* Careful: from this point on 'uptodate' is in the eye
2440			 * of raid_run_ops which services 'compute' operations
2441			 * before writes. R5_Wantcompute flags a block that will
2442			 * be R5_UPTODATE by the time it is needed for a
2443			 * subsequent operation.
2444			 */
2445			s->uptodate++;
2446			return 1;
2447		} else if (s->uptodate == disks-2 && s->failed >= 2) {
2448			/* Computing 2-failure is *very* expensive; only
2449			 * do it if failed >= 2
2450			 */
2451			int other;
2452			for (other = disks; other--; ) {
2453				if (other == disk_idx)
2454					continue;
2455				if (!test_bit(R5_UPTODATE,
2456				      &sh->dev[other].flags))
2457					break;
2458			}
2459			BUG_ON(other < 0);
2460			pr_debug("Computing stripe %llu blocks %d,%d\n",
2461			       (unsigned long long)sh->sector,
2462			       disk_idx, other);
2463			set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2464			set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2465			set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2466			set_bit(R5_Wantcompute, &sh->dev[other].flags);
2467			sh->ops.target = disk_idx;
2468			sh->ops.target2 = other;
2469			s->uptodate += 2;
2470			s->req_compute = 1;
2471			return 1;
2472		} else if (test_bit(R5_Insync, &dev->flags)) {
2473			set_bit(R5_LOCKED, &dev->flags);
2474			set_bit(R5_Wantread, &dev->flags);
2475			s->locked++;
2476			pr_debug("Reading block %d (sync=%d)\n",
2477				disk_idx, s->syncing);
2478		}
2479	}
2480
2481	return 0;
2482}
2483
2484/**
2485 * handle_stripe_fill - read or compute data to satisfy pending requests.
2486 */
2487static void handle_stripe_fill(struct stripe_head *sh,
2488			       struct stripe_head_state *s,
2489			       int disks)
2490{
2491	int i;
2492
2493	/* look for blocks to read/compute, skip this if a compute
2494	 * is already in flight, or if the stripe contents are in the
2495	 * midst of changing due to a write
2496	 */
2497	if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2498	    !sh->reconstruct_state)
2499		for (i = disks; i--; )
2500			if (fetch_block(sh, s, i, disks))
2501				break;
2502	set_bit(STRIPE_HANDLE, &sh->state);
2503}
2504
2505
 
2506/* handle_stripe_clean_event
2507 * any written block on an uptodate or failed drive can be returned.
2508 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2509 * never LOCKED, so we don't need to test 'failed' directly.
2510 */
2511static void handle_stripe_clean_event(raid5_conf_t *conf,
2512	struct stripe_head *sh, int disks, struct bio **return_bi)
2513{
2514	int i;
2515	struct r5dev *dev;
2516
2517	for (i = disks; i--; )
2518		if (sh->dev[i].written) {
2519			dev = &sh->dev[i];
2520			if (!test_bit(R5_LOCKED, &dev->flags) &&
2521				test_bit(R5_UPTODATE, &dev->flags)) {
 
 
2522				/* We can return any write requests */
2523				struct bio *wbi, *wbi2;
2524				int bitmap_end = 0;
2525				pr_debug("Return write for disc %d\n", i);
2526				spin_lock_irq(&conf->device_lock);
2527				wbi = dev->written;
2528				dev->written = NULL;
2529				while (wbi && wbi->bi_sector <
2530					dev->sector + STRIPE_SECTORS) {
2531					wbi2 = r5_next_bio(wbi, dev->sector);
2532					if (!raid5_dec_bi_phys_segments(wbi)) {
2533						md_write_end(conf->mddev);
2534						wbi->bi_next = *return_bi;
2535						*return_bi = wbi;
2536					}
2537					wbi = wbi2;
2538				}
2539				if (dev->towrite == NULL)
2540					bitmap_end = 1;
2541				spin_unlock_irq(&conf->device_lock);
2542				if (bitmap_end)
2543					bitmap_endwrite(conf->mddev->bitmap,
2544							sh->sector,
2545							STRIPE_SECTORS,
2546					 !test_bit(STRIPE_DEGRADED, &sh->state),
2547							0);
2548			}
2549		}
2550
2551	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2552		if (atomic_dec_and_test(&conf->pending_full_writes))
2553			md_wakeup_thread(conf->mddev->thread);
2554}
2555
2556static void handle_stripe_dirtying(raid5_conf_t *conf,
2557				   struct stripe_head *sh,
2558				   struct stripe_head_state *s,
2559				   int disks)
2560{
2561	int rmw = 0, rcw = 0, i;
2562	if (conf->max_degraded == 2) {
2563		/* RAID6 requires 'rcw' in the current implementation.
2564		 * Calculate the real rcw later - for now fake it to
2565		 * look like rcw is cheaper
2566		 */
2567		rcw = 1; rmw = 2;
2568	} else for (i = disks; i--; ) {
2569		/* would I have to read this buffer for read_modify_write */
2570		struct r5dev *dev = &sh->dev[i];
2571		if ((dev->towrite || i == sh->pd_idx) &&
2572		    !test_bit(R5_LOCKED, &dev->flags) &&
2573		    !(test_bit(R5_UPTODATE, &dev->flags) ||
2574		      test_bit(R5_Wantcompute, &dev->flags))) {
2575			if (test_bit(R5_Insync, &dev->flags))
2576				rmw++;
2577			else
2578				rmw += 2*disks;  /* cannot read it */
2579		}
2580		/* Would I have to read this buffer for reconstruct_write */
2581		if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
 
2582		    !test_bit(R5_LOCKED, &dev->flags) &&
2583		    !(test_bit(R5_UPTODATE, &dev->flags) ||
2584		    test_bit(R5_Wantcompute, &dev->flags))) {
2585			if (test_bit(R5_Insync, &dev->flags)) rcw++;
 
2586			else
2587				rcw += 2*disks;
2588		}
2589	}
2590	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2591		(unsigned long long)sh->sector, rmw, rcw);
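	/* For example, on a 5-drive RAID5 stripe where exactly one data block
	 * is being completely overwritten, read-modify-write must read the
	 * old data block and the old parity (rmw = 2), while reconstruct-write
	 * must read the three untouched data blocks (rcw = 3), so rmw wins.
	 */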
2592	set_bit(STRIPE_HANDLE, &sh->state);
2593	if (rmw < rcw && rmw > 0)
2594		/* prefer read-modify-write, but need to get some data */
2595		for (i = disks; i--; ) {
2596			struct r5dev *dev = &sh->dev[i];
2597			if ((dev->towrite || i == sh->pd_idx) &&
2598			    !test_bit(R5_LOCKED, &dev->flags) &&
2599			    !(test_bit(R5_UPTODATE, &dev->flags) ||
2600			    test_bit(R5_Wantcompute, &dev->flags)) &&
2601			    test_bit(R5_Insync, &dev->flags)) {
2602				if (
2603				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2604					pr_debug("Read_old block "
2605						"%d for r-m-w\n", i);
2606					set_bit(R5_LOCKED, &dev->flags);
2607					set_bit(R5_Wantread, &dev->flags);
2608					s->locked++;
2609				} else {
2610					set_bit(STRIPE_DELAYED, &sh->state);
2611					set_bit(STRIPE_HANDLE, &sh->state);
2612				}
2613			}
2614		}
2615	if (rcw <= rmw && rcw > 0) {
 
2616		/* want reconstruct write, but need to get some data */
 
2617		rcw = 0;
2618		for (i = disks; i--; ) {
2619			struct r5dev *dev = &sh->dev[i];
2620			if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2621			    i != sh->pd_idx && i != sh->qd_idx &&
2622			    !test_bit(R5_LOCKED, &dev->flags) &&
2623			    !(test_bit(R5_UPTODATE, &dev->flags) ||
2624			      test_bit(R5_Wantcompute, &dev->flags))) {
2625				rcw++;
2626				if (!test_bit(R5_Insync, &dev->flags))
2627					continue; /* it's a failed drive */
2628				if (
2629				  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2630					pr_debug("Read_old block "
2631						"%d for Reconstruct\n", i);
2632					set_bit(R5_LOCKED, &dev->flags);
2633					set_bit(R5_Wantread, &dev->flags);
2634					s->locked++;
 
2635				} else {
2636					set_bit(STRIPE_DELAYED, &sh->state);
2637					set_bit(STRIPE_HANDLE, &sh->state);
2638				}
2639			}
2640		}
2641	}
2642	/* now if nothing is locked, and if we have enough data,
2643	 * we can start a write request
2644	 */
2645	/* since handle_stripe can be called at any time we need to handle the
2646	 * case where a compute block operation has been submitted and then a
2647	 * subsequent call wants to start a write request.  raid_run_ops only
2648	 * handles the case where compute block and reconstruct are requested
2649	 * simultaneously.  If this is not the case then new writes need to be
2650	 * held off until the compute completes.
2651	 */
2652	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2653	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2654	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2655		schedule_reconstruction(sh, s, rcw == 0, 0);
2656}
2657
2658static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2659				struct stripe_head_state *s, int disks)
2660{
2661	struct r5dev *dev = NULL;
2662
 
2663	set_bit(STRIPE_HANDLE, &sh->state);
2664
2665	switch (sh->check_state) {
2666	case check_state_idle:
2667		/* start a new check operation if there are no failures */
2668		if (s->failed == 0) {
2669			BUG_ON(s->uptodate != disks);
2670			sh->check_state = check_state_run;
2671			set_bit(STRIPE_OP_CHECK, &s->ops_request);
2672			clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2673			s->uptodate--;
2674			break;
2675		}
2676		dev = &sh->dev[s->failed_num[0]];
2677		/* fall through */
2678	case check_state_compute_result:
2679		sh->check_state = check_state_idle;
2680		if (!dev)
2681			dev = &sh->dev[sh->pd_idx];
2682
2683		/* check that a write has not made the stripe insync */
2684		if (test_bit(STRIPE_INSYNC, &sh->state))
2685			break;
2686
2687		/* either failed parity check, or recovery is happening */
2688		BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2689		BUG_ON(s->uptodate != disks);
2690
2691		set_bit(R5_LOCKED, &dev->flags);
2692		s->locked++;
2693		set_bit(R5_Wantwrite, &dev->flags);
2694
2695		clear_bit(STRIPE_DEGRADED, &sh->state);
2696		set_bit(STRIPE_INSYNC, &sh->state);
2697		break;
2698	case check_state_run:
2699		break; /* we will be called again upon completion */
2700	case check_state_check_result:
2701		sh->check_state = check_state_idle;
2702
2703		/* if a failure occurred during the check operation, leave
2704		 * STRIPE_INSYNC not set and let the stripe be handled again
2705		 */
2706		if (s->failed)
2707			break;
2708
2709		/* handle a successful check operation, if parity is correct
2710		 * we are done.  Otherwise update the mismatch count and repair
2711		 * parity if !MD_RECOVERY_CHECK
2712		 */
2713		if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2714			/* parity is correct (on disc,
2715			 * not in buffer any more)
2716			 */
2717			set_bit(STRIPE_INSYNC, &sh->state);
2718		else {
2719			conf->mddev->resync_mismatches += STRIPE_SECTORS;
2720			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2721				/* don't try to repair!! */
2722				set_bit(STRIPE_INSYNC, &sh->state);
2723			else {
2724				sh->check_state = check_state_compute_run;
2725				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2726				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2727				set_bit(R5_Wantcompute,
2728					&sh->dev[sh->pd_idx].flags);
2729				sh->ops.target = sh->pd_idx;
2730				sh->ops.target2 = -1;
2731				s->uptodate++;
2732			}
2733		}
2734		break;
2735	case check_state_compute_run:
2736		break;
2737	default:
2738		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2739		       __func__, sh->check_state,
2740		       (unsigned long long) sh->sector);
2741		BUG();
2742	}
2743}
2744
2745
2746static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2747				  struct stripe_head_state *s,
2748				  int disks)
2749{
2750	int pd_idx = sh->pd_idx;
2751	int qd_idx = sh->qd_idx;
2752	struct r5dev *dev;
2753
 
2754	set_bit(STRIPE_HANDLE, &sh->state);
2755
2756	BUG_ON(s->failed > 2);
2757
2758	/* Want to check and possibly repair P and Q.
2759	 * However there could be one 'failed' device, in which
2760	 * case we can only check one of them, possibly using the
2761	 * other to generate missing data
2762	 */
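	/* In the idle case below this resolves to: check_state_run verifies P
	 * only (Q is the only possible failure), check_state_run_q verifies Q
	 * only (some device other than Q has failed), and check_state_run_pq
	 * verifies both P and Q when nothing has failed.
	 */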
2763
2764	switch (sh->check_state) {
2765	case check_state_idle:
2766		/* start a new check operation if there are < 2 failures */
2767		if (s->failed == s->q_failed) {
2768			/* The only possible failed device holds Q, so it
2769			 * makes sense to check P (If anything else were failed,
2770			 * we would have used P to recreate it).
2771			 */
2772			sh->check_state = check_state_run;
2773		}
2774		if (!s->q_failed && s->failed < 2) {
2775			/* Q is not failed, and we didn't use it to generate
2776			 * anything, so it makes sense to check it
2777			 */
2778			if (sh->check_state == check_state_run)
2779				sh->check_state = check_state_run_pq;
2780			else
2781				sh->check_state = check_state_run_q;
2782		}
2783
2784		/* discard potentially stale zero_sum_result */
2785		sh->ops.zero_sum_result = 0;
2786
2787		if (sh->check_state == check_state_run) {
2788			/* async_xor_zero_sum destroys the contents of P */
2789			clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2790			s->uptodate--;
2791		}
2792		if (sh->check_state >= check_state_run &&
2793		    sh->check_state <= check_state_run_pq) {
2794			/* async_syndrome_zero_sum preserves P and Q, so
2795			 * no need to mark them !uptodate here
2796			 */
2797			set_bit(STRIPE_OP_CHECK, &s->ops_request);
2798			break;
2799		}
2800
2801		/* we have 2-disk failure */
2802		BUG_ON(s->failed != 2);
2803		/* fall through */
2804	case check_state_compute_result:
2805		sh->check_state = check_state_idle;
2806
2807		/* check that a write has not made the stripe insync */
2808		if (test_bit(STRIPE_INSYNC, &sh->state))
2809			break;
2810
2811		/* now write out any block on a failed drive,
2812		 * or P or Q if they were recomputed
2813		 */
2814		BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2815		if (s->failed == 2) {
2816			dev = &sh->dev[s->failed_num[1]];
2817			s->locked++;
2818			set_bit(R5_LOCKED, &dev->flags);
2819			set_bit(R5_Wantwrite, &dev->flags);
2820		}
2821		if (s->failed >= 1) {
2822			dev = &sh->dev[s->failed_num[0]];
2823			s->locked++;
2824			set_bit(R5_LOCKED, &dev->flags);
2825			set_bit(R5_Wantwrite, &dev->flags);
2826		}
2827		if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2828			dev = &sh->dev[pd_idx];
2829			s->locked++;
2830			set_bit(R5_LOCKED, &dev->flags);
2831			set_bit(R5_Wantwrite, &dev->flags);
2832		}
2833		if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2834			dev = &sh->dev[qd_idx];
2835			s->locked++;
2836			set_bit(R5_LOCKED, &dev->flags);
2837			set_bit(R5_Wantwrite, &dev->flags);
2838		}
2839		clear_bit(STRIPE_DEGRADED, &sh->state);
2840
2841		set_bit(STRIPE_INSYNC, &sh->state);
2842		break;
2843	case check_state_run:
2844	case check_state_run_q:
2845	case check_state_run_pq:
2846		break; /* we will be called again upon completion */
2847	case check_state_check_result:
2848		sh->check_state = check_state_idle;
2849
2850		/* handle a successful check operation, if parity is correct
2851		 * we are done.  Otherwise update the mismatch count and repair
2852		 * parity if !MD_RECOVERY_CHECK
2853		 */
2854		if (sh->ops.zero_sum_result == 0) {
2855			/* both parities are correct */
2856			if (!s->failed)
2857				set_bit(STRIPE_INSYNC, &sh->state);
2858			else {
2859				/* in contrast to the raid5 case we can validate
2860				 * parity, but still have a failure to write
2861				 * back
2862				 */
2863				sh->check_state = check_state_compute_result;
2864				/* Returning at this point means that we may go
2865				 * off and bring p and/or q uptodate again so
2866				 * we make sure to check zero_sum_result again
2867				 * to verify if p or q need writeback
2868				 */
2869			}
2870		} else {
2871			conf->mddev->resync_mismatches += STRIPE_SECTORS;
2872			if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2873				/* don't try to repair!! */
2874				set_bit(STRIPE_INSYNC, &sh->state);
2875			else {
2876				int *target = &sh->ops.target;
2877
2878				sh->ops.target = -1;
2879				sh->ops.target2 = -1;
2880				sh->check_state = check_state_compute_run;
2881				set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2882				set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2883				if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2884					set_bit(R5_Wantcompute,
2885						&sh->dev[pd_idx].flags);
2886					*target = pd_idx;
2887					target = &sh->ops.target2;
2888					s->uptodate++;
2889				}
2890				if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2891					set_bit(R5_Wantcompute,
2892						&sh->dev[qd_idx].flags);
2893					*target = qd_idx;
2894					s->uptodate++;
2895				}
2896			}
2897		}
2898		break;
2899	case check_state_compute_run:
2900		break;
2901	default:
2902		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2903		       __func__, sh->check_state,
2904		       (unsigned long long) sh->sector);
2905		BUG();
2906	}
2907}
2908
2909static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2910{
2911	int i;
2912
2913	/* We have read all the blocks in this stripe and now we need to
2914	 * copy some of them into a target stripe for expand.
2915	 */
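	/* Each data block is mapped into the post-reshape layout below:
	 * compute_blocknr() gives its array address, raid5_compute_sector()
	 * turns that into a destination stripe and disk index, and
	 * async_memcpy() then copies the page across.
	 */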
2916	struct dma_async_tx_descriptor *tx = NULL;
 
2917	clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2918	for (i = 0; i < sh->disks; i++)
2919		if (i != sh->pd_idx && i != sh->qd_idx) {
2920			int dd_idx, j;
2921			struct stripe_head *sh2;
2922			struct async_submit_ctl submit;
2923
2924			sector_t bn = compute_blocknr(sh, i, 1);
2925			sector_t s = raid5_compute_sector(conf, bn, 0,
2926							  &dd_idx, NULL);
2927			sh2 = get_active_stripe(conf, s, 0, 1, 1);
2928			if (sh2 == NULL)
2929				/* so far only the early blocks of this stripe
2930				 * have been requested.  When later blocks
2931				 * get requested, we will try again
2932				 */
2933				continue;
2934			if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2935			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2936				/* must have already done this block */
2937				release_stripe(sh2);
2938				continue;
2939			}
2940
2941			/* place all the copies on one channel */
2942			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2943			tx = async_memcpy(sh2->dev[dd_idx].page,
2944					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
2945					  &submit);
2946
2947			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2948			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2949			for (j = 0; j < conf->raid_disks; j++)
2950				if (j != sh2->pd_idx &&
2951				    j != sh2->qd_idx &&
2952				    !test_bit(R5_Expanded, &sh2->dev[j].flags))
2953					break;
2954			if (j == conf->raid_disks) {
2955				set_bit(STRIPE_EXPAND_READY, &sh2->state);
2956				set_bit(STRIPE_HANDLE, &sh2->state);
2957			}
2958			release_stripe(sh2);
2959
2960		}
2961	/* done submitting copies, wait for them to complete */
2962	if (tx) {
2963		async_tx_ack(tx);
2964		dma_wait_for_async_tx(tx);
2965	}
2966}
2967
2968
2969/*
2970 * handle_stripe - do things to a stripe.
2971 *
2972 * We lock the stripe and then examine the state of various bits
2973 * to see what needs to be done.
2974 * Possible results:
2975 *    return some read requests which now have data
2976 *    return some write requests which are safely on disc
2977 *    schedule a read on some buffers
2978 *    schedule a write of some buffers
2979 *    return confirmation of parity correctness
2980 *
2981 * buffers are taken off read_list or write_list, and bh_cache buffers
2982 * get BH_Lock set before the stripe lock is released.
2983 *
2984 */
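/* analyse_stripe() gathers the state that handle_stripe() acts on: under
 * device_lock and rcu_read_lock it counts locked, uptodate, to_read,
 * to_write and failed devices, records up to two failed slots in
 * failed_num[], and notes any blocked rdev or known bad blocks.
 */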
2985
2986static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2987{
2988	raid5_conf_t *conf = sh->raid_conf;
2989	int disks = sh->disks;
2990	struct r5dev *dev;
2991	int i;
 
2992
2993	memset(s, 0, sizeof(*s));
2994
2995	s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
2996	s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2997	s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2998	s->failed_num[0] = -1;
2999	s->failed_num[1] = -1;
 
3000
3001	/* Now to look around and see what can be done */
3002	rcu_read_lock();
3003	spin_lock_irq(&conf->device_lock);
3004	for (i=disks; i--; ) {
3005		mdk_rdev_t *rdev;
3006		sector_t first_bad;
3007		int bad_sectors;
3008		int is_bad = 0;
3009
3010		dev = &sh->dev[i];
3011
3012		pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3013			i, dev->flags, dev->toread, dev->towrite, dev->written);
 
3014		/* maybe we can reply to a read
3015		 *
3016		 * new wantfill requests are only permitted while
3017		 * ops_complete_biofill is guaranteed to be inactive
3018		 */
3019		if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3020		    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3021			set_bit(R5_Wantfill, &dev->flags);
3022
3023		/* now count some things */
3024		if (test_bit(R5_LOCKED, &dev->flags))
3025			s->locked++;
3026		if (test_bit(R5_UPTODATE, &dev->flags))
3027			s->uptodate++;
3028		if (test_bit(R5_Wantcompute, &dev->flags)) {
3029			s->compute++;
3030			BUG_ON(s->compute > 2);
3031		}
3032
3033		if (test_bit(R5_Wantfill, &dev->flags))
3034			s->to_fill++;
3035		else if (dev->toread)
3036			s->to_read++;
3037		if (dev->towrite) {
3038			s->to_write++;
3039			if (!test_bit(R5_OVERWRITE, &dev->flags))
3040				s->non_overwrite++;
3041		}
3042		if (dev->written)
3043			s->written++;
3044		rdev = rcu_dereference(conf->disks[i].rdev);
3045		if (rdev) {
3046			is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3047					     &first_bad, &bad_sectors);
3048			if (s->blocked_rdev == NULL
3049			    && (test_bit(Blocked, &rdev->flags)
3050				|| is_bad < 0)) {
3051				if (is_bad < 0)
3052					set_bit(BlockedBadBlocks,
3053						&rdev->flags);
3054				s->blocked_rdev = rdev;
3055				atomic_inc(&rdev->nr_pending);
3056			}
3057		}
3058		clear_bit(R5_Insync, &dev->flags);
3059		if (!rdev)
3060			/* Not in-sync */;
3061		else if (is_bad) {
3062			/* also not in-sync */
3063			if (!test_bit(WriteErrorSeen, &rdev->flags)) {
 
3064				/* treat as in-sync, but with a read error
3065				 * which we can now try to correct
3066				 */
3067				set_bit(R5_Insync, &dev->flags);
3068				set_bit(R5_ReadError, &dev->flags);
3069			}
3070		} else if (test_bit(In_sync, &rdev->flags))
3071			set_bit(R5_Insync, &dev->flags);
3072		else {
3073			/* in sync if before recovery_offset */
3074			if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3075				set_bit(R5_Insync, &dev->flags);
3076		}
3077		if (test_bit(R5_WriteError, &dev->flags)) {
3078			clear_bit(R5_Insync, &dev->flags);
3079			if (!test_bit(Faulty, &rdev->flags)) {
3080				s->handle_bad_blocks = 1;
3081				atomic_inc(&rdev->nr_pending);
3082			} else
3083				clear_bit(R5_WriteError, &dev->flags);
3084		}
3085		if (test_bit(R5_MadeGood, &dev->flags)) {
3086			if (!test_bit(Faulty, &rdev->flags)) {
3087				s->handle_bad_blocks = 1;
3088				atomic_inc(&rdev->nr_pending);
3089			} else
3090				clear_bit(R5_MadeGood, &dev->flags);
3091		}
3092		if (!test_bit(R5_Insync, &dev->flags)) {
3093			/* The ReadError flag will just be confusing now */
3094			clear_bit(R5_ReadError, &dev->flags);
3095			clear_bit(R5_ReWrite, &dev->flags);
3096		}
3097		if (test_bit(R5_ReadError, &dev->flags))
3098			clear_bit(R5_Insync, &dev->flags);
3099		if (!test_bit(R5_Insync, &dev->flags)) {
3100			if (s->failed < 2)
3101				s->failed_num[s->failed] = i;
3102			s->failed++;
 
 
3103		}
3104	}
3105	spin_unlock_irq(&conf->device_lock);
3106	rcu_read_unlock();
3107}
3108
3109static void handle_stripe(struct stripe_head *sh)
3110{
3111	struct stripe_head_state s;
3112	raid5_conf_t *conf = sh->raid_conf;
3113	int i;
3114	int prexor;
3115	int disks = sh->disks;
3116	struct r5dev *pdev, *qdev;
3117
3118	clear_bit(STRIPE_HANDLE, &sh->state);
3119	if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
3120		/* already being handled, ensure it gets handled
3121		 * again when current action finishes */
3122		set_bit(STRIPE_HANDLE, &sh->state);
3123		return;
3124	}
3125
3126	if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3127		set_bit(STRIPE_SYNCING, &sh->state);
3128		clear_bit(STRIPE_INSYNC, &sh->state);
3129	}
3130	clear_bit(STRIPE_DELAYED, &sh->state);
3131
3132	pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3133		"pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3134	       (unsigned long long)sh->sector, sh->state,
3135	       atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3136	       sh->check_state, sh->reconstruct_state);
3137
3138	analyse_stripe(sh, &s);
3139
3140	if (s.handle_bad_blocks) {
3141		set_bit(STRIPE_HANDLE, &sh->state);
3142		goto finish;
3143	}
3144
3145	if (unlikely(s.blocked_rdev)) {
3146		if (s.syncing || s.expanding || s.expanded ||
3147		    s.to_write || s.written) {
3148			set_bit(STRIPE_HANDLE, &sh->state);
3149			goto finish;
3150		}
3151		/* There is nothing for the blocked_rdev to block */
3152		rdev_dec_pending(s.blocked_rdev, conf->mddev);
3153		s.blocked_rdev = NULL;
3154	}
3155
3156	if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3157		set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3158		set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3159	}
3160
3161	pr_debug("locked=%d uptodate=%d to_read=%d"
3162	       " to_write=%d failed=%d failed_num=%d,%d\n",
3163	       s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3164	       s.failed_num[0], s.failed_num[1]);
3165	/* check if the array has lost more than max_degraded devices and,
3166	 * if so, some requests might need to be failed.
3167	 */
3168	if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
3169		handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3170	if (s.failed > conf->max_degraded && s.syncing)
3171		handle_failed_sync(conf, sh, &s);
3172
3173	/*
3174	 * might be able to return some write requests if the parity blocks
3175	 * are safe, or on a failed drive
3176	 */
3177	pdev = &sh->dev[sh->pd_idx];
3178	s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3179		|| (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3180	qdev = &sh->dev[sh->qd_idx];
3181	s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3182		|| (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3183		|| conf->level < 6;
3184
3185	if (s.written &&
3186	    (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3187			     && !test_bit(R5_LOCKED, &pdev->flags)
3188			     && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3189	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3190			     && !test_bit(R5_LOCKED, &qdev->flags)
3191			     && test_bit(R5_UPTODATE, &qdev->flags)))))
3192		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3193
3194	/* Now we might consider reading some blocks, either to check/generate
3195	 * parity, or to satisfy requests
3196	 * or to load a block that is being partially written.
3197	 */
3198	if (s.to_read || s.non_overwrite
3199	    || (conf->level == 6 && s.to_write && s.failed)
3200	    || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3201		handle_stripe_fill(sh, &s, disks);
3202
3203	/* Now we check to see if any write operations have recently
3204	 * completed
3205	 */
3206	prexor = 0;
3207	if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3208		prexor = 1;
3209	if (sh->reconstruct_state == reconstruct_state_drain_result ||
3210	    sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3211		sh->reconstruct_state = reconstruct_state_idle;
3212
3213		/* All the 'written' buffers and the parity block are ready to
3214		 * be written back to disk
3215		 */
3216		BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
 
3217		BUG_ON(sh->qd_idx >= 0 &&
3218		       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
 
3219		for (i = disks; i--; ) {
3220			struct r5dev *dev = &sh->dev[i];
3221			if (test_bit(R5_LOCKED, &dev->flags) &&
3222				(i == sh->pd_idx || i == sh->qd_idx ||
3223				 dev->written)) {
3224				pr_debug("Writing block %d\n", i);
3225				set_bit(R5_Wantwrite, &dev->flags);
3226				if (prexor)
3227					continue;
 
 
3228				if (!test_bit(R5_Insync, &dev->flags) ||
3229				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
3230				     s.failed == 0))
3231					set_bit(STRIPE_INSYNC, &sh->state);
3232			}
3233		}
3234		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3235			s.dec_preread_active = 1;
3236	}
3237
3238	/* Now to consider new write requests and what else, if anything
3239	 * should be read.  We do not handle new writes when:
3240	 * 1/ A 'write' operation (copy+xor) is already in flight.
3241	 * 2/ A 'check' operation is in flight, as it may clobber the parity
3242	 *    block.
3243	 */
3244	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3245		handle_stripe_dirtying(conf, sh, &s, disks);
3246
3247	/* maybe we need to check and possibly fix the parity for this stripe
3248	 * Any reads will already have been scheduled, so we just see if enough
3249	 * data is available.  The parity check is held off while parity
3250	 * dependent operations are in flight.
3251	 */
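	/* Both helpers below run a small per-stripe state machine
	 * (sh->check_state) that is advanced by the results of the async
	 * zero-sum operations scheduled through ops_request.
	 */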
3252	if (sh->check_state ||
3253	    (s.syncing && s.locked == 0 &&
3254	     !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3255	     !test_bit(STRIPE_INSYNC, &sh->state))) {
3256		if (conf->level == 6)
3257			handle_parity_checks6(conf, sh, &s, disks);
3258		else
3259			handle_parity_checks5(conf, sh, &s, disks);
3260	}
3261
3262	if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3263		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3264		clear_bit(STRIPE_SYNCING, &sh->state);
 
 
3265	}
3266
3267	/* If the failed drives are just a ReadError, then we might need
3268	 * to progress the repair/check process
3269	 */
3270	if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3271		for (i = 0; i < s.failed; i++) {
3272			struct r5dev *dev = &sh->dev[s.failed_num[i]];
3273			if (test_bit(R5_ReadError, &dev->flags)
3274			    && !test_bit(R5_LOCKED, &dev->flags)
3275			    && test_bit(R5_UPTODATE, &dev->flags)
3276				) {
3277				if (!test_bit(R5_ReWrite, &dev->flags)) {
3278					set_bit(R5_Wantwrite, &dev->flags);
3279					set_bit(R5_ReWrite, &dev->flags);
3280					set_bit(R5_LOCKED, &dev->flags);
3281					s.locked++;
3282				} else {
3283					/* let's read it back */
3284					set_bit(R5_Wantread, &dev->flags);
3285					set_bit(R5_LOCKED, &dev->flags);
3286					s.locked++;
3287				}
3288			}
3289		}
3290
3291
3292	/* Finish reconstruct operations initiated by the expansion process */
3293	if (sh->reconstruct_state == reconstruct_state_result) {
3294		struct stripe_head *sh_src
3295			= get_active_stripe(conf, sh->sector, 1, 1, 1);
3296		if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3297			/* sh cannot be written until sh_src has been read.
3298			 * so arrange for sh to be delayed a little
3299			 */
3300			set_bit(STRIPE_DELAYED, &sh->state);
3301			set_bit(STRIPE_HANDLE, &sh->state);
3302			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3303					      &sh_src->state))
3304				atomic_inc(&conf->preread_active_stripes);
3305			release_stripe(sh_src);
3306			goto finish;
3307		}
3308		if (sh_src)
3309			release_stripe(sh_src);
3310
3311		sh->reconstruct_state = reconstruct_state_idle;
3312		clear_bit(STRIPE_EXPANDING, &sh->state);
3313		for (i = conf->raid_disks; i--; ) {
3314			set_bit(R5_Wantwrite, &sh->dev[i].flags);
3315			set_bit(R5_LOCKED, &sh->dev[i].flags);
3316			s.locked++;
3317		}
3318	}
3319
3320	if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3321	    !sh->reconstruct_state) {
3322		/* Need to write out all blocks after computing parity */
3323		sh->disks = conf->raid_disks;
3324		stripe_set_idx(sh->sector, conf, 0, sh);
3325		schedule_reconstruction(sh, &s, 1, 1);
3326	} else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3327		clear_bit(STRIPE_EXPAND_READY, &sh->state);
3328		atomic_dec(&conf->reshape_stripes);
3329		wake_up(&conf->wait_for_overlap);
3330		md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3331	}
3332
3333	if (s.expanding && s.locked == 0 &&
3334	    !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3335		handle_stripe_expansion(conf, sh);
3336
3337finish:
3338	/* wait for this device to become unblocked */
3339	if (conf->mddev->external && unlikely(s.blocked_rdev))
3340		md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3341
3342	if (s.handle_bad_blocks)
3343		for (i = disks; i--; ) {
3344			mdk_rdev_t *rdev;
3345			struct r5dev *dev = &sh->dev[i];
3346			if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3347				/* We own a safe reference to the rdev */
3348				rdev = conf->disks[i].rdev;
3349				if (!rdev_set_badblocks(rdev, sh->sector,
3350							STRIPE_SECTORS, 0))
3351					md_error(conf->mddev, rdev);
3352				rdev_dec_pending(rdev, conf->mddev);
3353			}
3354			if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3355				rdev = conf->disks[i].rdev;
3356				rdev_clear_badblocks(rdev, sh->sector,
3357						     STRIPE_SECTORS);
3358				rdev_dec_pending(rdev, conf->mddev);
3359			}
3360		}
3361
3362	if (s.ops_request)
3363		raid_run_ops(sh, s.ops_request);
3364
3365	ops_run_io(sh, &s);
3366
3367	if (s.dec_preread_active) {
3368		/* We delay this until after ops_run_io so that if make_request
3369		 * is waiting on a flush, it won't continue until the writes
3370		 * have actually been submitted.
3371		 */
3372		atomic_dec(&conf->preread_active_stripes);
3373		if (atomic_read(&conf->preread_active_stripes) <
3374		    IO_THRESHOLD)
3375			md_wakeup_thread(conf->mddev->thread);
3376	}
3377
3378	return_io(s.return_bi);
3379
3380	clear_bit(STRIPE_ACTIVE, &sh->state);
3381}
3382
3383static void raid5_activate_delayed(raid5_conf_t *conf)
3384{
3385	if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3386		while (!list_empty(&conf->delayed_list)) {
3387			struct list_head *l = conf->delayed_list.next;
3388			struct stripe_head *sh;
3389			sh = list_entry(l, struct stripe_head, lru);
3390			list_del_init(l);
3391			clear_bit(STRIPE_DELAYED, &sh->state);
3392			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3393				atomic_inc(&conf->preread_active_stripes);
3394			list_add_tail(&sh->lru, &conf->hold_list);
 
3395		}
3396	}
3397}
3398
3399static void activate_bit_delay(raid5_conf_t *conf)
 
3400{
3401	/* device_lock is held */
3402	struct list_head head;
3403	list_add(&head, &conf->bitmap_list);
3404	list_del_init(&conf->bitmap_list);
3405	while (!list_empty(&head)) {
3406		struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
 
3407		list_del_init(&sh->lru);
3408		atomic_inc(&sh->count);
3409		__release_stripe(conf, sh);
 
3410	}
3411}
3412
3413int md_raid5_congested(mddev_t *mddev, int bits)
3414{
3415	raid5_conf_t *conf = mddev->private;
3416
3417	/* No difference between reads and writes.  Just check
3418	 * how busy the stripe_cache is
3419	 */
3420
3421	if (conf->inactive_blocked)
3422		return 1;
3423	if (conf->quiesce)
3424		return 1;
3425	if (list_empty_careful(&conf->inactive_list))
3426		return 1;
3427
3428	return 0;
3429}
3430EXPORT_SYMBOL_GPL(md_raid5_congested);
3431
3432static int raid5_congested(void *data, int bits)
3433{
3434	mddev_t *mddev = data;
3435
3436	return mddev_congested(mddev, bits) ||
3437		md_raid5_congested(mddev, bits);
3438}
3439
3440/* We want read requests to align with chunks where possible,
3441 * but write requests don't need to.
3442 */
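/* For a read, the helper below reports how many bytes may still be added to
 * the bio before it would cross into the next chunk: e.g. with 128-sector
 * chunks, a bio that currently ends 8 sectors short of the boundary may grow
 * by at most 8 << 9 bytes.
 */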
3443static int raid5_mergeable_bvec(struct request_queue *q,
3444				struct bvec_merge_data *bvm,
3445				struct bio_vec *biovec)
3446{
3447	mddev_t *mddev = q->queuedata;
3448	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3449	int max;
3450	unsigned int chunk_sectors = mddev->chunk_sectors;
3451	unsigned int bio_sectors = bvm->bi_size >> 9;
3452
3453	if ((bvm->bi_rw & 1) == WRITE)
3454		return biovec->bv_len; /* always allow writes to be mergeable */
3455
3456	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3457		chunk_sectors = mddev->new_chunk_sectors;
3458	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3459	if (max < 0) max = 0;
3460	if (max <= biovec->bv_len && bio_sectors == 0)
3461		return biovec->bv_len;
3462	else
3463		return max;
3464}
3465
3466
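/* A bio is within a chunk boundary when its offset inside the chunk plus its
 * length does not exceed the chunk size; with 128-sector chunks an 8-sector
 * bio starting at offset 120 fits, while the same bio at offset 124 does not.
 */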
3467static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3468{
3469	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3470	unsigned int chunk_sectors = mddev->chunk_sectors;
3471	unsigned int bio_sectors = bio->bi_size >> 9;
3472
3473	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3474		chunk_sectors = mddev->new_chunk_sectors;
3475	return  chunk_sectors >=
3476		((sector & (chunk_sectors - 1)) + bio_sectors);
3477}
3478
3479/*
3480 *  add bio to the retry LIFO  ( in O(1) ... we are in interrupt )
3481 *  later sampled by raid5d.
3482 */
3483static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3484{
3485	unsigned long flags;
3486
3487	spin_lock_irqsave(&conf->device_lock, flags);
3488
3489	bi->bi_next = conf->retry_read_aligned_list;
3490	conf->retry_read_aligned_list = bi;
3491
3492	spin_unlock_irqrestore(&conf->device_lock, flags);
3493	md_wakeup_thread(conf->mddev->thread);
3494}
3495
3496
3497static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3498{
3499	struct bio *bi;
3500
3501	bi = conf->retry_read_aligned;
3502	if (bi) {
3503		conf->retry_read_aligned = NULL;
3504		return bi;
3505	}
3506	bi = conf->retry_read_aligned_list;
3507	if(bi) {
3508		conf->retry_read_aligned_list = bi->bi_next;
3509		bi->bi_next = NULL;
3510		/*
3511		 * this sets the active stripe count to 1 and the processed
3512		 * stripe count to zero (upper 8 bits)
3513		 */
3514		bi->bi_phys_segments = 1; /* biased count of active stripes */
3515	}
3516
3517	return bi;
3518}
3519
3520
3521/*
3522 *  The "raid5_align_endio" should check if the read succeeded and if it
3523 *  did, call bio_endio on the original bio (having bio_put the new bio
3524 *  first).
3525 *  If the read failed, hand the original bio to add_bio_to_retry so raid5d can retry it.
3526 */
3527static void raid5_align_endio(struct bio *bi, int error)
3528{
3529	struct bio* raid_bi  = bi->bi_private;
3530	mddev_t *mddev;
3531	raid5_conf_t *conf;
3532	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3533	mdk_rdev_t *rdev;
3534
3535	bio_put(bi);
3536
3537	rdev = (void*)raid_bi->bi_next;
3538	raid_bi->bi_next = NULL;
3539	mddev = rdev->mddev;
3540	conf = mddev->private;
3541
3542	rdev_dec_pending(rdev, conf->mddev);
3543
3544	if (!error && uptodate) {
3545		bio_endio(raid_bi, 0);
 
 
3546		if (atomic_dec_and_test(&conf->active_aligned_reads))
3547			wake_up(&conf->wait_for_stripe);
3548		return;
3549	}
3550
3551
3552	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3553
3554	add_bio_to_retry(raid_bi, conf);
3555}
3556
3557static int bio_fits_rdev(struct bio *bi)
3558{
3559	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3560
3561	if ((bi->bi_size>>9) > queue_max_sectors(q))
3562		return 0;
3563	blk_recount_segments(q, bi);
3564	if (bi->bi_phys_segments > queue_max_segments(q))
3565		return 0;
3566
3567	if (q->merge_bvec_fn)
3568		/* it's too hard to apply the merge_bvec_fn at this stage,
3569		 * just give up
3570		 */
3571		return 0;
3572
3573	return 1;
3574}
3575
3576
3577static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3578{
3579	raid5_conf_t *conf = mddev->private;
3580	int dd_idx;
3581	struct bio* align_bi;
3582	mdk_rdev_t *rdev;
 
3583
3584	if (!in_chunk_boundary(mddev, raid_bio)) {
3585		pr_debug("chunk_aligned_read : non aligned\n");
3586		return 0;
3587	}
3588	/*
3589	 * use bio_clone_mddev to make a copy of the bio
3590	 */
3591	align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3592	if (!align_bi)
3593		return 0;
3594	/*
3595	 *   set bi_end_io to a new function, and set bi_private to the
3596	 *     original bio.
3597	 */
3598	align_bi->bi_end_io  = raid5_align_endio;
3599	align_bi->bi_private = raid_bio;
3600	/*
3601	 *	compute position
3602	 */
3603	align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3604						    0,
3605						    &dd_idx, NULL);
3606
 
3607	rcu_read_lock();
3608	rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3609	if (rdev && test_bit(In_sync, &rdev->flags)) {
3610		sector_t first_bad;
3611		int bad_sectors;
3612
3613		atomic_inc(&rdev->nr_pending);
3614		rcu_read_unlock();
3615		raid_bio->bi_next = (void*)rdev;
3616		align_bi->bi_bdev =  rdev->bdev;
3617		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3618		align_bi->bi_sector += rdev->data_offset;
3619
3620		if (!bio_fits_rdev(align_bi) ||
3621		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3622				&first_bad, &bad_sectors)) {
3623			/* too big in some way, or has a known bad block */
3624			bio_put(align_bi);
3625			rdev_dec_pending(rdev, mddev);
3626			return 0;
3627		}
3628
3629		spin_lock_irq(&conf->device_lock);
3630		wait_event_lock_irq(conf->wait_for_stripe,
3631				    conf->quiesce == 0,
3632				    conf->device_lock, /* nothing */);
3633		atomic_inc(&conf->active_aligned_reads);
3634		spin_unlock_irq(&conf->device_lock);
3635
3636		generic_make_request(align_bi);
3637		return 1;
3638	} else {
3639		rcu_read_unlock();
3640		bio_put(align_bi);
3641		return 0;
3642	}
3643}
3644
3645/* __get_priority_stripe - get the next stripe to process
3646 *
3647 * Full stripe writes are allowed to pass preread active stripes up until
3648 * the bypass_threshold is exceeded.  In general the bypass_count
3649 * increments when the handle_list is handled before the hold_list; however, it
3650 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3651 * stripe with in flight i/o.  The bypass_count will be reset when the
3652 * head of the hold_list has changed, i.e. the head was promoted to the
3653 * handle_list.
3654 */
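/* In practice: bypass_count grows each time the handle_list is serviced
 * while the same hold_list head keeps waiting and the chosen stripe has not
 * started I/O; once the handle_list drains, a hold_list stripe is promoted
 * if bypass_count exceeded bypass_threshold or no full-stripe writes are
 * pending, and bypass_count is then reduced by the threshold.
 */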
3655static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3656{
3657	struct stripe_head *sh;
3658
3659	pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3660		  __func__,
3661		  list_empty(&conf->handle_list) ? "empty" : "busy",
3662		  list_empty(&conf->hold_list) ? "empty" : "busy",
3663		  atomic_read(&conf->pending_full_writes), conf->bypass_count);
3664
3665	if (!list_empty(&conf->handle_list)) {
3666		sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3667
3668		if (list_empty(&conf->hold_list))
3669			conf->bypass_count = 0;
3670		else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3671			if (conf->hold_list.next == conf->last_hold)
3672				conf->bypass_count++;
3673			else {
3674				conf->last_hold = conf->hold_list.next;
3675				conf->bypass_count -= conf->bypass_threshold;
3676				if (conf->bypass_count < 0)
3677					conf->bypass_count = 0;
3678			}
3679		}
3680	} else if (!list_empty(&conf->hold_list) &&
3681		   ((conf->bypass_threshold &&
3682		     conf->bypass_count > conf->bypass_threshold) ||
3683		    atomic_read(&conf->pending_full_writes) == 0)) {
3684		sh = list_entry(conf->hold_list.next,
3685				typeof(*sh), lru);
3686		conf->bypass_count -= conf->bypass_threshold;
3687		if (conf->bypass_count < 0)
3688			conf->bypass_count = 0;
3689	} else
3690		return NULL;
3691
3692	list_del_init(&sh->lru);
3693	atomic_inc(&sh->count);
3694	BUG_ON(atomic_read(&sh->count) != 1);
3695	return sh;
3696}
3697
3698static int make_request(mddev_t *mddev, struct bio * bi)
3699{
3700	raid5_conf_t *conf = mddev->private;
3701	int dd_idx;
3702	sector_t new_sector;
3703	sector_t logical_sector, last_sector;
3704	struct stripe_head *sh;
3705	const int rw = bio_data_dir(bi);
3706	int remaining;
3707	int plugged;
 
3708
3709	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3710		md_flush_request(mddev, bi);
3711		return 0;
3712	}
3713
3714	md_write_start(mddev, bi);
3715
3716	if (rw == READ &&
3717	     mddev->reshape_position == MaxSector &&
3718	     chunk_aligned_read(mddev,bi))
3719		return 0;
3720
3721	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3722	last_sector = bi->bi_sector + (bi->bi_size>>9);
3723	bi->bi_next = NULL;
3724	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
3725
3726	plugged = mddev_check_plugged(mddev);
3727	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3728		DEFINE_WAIT(w);
3729		int disks, data_disks;
3730		int previous;
 
3731
 
3732	retry:
 
3733		previous = 0;
3734		disks = conf->raid_disks;
3735		prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
 
3736		if (unlikely(conf->reshape_progress != MaxSector)) {
3737			/* spinlock is needed as reshape_progress may be
3738			 * 64bit on a 32bit platform, and so it might be
3739			 * possible to see a half-updated value
3740			 * Of course reshape_progress could change after
3741			 * the lock is dropped, so once we get a reference
3742			 * to the stripe that we think it is, we will have
3743			 * to check again.
3744			 */
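			/* Sectors the reshape has not reached yet are mapped
			 * with the old geometry (previous_raid_disks); sectors
			 * already reshaped but not yet recorded in
			 * reshape_safe are retried until the checkpoint
			 * catches up.
			 */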
3745			spin_lock_irq(&conf->device_lock);
3746			if (mddev->delta_disks < 0
3747			    ? logical_sector < conf->reshape_progress
3748			    : logical_sector >= conf->reshape_progress) {
3749				disks = conf->previous_raid_disks;
3750				previous = 1;
3751			} else {
3752				if (mddev->delta_disks < 0
3753				    ? logical_sector < conf->reshape_safe
3754				    : logical_sector >= conf->reshape_safe) {
3755					spin_unlock_irq(&conf->device_lock);
3756					schedule();
 
3757					goto retry;
3758				}
3759			}
3760			spin_unlock_irq(&conf->device_lock);
3761		}
3762		data_disks = disks - conf->max_degraded;
3763
3764		new_sector = raid5_compute_sector(conf, logical_sector,
3765						  previous,
3766						  &dd_idx, NULL);
3767		pr_debug("raid456: make_request, sector %llu logical %llu\n",
3768			(unsigned long long)new_sector, 
3769			(unsigned long long)logical_sector);
3770
3771		sh = get_active_stripe(conf, new_sector, previous,
3772				       (bi->bi_rw&RWA_MASK), 0);
3773		if (sh) {
3774			if (unlikely(previous)) {
3775				/* expansion might have moved on while waiting for a
3776				 * stripe, so we must do the range check again.
3777				 * Expansion could still move past after this
3778				 * test, but as we are holding a reference to
3779				 * 'sh', we know that if that happens,
3780				 *  STRIPE_EXPANDING will get set and the expansion
3781				 * won't proceed until we finish with the stripe.
3782				 */
3783				int must_retry = 0;
3784				spin_lock_irq(&conf->device_lock);
3785				if (mddev->delta_disks < 0
3786				    ? logical_sector >= conf->reshape_progress
3787				    : logical_sector < conf->reshape_progress)
3788					/* mismatch, need to try again */
3789					must_retry = 1;
3790				spin_unlock_irq(&conf->device_lock);
3791				if (must_retry) {
3792					release_stripe(sh);
3793					schedule();
 
3794					goto retry;
3795				}
3796			}
3797
3798			if (rw == WRITE &&
3799			    logical_sector >= mddev->suspend_lo &&
3800			    logical_sector < mddev->suspend_hi) {
3801				release_stripe(sh);
3802				/* As the suspend_* range is controlled by
3803				 * userspace, we want an interruptible
3804				 * wait.
3805				 */
3806				flush_signals(current);
3807				prepare_to_wait(&conf->wait_for_overlap,
3808						&w, TASK_INTERRUPTIBLE);
3809				if (logical_sector >= mddev->suspend_lo &&
3810				    logical_sector < mddev->suspend_hi)
3811					schedule();
 
 
3812				goto retry;
3813			}
3814
3815			if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3816			    !add_stripe_bio(sh, bi, dd_idx, rw)) {
3817				/* Stripe is busy expanding or
3818				 * add failed due to overlap.  Flush everything
3819				 * and wait a while
3820				 */
3821				md_wakeup_thread(mddev->thread);
3822				release_stripe(sh);
3823				schedule();
 
3824				goto retry;
3825			}
3826			finish_wait(&conf->wait_for_overlap, &w);
3827			set_bit(STRIPE_HANDLE, &sh->state);
3828			clear_bit(STRIPE_DELAYED, &sh->state);
3829			if ((bi->bi_rw & REQ_SYNC) &&
 
3830			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3831				atomic_inc(&conf->preread_active_stripes);
3832			release_stripe(sh);
3833		} else {
3834			/* cannot get stripe for read-ahead, just give up */
3835			clear_bit(BIO_UPTODATE, &bi->bi_flags);
3836			finish_wait(&conf->wait_for_overlap, &w);
3837			break;
3838		}
3839			
3840	}
3841	if (!plugged)
3842		md_wakeup_thread(mddev->thread);
3843
3844	spin_lock_irq(&conf->device_lock);
3845	remaining = raid5_dec_bi_phys_segments(bi);
3846	spin_unlock_irq(&conf->device_lock);
3847	if (remaining == 0) {
3848
3849		if ( rw == WRITE )
3850			md_write_end(mddev);
3851
3852		bio_endio(bi, 0);
 
 
3853	}
3854
3855	return 0;
3856}
3857
3858static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3859
3860static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3861{
3862	/* reshaping is quite different to recovery/resync so it is
3863	 * handled quite separately ... here.
3864	 *
3865	 * On each call to sync_request, we gather one chunk worth of
3866	 * destination stripes and flag them as expanding.
3867	 * Then we find all the source stripes and request reads.
3868	 * As the reads complete, handle_stripe will copy the data
3869	 * into the destination stripe and release that stripe.
3870	 */
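	/* Roughly, each call: checkpoints the superblock when required,
	 * allocates one chunk worth of destination stripes and marks them
	 * STRIPE_EXPANDING (zeroing blocks that lie beyond the old array),
	 * advances reshape_progress, schedules reads of the matching source
	 * stripes and finally releases the destination stripes.
	 */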
3871	raid5_conf_t *conf = mddev->private;
3872	struct stripe_head *sh;
3873	sector_t first_sector, last_sector;
3874	int raid_disks = conf->previous_raid_disks;
3875	int data_disks = raid_disks - conf->max_degraded;
3876	int new_data_disks = conf->raid_disks - conf->max_degraded;
3877	int i;
3878	int dd_idx;
3879	sector_t writepos, readpos, safepos;
3880	sector_t stripe_addr;
3881	int reshape_sectors;
3882	struct list_head stripes;
 
3883
3884	if (sector_nr == 0) {
3885		/* If restarting in the middle, skip the initial sectors */
3886		if (mddev->delta_disks < 0 &&
3887		    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3888			sector_nr = raid5_size(mddev, 0, 0)
3889				- conf->reshape_progress;
3890		} else if (mddev->delta_disks >= 0 &&
3891			   conf->reshape_progress > 0)
3892			sector_nr = conf->reshape_progress;
3893		sector_div(sector_nr, new_data_disks);
3894		if (sector_nr) {
3895			mddev->curr_resync_completed = sector_nr;
3896			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3897			*skipped = 1;
3898			return sector_nr;
 
3899		}
3900	}
3901
3902	/* We need to process a full chunk at a time.
3903	 * If old and new chunk sizes differ, we need to process the
3904	 * largest of these
3905	 */
3906	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3907		reshape_sectors = mddev->new_chunk_sectors;
3908	else
3909		reshape_sectors = mddev->chunk_sectors;
3910
3911	/* we update the metadata when there is more than 3Meg
3912	 * in the block range (that is rather arbitrary, should
3913	 * probably be time based) or when the data about to be
3914	 * copied would over-write the source of the data at
3915	 * the front of the range.
3916	 * i.e. one new_stripe along from reshape_progress new_maps
3917	 * to after where reshape_safe old_maps to
3918	 */
3919	writepos = conf->reshape_progress;
3920	sector_div(writepos, new_data_disks);
3921	readpos = conf->reshape_progress;
3922	sector_div(readpos, data_disks);
3923	safepos = conf->reshape_safe;
3924	sector_div(safepos, data_disks);
3925	if (mddev->delta_disks < 0) {
3926		writepos -= min_t(sector_t, reshape_sectors, writepos);
 
3927		readpos += reshape_sectors;
3928		safepos += reshape_sectors;
3929	} else {
3930		writepos += reshape_sectors;
3931		readpos -= min_t(sector_t, reshape_sectors, readpos);
3932		safepos -= min_t(sector_t, reshape_sectors, safepos);
3933	}
3934
3935	/* 'writepos' is the most advanced device address we might write.
3936	 * 'readpos' is the least advanced device address we might read.
3937	 * 'safepos' is the least address recorded in the metadata as having
3938	 *     been reshaped.
3939	 * If 'readpos' is behind 'writepos', then there is no way that we can
3940	 * ensure safety in the face of a crash - that must be done by userspace
3941	 * making a backup of the data.  So in that case there is no particular
3942	 * rush to update metadata.
3943	 * Otherwise if 'safepos' is behind 'writepos', then we really need to
3944	 * update the metadata to advance 'safepos' to match 'readpos' so that
3945	 * we can be safe in the event of a crash.
3946	 * So we insist on updating metadata if safepos is behind writepos and
3947	 * readpos is beyond writepos.
3948	 * In any case, update the metadata every 10 seconds.
3949	 * Maybe that number should be configurable, but I'm not sure it is
3950	 * worth it.... maybe it could be a multiple of safemode_delay???
3951	 */
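	/* Note that writepos, readpos and safepos are per-device addresses:
	 * the array address is divided by the data-disk count of the new
	 * layout for writepos and of the old layout for readpos and safepos,
	 * so the three values can be compared directly.
	 */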
3952	if ((mddev->delta_disks < 0
3953	     ? (safepos > writepos && readpos < writepos)
3954	     : (safepos < writepos && readpos > writepos)) ||
3955	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
3956		/* Cannot proceed until we've updated the superblock... */
3957		wait_event(conf->wait_for_overlap,
3958			   atomic_read(&conf->reshape_stripes)==0);
3959		mddev->reshape_position = conf->reshape_progress;
3960		mddev->curr_resync_completed = sector_nr;
3961		conf->reshape_checkpoint = jiffies;
3962		set_bit(MD_CHANGE_DEVS, &mddev->flags);
3963		md_wakeup_thread(mddev->thread);
3964		wait_event(mddev->sb_wait, mddev->flags == 0 ||
3965			   kthread_should_stop());
 
 
3966		spin_lock_irq(&conf->device_lock);
3967		conf->reshape_safe = mddev->reshape_position;
3968		spin_unlock_irq(&conf->device_lock);
3969		wake_up(&conf->wait_for_overlap);
3970		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3971	}
3972
3973	if (mddev->delta_disks < 0) {
3974		BUG_ON(conf->reshape_progress == 0);
3975		stripe_addr = writepos;
3976		BUG_ON((mddev->dev_sectors &
3977			~((sector_t)reshape_sectors - 1))
3978		       - reshape_sectors - stripe_addr
3979		       != sector_nr);
3980	} else {
3981		BUG_ON(writepos != sector_nr + reshape_sectors);
3982		stripe_addr = sector_nr;
3983	}
3984	INIT_LIST_HEAD(&stripes);
3985	for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3986		int j;
3987		int skipped_disk = 0;
3988		sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
3989		set_bit(STRIPE_EXPANDING, &sh->state);
3990		atomic_inc(&conf->reshape_stripes);
3991		/* If any of this stripe is beyond the end of the old
3992		 * array, then we need to zero those blocks
3993		 */
3994		for (j=sh->disks; j--;) {
3995			sector_t s;
3996			if (j == sh->pd_idx)
3997				continue;
3998			if (conf->level == 6 &&
3999			    j == sh->qd_idx)
4000				continue;
4001			s = compute_blocknr(sh, j, 0);
4002			if (s < raid5_size(mddev, 0, 0)) {
4003				skipped_disk = 1;
4004				continue;
4005			}
4006			memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4007			set_bit(R5_Expanded, &sh->dev[j].flags);
4008			set_bit(R5_UPTODATE, &sh->dev[j].flags);
4009		}
4010		if (!skipped_disk) {
4011			set_bit(STRIPE_EXPAND_READY, &sh->state);
4012			set_bit(STRIPE_HANDLE, &sh->state);
4013		}
4014		list_add(&sh->lru, &stripes);
4015	}
4016	spin_lock_irq(&conf->device_lock);
4017	if (mddev->delta_disks < 0)
4018		conf->reshape_progress -= reshape_sectors * new_data_disks;
4019	else
4020		conf->reshape_progress += reshape_sectors * new_data_disks;
4021	spin_unlock_irq(&conf->device_lock);
4022	/* Ok, those stripes are ready. We can start scheduling
4023	 * reads on the source stripes.
4024	 * The source stripes are determined by mapping the first and last
4025	 * block on the destination stripes.
4026	 */
4027	first_sector =
4028		raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4029				     1, &dd_idx, NULL);
4030	last_sector =
4031		raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4032					    * new_data_disks - 1),
4033				     1, &dd_idx, NULL);
4034	if (last_sector >= mddev->dev_sectors)
4035		last_sector = mddev->dev_sectors - 1;
4036	while (first_sector <= last_sector) {
4037		sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4038		set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4039		set_bit(STRIPE_HANDLE, &sh->state);
4040		release_stripe(sh);
4041		first_sector += STRIPE_SECTORS;
4042	}
4043	/* Now that the sources are clearly marked, we can release
4044	 * the destination stripes
4045	 */
4046	while (!list_empty(&stripes)) {
4047		sh = list_entry(stripes.next, struct stripe_head, lru);
4048		list_del_init(&sh->lru);
4049		release_stripe(sh);
4050	}
4051	/* If this takes us to the resync_max point where we have to pause,
4052	 * then we need to write out the superblock.
4053	 */
4054	sector_nr += reshape_sectors;
4055	if ((sector_nr - mddev->curr_resync_completed) * 2
4056	    >= mddev->resync_max - mddev->curr_resync_completed) {
4057		/* Cannot proceed until we've updated the superblock... */
4058		wait_event(conf->wait_for_overlap,
4059			   atomic_read(&conf->reshape_stripes) == 0);
4060		mddev->reshape_position = conf->reshape_progress;
4061		mddev->curr_resync_completed = sector_nr;
4062		conf->reshape_checkpoint = jiffies;
4063		set_bit(MD_CHANGE_DEVS, &mddev->flags);
4064		md_wakeup_thread(mddev->thread);
4065		wait_event(mddev->sb_wait,
4066			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4067			   || kthread_should_stop());
 
 
4068		spin_lock_irq(&conf->device_lock);
4069		conf->reshape_safe = mddev->reshape_position;
4070		spin_unlock_irq(&conf->device_lock);
4071		wake_up(&conf->wait_for_overlap);
4072		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4073	}
4074	return reshape_sectors;
 
4075}
4076
4077/* FIXME go_faster isn't used */
4078static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4079{
4080	raid5_conf_t *conf = mddev->private;
4081	struct stripe_head *sh;
4082	sector_t max_sector = mddev->dev_sectors;
4083	sector_t sync_blocks;
4084	int still_degraded = 0;
4085	int i;
4086
4087	if (sector_nr >= max_sector) {
4088		/* just being told to finish up .. nothing much to do */
4089
4090		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4091			end_reshape(conf);
4092			return 0;
4093		}
4094
4095		if (mddev->curr_resync < max_sector) /* aborted */
4096			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4097					&sync_blocks, 1);
4098		else /* completed sync */
4099			conf->fullsync = 0;
4100		bitmap_close_sync(mddev->bitmap);
4101
4102		return 0;
4103	}
4104
4105	/* Allow raid5_quiesce to complete */
4106	wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4107
4108	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4109		return reshape_request(mddev, sector_nr, skipped);
4110
4111	/* No need to check resync_max as we never do more than one
4112	 * stripe, and as resync_max will always be on a chunk boundary,
4113	 * if the check in md_do_sync didn't fire, there is no chance
4114	 * of overstepping resync_max here
4115	 */
4116
4117	/* if there are too many failed drives and we are trying
4118	 * to resync, then assert that we are finished, because there is
4119	 * nothing we can do.
4120	 */
4121	if (mddev->degraded >= conf->max_degraded &&
4122	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4123		sector_t rv = mddev->dev_sectors - sector_nr;
4124		*skipped = 1;
4125		return rv;
4126	}
4127	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4128	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4129	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4130		/* we can skip this block, and probably more */
4131		sync_blocks /= STRIPE_SECTORS;
4132		*skipped = 1;
4133		return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4134	}
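	/* Editorial worked example (assumes 4K pages, so STRIPE_SECTORS == 8):
	 * if bitmap_start_sync reports sync_blocks == 1003 clean sectors, the
	 * block above rounds down to 1003/8 = 125 whole stripes and skips
	 * 125*8 = 1000 sectors, keeping sector_nr stripe-aligned.
	 */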
4135
4136
4137	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4138
4139	sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4140	if (sh == NULL) {
4141		sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4142		/* make sure we don't swamp the stripe cache if someone else
4143		 * is trying to get access
4144		 */
4145		schedule_timeout_uninterruptible(1);
4146	}
4147	/* Need to check if array will still be degraded after recovery/resync
4148	 * We don't need to check the 'failed' flag as when that gets set,
4149	 * recovery aborts.
4150	 */
4151	for (i = 0; i < conf->raid_disks; i++)
4152		if (conf->disks[i].rdev == NULL)
4153			still_degraded = 1;
4154
4155	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4156
4157	set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
4158
4159	handle_stripe(sh);
4160	release_stripe(sh);
4161
4162	return STRIPE_SECTORS;
4163}
4164
4165static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4166{
4167	/* We may not be able to submit a whole bio at once as there
4168	 * may not be enough stripe_heads available.
4169	 * We cannot pre-allocate enough stripe_heads as we may need
4170	 * more than exist in the cache (if we allow ever-larger chunks).
4171	 * So we do one stripe head at a time and record in
4172	 * ->bi_hw_segments how many have been done.
4173	 *
4174	 * We *know* that this entire raid_bio is in one chunk, so
4175	 * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
4176	 */
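	/* Editorial example (hypothetical numbers): with 4K pages a 64K aligned
	 * read spans 16 stripes (scnt 0..15).  If only the first 10 stripe_heads
	 * can be obtained, raid5_set_bi_hw_segments(raid_bio, 10) records the
	 * progress and the bio is parked on conf->retry_read_aligned, so the
	 * next call resumes at scnt == 10 instead of redoing earlier stripes.
	 */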
4177	struct stripe_head *sh;
4178	int dd_idx;
4179	sector_t sector, logical_sector, last_sector;
4180	int scnt = 0;
4181	int remaining;
4182	int handled = 0;
4183
4184	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4185	sector = raid5_compute_sector(conf, logical_sector,
4186				      0, &dd_idx, NULL);
4187	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4188
4189	for (; logical_sector < last_sector;
4190	     logical_sector += STRIPE_SECTORS,
4191		     sector += STRIPE_SECTORS,
4192		     scnt++) {
4193
4194		if (scnt < raid5_bi_hw_segments(raid_bio))
4195			/* already done this stripe */
4196			continue;
4197
4198		sh = get_active_stripe(conf, sector, 0, 1, 0);
4199
4200		if (!sh) {
4201			/* failed to get a stripe - must wait */
4202			raid5_set_bi_hw_segments(raid_bio, scnt);
4203			conf->retry_read_aligned = raid_bio;
4204			return handled;
4205		}
4206
4207		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4208		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4209			release_stripe(sh);
4210			raid5_set_bi_hw_segments(raid_bio, scnt);
4211			conf->retry_read_aligned = raid_bio;
4212			return handled;
4213		}
4214
4215		handle_stripe(sh);
4216		release_stripe(sh);
4217		handled++;
4218	}
4219	spin_lock_irq(&conf->device_lock);
4220	remaining = raid5_dec_bi_phys_segments(raid_bio);
4221	spin_unlock_irq(&conf->device_lock);
4222	if (remaining == 0)
4223		bio_endio(raid_bio, 0);
4224	if (atomic_dec_and_test(&conf->active_aligned_reads))
4225		wake_up(&conf->wait_for_stripe);
4226	return handled;
4227}
4228
4229
4230/*
4231 * This is our raid5 kernel thread.
4232 *
4233 * We scan the hash table for stripes which can be handled now.
4234 * During the scan, completed stripes are saved for us by the interrupt
4235 * handler, so that they will not have to wait for our next wakeup.
4236 */
4237static void raid5d(mddev_t *mddev)
4238{
4239	struct stripe_head *sh;
4240	raid5_conf_t *conf = mddev->private;
4241	int handled;
4242	struct blk_plug plug;
4243
4244	pr_debug("+++ raid5d active\n");
4245
4246	md_check_recovery(mddev);
4247
4248	blk_start_plug(&plug);
4249	handled = 0;
4250	spin_lock_irq(&conf->device_lock);
4251	while (1) {
4252		struct bio *bio;
4253
4254		if (atomic_read(&mddev->plug_cnt) == 0 &&
4255		    !list_empty(&conf->bitmap_list)) {
4256			/* Now is a good time to flush some bitmap updates */
4257			conf->seq_flush++;
4258			spin_unlock_irq(&conf->device_lock);
4259			bitmap_unplug(mddev->bitmap);
4260			spin_lock_irq(&conf->device_lock);
4261			conf->seq_write = conf->seq_flush;
4262			activate_bit_delay(conf);
4263		}
4264		if (atomic_read(&mddev->plug_cnt) == 0)
4265			raid5_activate_delayed(conf);
4266
4267		while ((bio = remove_bio_from_retry(conf))) {
4268			int ok;
4269			spin_unlock_irq(&conf->device_lock);
4270			ok = retry_aligned_read(conf, bio);
4271			spin_lock_irq(&conf->device_lock);
4272			if (!ok)
4273				break;
4274			handled++;
4275		}
4276
4277		sh = __get_priority_stripe(conf);
4278
4279		if (!sh)
4280			break;
4281		spin_unlock_irq(&conf->device_lock);
4282		
4283		handled++;
4284		handle_stripe(sh);
4285		release_stripe(sh);
4286		cond_resched();
4287
4288		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4289			md_check_recovery(mddev);
4290
4291		spin_lock_irq(&conf->device_lock);
4292	}
4293	pr_debug("%d stripes handled\n", handled);
4294
4295	spin_unlock_irq(&conf->device_lock);
4296
4297	async_tx_issue_pending_all();
4298	blk_finish_plug(&plug);
4299
4300	pr_debug("--- raid5d inactive\n");
4301}
4302
4303static ssize_t
4304raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4305{
4306	raid5_conf_t *conf = mddev->private;
4307	if (conf)
4308		return sprintf(page, "%d\n", conf->max_nr_stripes);
4309	else
4310		return 0;
4311}
4312
4313int
4314raid5_set_cache_size(mddev_t *mddev, int size)
4315{
4316	raid5_conf_t *conf = mddev->private;
4317	int err;
4318
4319	if (size <= 16 || size > 32768)
4320		return -EINVAL;
4321	while (size < conf->max_nr_stripes) {
4322		if (drop_one_stripe(conf))
4323			conf->max_nr_stripes--;
4324		else
4325			break;
4326	}
4327	err = md_allow_write(mddev);
4328	if (err)
4329		return err;
4330	while (size > conf->max_nr_stripes) {
4331		if (grow_one_stripe(conf))
4332			conf->max_nr_stripes++;
4333		else break;
4334	}
4335	return 0;
4336}
4337EXPORT_SYMBOL(raid5_set_cache_size);
4338
4339static ssize_t
4340raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4341{
4342	raid5_conf_t *conf = mddev->private;
4343	unsigned long new;
4344	int err;
4345
4346	if (len >= PAGE_SIZE)
4347		return -EINVAL;
4348	if (!conf)
4349		return -ENODEV;
4350
4351	if (strict_strtoul(page, 10, &new))
4352		return -EINVAL;
4353	err = raid5_set_cache_size(mddev, new);
4354	if (err)
4355		return err;
4356	return len;
4357}
4358
4359static struct md_sysfs_entry
4360raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4361				raid5_show_stripe_cache_size,
4362				raid5_store_stripe_cache_size);
4363
4364static ssize_t
4365raid5_show_preread_threshold(mddev_t *mddev, char *page)
4366{
4367	raid5_conf_t *conf = mddev->private;
4368	if (conf)
4369		return sprintf(page, "%d\n", conf->bypass_threshold);
4370	else
4371		return 0;
4372}
4373
4374static ssize_t
4375raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4376{
4377	raid5_conf_t *conf = mddev->private;
4378	unsigned long new;
4379	if (len >= PAGE_SIZE)
4380		return -EINVAL;
4381	if (!conf)
4382		return -ENODEV;
4383
4384	if (strict_strtoul(page, 10, &new))
4385		return -EINVAL;
4386	if (new > conf->max_nr_stripes)
4387		return -EINVAL;
4388	conf->bypass_threshold = new;
4389	return len;
4390}
4391
4392static struct md_sysfs_entry
4393raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4394					S_IRUGO | S_IWUSR,
4395					raid5_show_preread_threshold,
4396					raid5_store_preread_threshold);
4397
4398static ssize_t
4399stripe_cache_active_show(mddev_t *mddev, char *page)
4400{
4401	raid5_conf_t *conf = mddev->private;
4402	if (conf)
4403		return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4404	else
4405		return 0;
4406}
4407
4408static struct md_sysfs_entry
4409raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4410
4411static struct attribute *raid5_attrs[] =  {
4412	&raid5_stripecache_size.attr,
4413	&raid5_stripecache_active.attr,
4414	&raid5_preread_bypass_threshold.attr,
4415	NULL,
4416};
4417static struct attribute_group raid5_attrs_group = {
4418	.name = NULL,
4419	.attrs = raid5_attrs,
4420};
4421
4422static sector_t
4423raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4424{
4425	raid5_conf_t *conf = mddev->private;
4426
4427	if (!sectors)
4428		sectors = mddev->dev_sectors;
4429	if (!raid_disks)
4430		/* size is defined by the smallest of previous and new size */
4431		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4432
4433	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4434	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4435	return sectors * (raid_disks - conf->max_degraded);
4436}
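/* Editorial example (hypothetical numbers): a 5-device RAID6 array
 * (max_degraded == 2) with 1953125000-sector members and 1024-sector chunks
 * (old and new) first rounds each member down to a whole number of chunks,
 * 1953124352 sectors, and then exports 1953124352 * (5 - 2) usable sectors.
 */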
4437
4438static void raid5_free_percpu(raid5_conf_t *conf)
4439{
4440	struct raid5_percpu *percpu;
4441	unsigned long cpu;
4442
4443	if (!conf->percpu)
4444		return;
4445
4446	get_online_cpus();
4447	for_each_possible_cpu(cpu) {
4448		percpu = per_cpu_ptr(conf->percpu, cpu);
4449		safe_put_page(percpu->spare_page);
4450		kfree(percpu->scribble);
4451	}
4452#ifdef CONFIG_HOTPLUG_CPU
4453	unregister_cpu_notifier(&conf->cpu_notify);
4454#endif
4455	put_online_cpus();
4456
4457	free_percpu(conf->percpu);
4458}
4459
4460static void free_conf(raid5_conf_t *conf)
4461{
4462	shrink_stripes(conf);
4463	raid5_free_percpu(conf);
4464	kfree(conf->disks);
4465	kfree(conf->stripe_hashtbl);
4466	kfree(conf);
4467}
4468
4469#ifdef CONFIG_HOTPLUG_CPU
4470static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4471			      void *hcpu)
4472{
4473	raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4474	long cpu = (long)hcpu;
4475	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4476
4477	switch (action) {
4478	case CPU_UP_PREPARE:
4479	case CPU_UP_PREPARE_FROZEN:
4480		if (conf->level == 6 && !percpu->spare_page)
4481			percpu->spare_page = alloc_page(GFP_KERNEL);
4482		if (!percpu->scribble)
4483			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4484
4485		if (!percpu->scribble ||
4486		    (conf->level == 6 && !percpu->spare_page)) {
4487			safe_put_page(percpu->spare_page);
4488			kfree(percpu->scribble);
4489			pr_err("%s: failed memory allocation for cpu%ld\n",
4490			       __func__, cpu);
4491			return notifier_from_errno(-ENOMEM);
4492		}
4493		break;
4494	case CPU_DEAD:
4495	case CPU_DEAD_FROZEN:
4496		safe_put_page(percpu->spare_page);
4497		kfree(percpu->scribble);
4498		percpu->spare_page = NULL;
4499		percpu->scribble = NULL;
4500		break;
4501	default:
4502		break;
4503	}
4504	return NOTIFY_OK;
4505}
4506#endif
4507
4508static int raid5_alloc_percpu(raid5_conf_t *conf)
4509{
4510	unsigned long cpu;
4511	struct page *spare_page;
4512	struct raid5_percpu __percpu *allcpus;
4513	void *scribble;
4514	int err;
4515
4516	allcpus = alloc_percpu(struct raid5_percpu);
4517	if (!allcpus)
4518		return -ENOMEM;
4519	conf->percpu = allcpus;
4520
4521	get_online_cpus();
4522	err = 0;
4523	for_each_present_cpu(cpu) {
4524		if (conf->level == 6) {
4525			spare_page = alloc_page(GFP_KERNEL);
4526			if (!spare_page) {
4527				err = -ENOMEM;
4528				break;
4529			}
4530			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4531		}
4532		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4533		if (!scribble) {
4534			err = -ENOMEM;
4535			break;
4536		}
4537		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4538	}
4539#ifdef CONFIG_HOTPLUG_CPU
4540	conf->cpu_notify.notifier_call = raid456_cpu_notify;
4541	conf->cpu_notify.priority = 0;
4542	if (err == 0)
4543		err = register_cpu_notifier(&conf->cpu_notify);
4544#endif
4545	put_online_cpus();
4546
4547	return err;
4548}
4549
4550static raid5_conf_t *setup_conf(mddev_t *mddev)
4551{
4552	raid5_conf_t *conf;
4553	int raid_disk, memory, max_disks;
4554	mdk_rdev_t *rdev;
4555	struct disk_info *disk;
4556
4557	if (mddev->new_level != 5
4558	    && mddev->new_level != 4
4559	    && mddev->new_level != 6) {
4560		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4561		       mdname(mddev), mddev->new_level);
4562		return ERR_PTR(-EIO);
4563	}
4564	if ((mddev->new_level == 5
4565	     && !algorithm_valid_raid5(mddev->new_layout)) ||
4566	    (mddev->new_level == 6
4567	     && !algorithm_valid_raid6(mddev->new_layout))) {
4568		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4569		       mdname(mddev), mddev->new_layout);
4570		return ERR_PTR(-EIO);
4571	}
4572	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4573		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4574		       mdname(mddev), mddev->raid_disks);
4575		return ERR_PTR(-EINVAL);
4576	}
4577
4578	if (!mddev->new_chunk_sectors ||
4579	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4580	    !is_power_of_2(mddev->new_chunk_sectors)) {
4581		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4582		       mdname(mddev), mddev->new_chunk_sectors << 9);
4583		return ERR_PTR(-EINVAL);
4584	}
4585
4586	conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4587	if (conf == NULL)
4588		goto abort;
4589	spin_lock_init(&conf->device_lock);
4590	init_waitqueue_head(&conf->wait_for_stripe);
4591	init_waitqueue_head(&conf->wait_for_overlap);
4592	INIT_LIST_HEAD(&conf->handle_list);
4593	INIT_LIST_HEAD(&conf->hold_list);
4594	INIT_LIST_HEAD(&conf->delayed_list);
4595	INIT_LIST_HEAD(&conf->bitmap_list);
4596	INIT_LIST_HEAD(&conf->inactive_list);
4597	atomic_set(&conf->active_stripes, 0);
4598	atomic_set(&conf->preread_active_stripes, 0);
4599	atomic_set(&conf->active_aligned_reads, 0);
4600	conf->bypass_threshold = BYPASS_THRESHOLD;
4601
4602	conf->raid_disks = mddev->raid_disks;
4603	if (mddev->reshape_position == MaxSector)
4604		conf->previous_raid_disks = mddev->raid_disks;
4605	else
4606		conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4607	max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4608	conf->scribble_len = scribble_len(max_disks);
4609
4610	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4611			      GFP_KERNEL);
4612	if (!conf->disks)
4613		goto abort;
4614
4615	conf->mddev = mddev;
4616
4617	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4618		goto abort;
4619
4620	conf->level = mddev->new_level;
4621	if (raid5_alloc_percpu(conf) != 0)
4622		goto abort;
4623
4624	pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4625
4626	list_for_each_entry(rdev, &mddev->disks, same_set) {
4627		raid_disk = rdev->raid_disk;
4628		if (raid_disk >= max_disks
4629		    || raid_disk < 0)
4630			continue;
4631		disk = conf->disks + raid_disk;
4632
4633		disk->rdev = rdev;
4634
4635		if (test_bit(In_sync, &rdev->flags)) {
4636			char b[BDEVNAME_SIZE];
4637			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4638			       " disk %d\n",
4639			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4640		} else if (rdev->saved_raid_disk != raid_disk)
4641			/* Cannot rely on bitmap to complete recovery */
4642			conf->fullsync = 1;
4643	}
4644
4645	conf->chunk_sectors = mddev->new_chunk_sectors;
4646	conf->level = mddev->new_level;
4647	if (conf->level == 6)
4648		conf->max_degraded = 2;
4649	else
4650		conf->max_degraded = 1;
4651	conf->algorithm = mddev->new_layout;
4652	conf->max_nr_stripes = NR_STRIPES;
4653	conf->reshape_progress = mddev->reshape_position;
4654	if (conf->reshape_progress != MaxSector) {
4655		conf->prev_chunk_sectors = mddev->chunk_sectors;
4656		conf->prev_algo = mddev->layout;
4657	}
4658
4659	memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4660		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4661	if (grow_stripes(conf, conf->max_nr_stripes)) {
4662		printk(KERN_ERR
4663		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
4664		       mdname(mddev), memory);
4665		goto abort;
4666	} else
4667		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4668		       mdname(mddev), memory);
4669
4670	conf->thread = md_register_thread(raid5d, mddev, NULL);
4671	if (!conf->thread) {
4672		printk(KERN_ERR
4673		       "md/raid:%s: couldn't allocate thread.\n",
4674		       mdname(mddev));
4675		goto abort;
4676	}
4677
4678	return conf;
4679
4680 abort:
4681	if (conf) {
4682		free_conf(conf);
4683		return ERR_PTR(-EIO);
4684	} else
4685		return ERR_PTR(-ENOMEM);
4686}
4687
4688
4689static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4690{
4691	switch (algo) {
4692	case ALGORITHM_PARITY_0:
4693		if (raid_disk < max_degraded)
4694			return 1;
4695		break;
4696	case ALGORITHM_PARITY_N:
4697		if (raid_disk >= raid_disks - max_degraded)
4698			return 1;
4699		break;
4700	case ALGORITHM_PARITY_0_6:
4701		if (raid_disk == 0 || 
4702		    raid_disk == raid_disks - 1)
4703			return 1;
4704		break;
4705	case ALGORITHM_LEFT_ASYMMETRIC_6:
4706	case ALGORITHM_RIGHT_ASYMMETRIC_6:
4707	case ALGORITHM_LEFT_SYMMETRIC_6:
4708	case ALGORITHM_RIGHT_SYMMETRIC_6:
4709		if (raid_disk == raid_disks - 1)
4710			return 1;
4711	}
4712	return 0;
4713}
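/* Editorial note: only_parity() reports whether a slot holds parity only.
 * For example, with ALGORITHM_PARITY_0 on a RAID6 layout (max_degraded == 2)
 * raid disks 0 and 1 are parity-only, so a stale parity block there does not
 * make the array's data suspect when run() counts dirty_parity_disks below.
 */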
4714
4715static int run(mddev_t *mddev)
4716{
4717	raid5_conf_t *conf;
4718	int working_disks = 0;
4719	int dirty_parity_disks = 0;
4720	mdk_rdev_t *rdev;
4721	sector_t reshape_offset = 0;
4722
4723	if (mddev->recovery_cp != MaxSector)
4724		printk(KERN_NOTICE "md/raid:%s: not clean"
4725		       " -- starting background reconstruction\n",
4726		       mdname(mddev));
4727	if (mddev->reshape_position != MaxSector) {
4728		/* Check that we can continue the reshape.
4729		 * Currently only the number of disks can change; it must
4730		 * increase, and we must be past the point where
4731		 * a stripe over-writes itself
4732		 */
4733		sector_t here_new, here_old;
4734		int old_disks;
4735		int max_degraded = (mddev->level == 6 ? 2 : 1);
4736
4737		if (mddev->new_level != mddev->level) {
4738			printk(KERN_ERR "md/raid:%s: unsupported reshape "
4739			       "required - aborting.\n",
4740			       mdname(mddev));
4741			return -EINVAL;
4742		}
4743		old_disks = mddev->raid_disks - mddev->delta_disks;
4744		/* reshape_position must be on a new-stripe boundary, and one
4745		 * further up in new geometry must map after here in old
4746		 * geometry.
4747		 */
4748		here_new = mddev->reshape_position;
4749		if (sector_div(here_new, mddev->new_chunk_sectors *
4750			       (mddev->raid_disks - max_degraded))) {
4751			printk(KERN_ERR "md/raid:%s: reshape_position not "
4752			       "on a stripe boundary\n", mdname(mddev));
4753			return -EINVAL;
4754		}
4755		reshape_offset = here_new * mddev->new_chunk_sectors;
4756		/* here_new is the stripe we will write to */
4757		here_old = mddev->reshape_position;
4758		sector_div(here_old, mddev->chunk_sectors *
4759			   (old_disks-max_degraded));
4760		/* here_old is the first stripe that we might need to read
4761		 * from */
4762		if (mddev->delta_disks == 0) {
4763			/* We cannot be sure it is safe to start an in-place
4764			 * reshape.  It is only safe if user-space is monitoring
4765			 * and taking constant backups.
4766			 * mdadm always starts a situation like this in
4767			 * readonly mode so it can take control before
4768			 * allowing any writes.  So just check for that.
4769			 */
4770			if ((here_new * mddev->new_chunk_sectors != 
4771			     here_old * mddev->chunk_sectors) ||
4772			    mddev->ro == 0) {
4773				printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4774				       " in read-only mode - aborting\n",
4775				       mdname(mddev));
4776				return -EINVAL;
4777			}
4778		} else if (mddev->delta_disks < 0
4779		    ? (here_new * mddev->new_chunk_sectors <=
4780		       here_old * mddev->chunk_sectors)
4781		    : (here_new * mddev->new_chunk_sectors >=
4782		       here_old * mddev->chunk_sectors)) {
4783			/* Reading from the same stripe as writing to - bad */
4784			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4785			       "auto-recovery - aborting.\n",
4786			       mdname(mddev));
4787			return -EINVAL;
4788		}
4789		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4790		       mdname(mddev));
4791		/* OK, we should be able to continue; */
4792	} else {
4793		BUG_ON(mddev->level != mddev->new_level);
4794		BUG_ON(mddev->layout != mddev->new_layout);
4795		BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4796		BUG_ON(mddev->delta_disks != 0);
4797	}
4798
4799	if (mddev->private == NULL)
4800		conf = setup_conf(mddev);
4801	else
4802		conf = mddev->private;
4803
4804	if (IS_ERR(conf))
4805		return PTR_ERR(conf);
4806
4807	mddev->thread = conf->thread;
4808	conf->thread = NULL;
4809	mddev->private = conf;
4810
4811	/*
4812	 * 0 for a fully functional array, 1 or 2 for a degraded array.
4813	 */
4814	list_for_each_entry(rdev, &mddev->disks, same_set) {
4815		if (rdev->raid_disk < 0)
4816			continue;
4817		if (test_bit(In_sync, &rdev->flags)) {
4818			working_disks++;
4819			continue;
4820		}
4821		/* This disc is not fully in-sync.  However if it
4822		 * just stored parity (beyond the recovery_offset),
4823		 * then we don't need to be concerned about the
4824		 * array being dirty.
4825		 * When reshape goes 'backwards', we never have
4826		 * partially completed devices, so we only need
4827		 * to worry about reshape going forwards.
4828		 */
4829		/* Hack because v0.91 doesn't store recovery_offset properly. */
4830		if (mddev->major_version == 0 &&
4831		    mddev->minor_version > 90)
4832			rdev->recovery_offset = reshape_offset;
4833			
4834		if (rdev->recovery_offset < reshape_offset) {
4835			/* We need to check old and new layout */
4836			if (!only_parity(rdev->raid_disk,
4837					 conf->algorithm,
4838					 conf->raid_disks,
4839					 conf->max_degraded))
4840				continue;
4841		}
4842		if (!only_parity(rdev->raid_disk,
4843				 conf->prev_algo,
4844				 conf->previous_raid_disks,
4845				 conf->max_degraded))
4846			continue;
4847		dirty_parity_disks++;
4848	}
4849
4850	mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4851			   - working_disks);
4852
4853	if (has_failed(conf)) {
4854		printk(KERN_ERR "md/raid:%s: not enough operational devices"
4855			" (%d/%d failed)\n",
4856			mdname(mddev), mddev->degraded, conf->raid_disks);
4857		goto abort;
4858	}
4859
4860	/* device size must be a multiple of chunk size */
4861	mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4862	mddev->resync_max_sectors = mddev->dev_sectors;
4863
4864	if (mddev->degraded > dirty_parity_disks &&
4865	    mddev->recovery_cp != MaxSector) {
4866		if (mddev->ok_start_degraded)
4867			printk(KERN_WARNING
4868			       "md/raid:%s: starting dirty degraded array"
4869			       " - data corruption possible.\n",
4870			       mdname(mddev));
4871		else {
4872			printk(KERN_ERR
4873			       "md/raid:%s: cannot start dirty degraded array.\n",
4874			       mdname(mddev));
4875			goto abort;
4876		}
4877	}
4878
4879	if (mddev->degraded == 0)
4880		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
4881		       " devices, algorithm %d\n", mdname(mddev), conf->level,
4882		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4883		       mddev->new_layout);
4884	else
4885		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
4886		       " out of %d devices, algorithm %d\n",
4887		       mdname(mddev), conf->level,
4888		       mddev->raid_disks - mddev->degraded,
4889		       mddev->raid_disks, mddev->new_layout);
4890
4891	print_raid5_conf(conf);
4892
4893	if (conf->reshape_progress != MaxSector) {
4894		conf->reshape_safe = conf->reshape_progress;
4895		atomic_set(&conf->reshape_stripes, 0);
4896		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4897		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4898		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4899		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4900		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4901							"reshape");
4902	}
4903
4904
4905	/* Ok, everything is just fine now */
4906	if (mddev->to_remove == &raid5_attrs_group)
4907		mddev->to_remove = NULL;
4908	else if (mddev->kobj.sd &&
4909	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4910		printk(KERN_WARNING
4911		       "raid5: failed to create sysfs attributes for %s\n",
4912		       mdname(mddev));
4913	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
4914
4915	if (mddev->queue) {
4916		int chunk_size;
4917		/* read-ahead size must cover two whole stripes, which
4918		 * is 2 * (n - max_degraded) * chunksize, where 'n' is
4919		 * the number of raid devices
4920		 */
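		/* Editorial example (hypothetical numbers): a 6-device RAID6
		 * (max_degraded == 2, so 4 data disks) with 512K chunks and 4K
		 * pages gives stripe = 4 * (1024 << 9) / 4096 = 512 pages, so
		 * ra_pages is raised to at least 1024 pages (4MB) of read-ahead.
		 */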
4921		int data_disks = conf->previous_raid_disks - conf->max_degraded;
4922		int stripe = data_disks *
4923			((mddev->chunk_sectors << 9) / PAGE_SIZE);
4924		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4925			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4926
4927		blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4928
4929		mddev->queue->backing_dev_info.congested_data = mddev;
4930		mddev->queue->backing_dev_info.congested_fn = raid5_congested;
4931
4932		chunk_size = mddev->chunk_sectors << 9;
4933		blk_queue_io_min(mddev->queue, chunk_size);
4934		blk_queue_io_opt(mddev->queue, chunk_size *
4935				 (conf->raid_disks - conf->max_degraded));
4936
4937		list_for_each_entry(rdev, &mddev->disks, same_set)
4938			disk_stack_limits(mddev->gendisk, rdev->bdev,
4939					  rdev->data_offset << 9);
4940	}
4941
4942	return 0;
4943abort:
4944	md_unregister_thread(&mddev->thread);
4945	if (conf) {
4946		print_raid5_conf(conf);
4947		free_conf(conf);
4948	}
4949	mddev->private = NULL;
4950	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
4951	return -EIO;
4952}
4953
4954static int stop(mddev_t *mddev)
4955{
4956	raid5_conf_t *conf = mddev->private;
4957
4958	md_unregister_thread(&mddev->thread);
4959	if (mddev->queue)
4960		mddev->queue->backing_dev_info.congested_fn = NULL;
4961	free_conf(conf);
4962	mddev->private = NULL;
4963	mddev->to_remove = &raid5_attrs_group;
4964	return 0;
4965}
4966
4967#ifdef DEBUG
4968static void print_sh(struct seq_file *seq, struct stripe_head *sh)
4969{
4970	int i;
4971
4972	seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4973		   (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4974	seq_printf(seq, "sh %llu,  count %d.\n",
4975		   (unsigned long long)sh->sector, atomic_read(&sh->count));
4976	seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
4977	for (i = 0; i < sh->disks; i++) {
4978		seq_printf(seq, "(cache%d: %p %ld) ",
4979			   i, sh->dev[i].page, sh->dev[i].flags);
4980	}
4981	seq_printf(seq, "\n");
4982}
4983
4984static void printall(struct seq_file *seq, raid5_conf_t *conf)
4985{
4986	struct stripe_head *sh;
4987	struct hlist_node *hn;
4988	int i;
4989
4990	spin_lock_irq(&conf->device_lock);
4991	for (i = 0; i < NR_HASH; i++) {
4992		hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
4993			if (sh->raid_conf != conf)
4994				continue;
4995			print_sh(seq, sh);
4996		}
4997	}
4998	spin_unlock_irq(&conf->device_lock);
4999}
5000#endif
5001
5002static void status(struct seq_file *seq, mddev_t *mddev)
5003{
5004	raid5_conf_t *conf = mddev->private;
5005	int i;
5006
5007	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
5008		mddev->chunk_sectors / 2, mddev->layout);
5009	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
5010	for (i = 0; i < conf->raid_disks; i++)
5011		seq_printf (seq, "%s",
5012			       conf->disks[i].rdev &&
5013			       test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
5014	seq_printf (seq, "]");
5015#ifdef DEBUG
5016	seq_printf (seq, "\n");
5017	printall(seq, conf);
5018#endif
5019}
5020
5021static void print_raid5_conf (raid5_conf_t *conf)
5022{
5023	int i;
5024	struct disk_info *tmp;
5025
5026	printk(KERN_DEBUG "RAID conf printout:\n");
5027	if (!conf) {
5028		printk("(conf==NULL)\n");
5029		return;
5030	}
5031	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
5032	       conf->raid_disks,
5033	       conf->raid_disks - conf->mddev->degraded);
5034
5035	for (i = 0; i < conf->raid_disks; i++) {
5036		char b[BDEVNAME_SIZE];
5037		tmp = conf->disks + i;
5038		if (tmp->rdev)
5039			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
5040			       i, !test_bit(Faulty, &tmp->rdev->flags),
5041			       bdevname(tmp->rdev->bdev, b));
5042	}
5043}
5044
5045static int raid5_spare_active(mddev_t *mddev)
5046{
5047	int i;
5048	raid5_conf_t *conf = mddev->private;
5049	struct disk_info *tmp;
5050	int count = 0;
5051	unsigned long flags;
5052
5053	for (i = 0; i < conf->raid_disks; i++) {
5054		tmp = conf->disks + i;
5055		if (tmp->rdev
5056		    && tmp->rdev->recovery_offset == MaxSector
5057		    && !test_bit(Faulty, &tmp->rdev->flags)
5058		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5059			count++;
5060			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
5061		}
5062	}
5063	spin_lock_irqsave(&conf->device_lock, flags);
5064	mddev->degraded -= count;
5065	spin_unlock_irqrestore(&conf->device_lock, flags);
5066	print_raid5_conf(conf);
5067	return count;
5068}
5069
5070static int raid5_remove_disk(mddev_t *mddev, int number)
5071{
5072	raid5_conf_t *conf = mddev->private;
5073	int err = 0;
5074	mdk_rdev_t *rdev;
5075	struct disk_info *p = conf->disks + number;
5076
5077	print_raid5_conf(conf);
5078	rdev = p->rdev;
5079	if (rdev) {
5080		if (number >= conf->raid_disks &&
5081		    conf->reshape_progress == MaxSector)
5082			clear_bit(In_sync, &rdev->flags);
5083
5084		if (test_bit(In_sync, &rdev->flags) ||
5085		    atomic_read(&rdev->nr_pending)) {
5086			err = -EBUSY;
5087			goto abort;
5088		}
5089		/* Only remove non-faulty devices if recovery
5090		 * isn't possible.
5091		 */
5092		if (!test_bit(Faulty, &rdev->flags) &&
5093		    mddev->recovery_disabled != conf->recovery_disabled &&
5094		    !has_failed(conf) &&
5095		    number < conf->raid_disks) {
5096			err = -EBUSY;
5097			goto abort;
5098		}
5099		p->rdev = NULL;
5100		synchronize_rcu();
5101		if (atomic_read(&rdev->nr_pending)) {
5102			/* lost the race, try later */
5103			err = -EBUSY;
5104			p->rdev = rdev;
5105		}
5106	}
5107abort:
5108
5109	print_raid5_conf(conf);
5110	return err;
5111}
5112
5113static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5114{
5115	raid5_conf_t *conf = mddev->private;
5116	int err = -EEXIST;
5117	int disk;
5118	struct disk_info *p;
5119	int first = 0;
5120	int last = conf->raid_disks - 1;
5121
5122	if (mddev->recovery_disabled == conf->recovery_disabled)
5123		return -EBUSY;
5124
5125	if (has_failed(conf))
5126		/* no point adding a device */
5127		return -EINVAL;
5128
5129	if (rdev->raid_disk >= 0)
5130		first = last = rdev->raid_disk;
5131
5132	/*
5133	 * find the disk ... but prefer rdev->saved_raid_disk
5134	 * if possible.
5135	 */
5136	if (rdev->saved_raid_disk >= 0 &&
5137	    rdev->saved_raid_disk >= first &&
5138	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
5139		disk = rdev->saved_raid_disk;
5140	else
5141		disk = first;
5142	for ( ; disk <= last ; disk++)
5143		if ((p=conf->disks + disk)->rdev == NULL) {
5144			clear_bit(In_sync, &rdev->flags);
5145			rdev->raid_disk = disk;
5146			err = 0;
5147			if (rdev->saved_raid_disk != disk)
5148				conf->fullsync = 1;
5149			rcu_assign_pointer(p->rdev, rdev);
5150			break;
5151		}
5152	print_raid5_conf(conf);
5153	return err;
5154}
5155
5156static int raid5_resize(mddev_t *mddev, sector_t sectors)
5157{
5158	/* no resync is happening, and there is enough space
5159	 * on all devices, so we can resize.
5160	 * We need to make sure resync covers any new space.
5161	 * If the array is shrinking we should possibly wait until
5162	 * any io in the removed space completes, but it hardly seems
5163	 * worth it.
5164	 */
5165	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
5166	md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5167					       mddev->raid_disks));
5168	if (mddev->array_sectors >
5169	    raid5_size(mddev, sectors, mddev->raid_disks))
5170		return -EINVAL;
5171	set_capacity(mddev->gendisk, mddev->array_sectors);
5172	revalidate_disk(mddev->gendisk);
5173	if (sectors > mddev->dev_sectors &&
5174	    mddev->recovery_cp > mddev->dev_sectors) {
5175		mddev->recovery_cp = mddev->dev_sectors;
5176		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5177	}
5178	mddev->dev_sectors = sectors;
5179	mddev->resync_max_sectors = sectors;
5180	return 0;
5181}
5182
5183static int check_stripe_cache(mddev_t *mddev)
5184{
5185	/* Can only proceed if there are plenty of stripe_heads.
5186	 * We need a minimum of one full stripe, and for sensible progress
5187	 * it is best to have about 4 times that.
5188	 * If we require 4 times, then the default 256 4K stripe_heads will
5189	 * allow for chunk sizes up to 256K, which is probably OK.
5190	 * If the chunk size is greater, user-space should request more
5191	 * stripe_heads first.
5192	 */
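	/* Editorial example (assumes 4K pages): reshaping with 512K chunks
	 * needs ((1024 << 9) / 4096) * 4 = 512 stripe_heads, so the default
	 * cache of 256 is rejected until stripe_cache_size is raised.
	 */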
5193	raid5_conf_t *conf = mddev->private;
5194	if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
5195	    > conf->max_nr_stripes ||
5196	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
5197	    > conf->max_nr_stripes) {
5198		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
5199		       mdname(mddev),
5200		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
5201			/ STRIPE_SIZE)*4);
5202		return 0;
5203	}
5204	return 1;
5205}
5206
5207static int check_reshape(mddev_t *mddev)
5208{
5209	raid5_conf_t *conf = mddev->private;
5210
5211	if (mddev->delta_disks == 0 &&
5212	    mddev->new_layout == mddev->layout &&
5213	    mddev->new_chunk_sectors == mddev->chunk_sectors)
5214		return 0; /* nothing to do */
5215	if (mddev->bitmap)
5216		/* Cannot grow a bitmap yet */
5217		return -EBUSY;
5218	if (has_failed(conf))
5219		return -EINVAL;
5220	if (mddev->delta_disks < 0) {
5221		/* We might be able to shrink, but the devices must
5222		 * be made bigger first.
5223		 * For raid6, 4 is the minimum number of devices.
5224		 * Otherwise 2 is the minimum.
5225		 */
5226		int min = 2;
5227		if (mddev->level == 6)
5228			min = 4;
5229		if (mddev->raid_disks + mddev->delta_disks < min)
5230			return -EINVAL;
5231	}
5232
5233	if (!check_stripe_cache(mddev))
5234		return -ENOSPC;
5235
5236	return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
5237}
5238
5239static int raid5_start_reshape(mddev_t *mddev)
5240{
5241	raid5_conf_t *conf = mddev->private;
5242	mdk_rdev_t *rdev;
5243	int spares = 0;
5244	unsigned long flags;
5245
5246	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5247		return -EBUSY;
5248
5249	if (!check_stripe_cache(mddev))
5250		return -ENOSPC;
5251
5252	list_for_each_entry(rdev, &mddev->disks, same_set)
5253		if (!test_bit(In_sync, &rdev->flags)
5254		    && !test_bit(Faulty, &rdev->flags))
5255			spares++;
5256
5257	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
5258		/* Not enough devices even to make a degraded array
5259		 * of that size
5260		 */
5261		return -EINVAL;
5262
5263	/* Refuse to reduce size of the array.  Any reductions in
5264	 * array size must be through explicit setting of array_size
5265	 * attribute.
5266	 */
5267	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5268	    < mddev->array_sectors) {
5269		printk(KERN_ERR "md/raid:%s: array size must be reduced "
5270		       "before number of disks\n", mdname(mddev));
5271		return -EINVAL;
5272	}
5273
5274	atomic_set(&conf->reshape_stripes, 0);
5275	spin_lock_irq(&conf->device_lock);
5276	conf->previous_raid_disks = conf->raid_disks;
5277	conf->raid_disks += mddev->delta_disks;
5278	conf->prev_chunk_sectors = conf->chunk_sectors;
5279	conf->chunk_sectors = mddev->new_chunk_sectors;
5280	conf->prev_algo = conf->algorithm;
5281	conf->algorithm = mddev->new_layout;
5282	if (mddev->delta_disks < 0)
5283		conf->reshape_progress = raid5_size(mddev, 0, 0);
5284	else
5285		conf->reshape_progress = 0;
5286	conf->reshape_safe = conf->reshape_progress;
5287	conf->generation++;
5288	spin_unlock_irq(&conf->device_lock);
5289
5290	/* Add some new drives, as many as will fit.
5291	 * We know there are enough to make the newly sized array work.
5292	 * Don't add devices if we are reducing the number of
5293	 * devices in the array.  This is because it is not possible
5294	 * to correctly record the "partially reconstructed" state of
5295	 * such devices during the reshape and confusion could result.
5296	 */
5297	if (mddev->delta_disks >= 0) {
5298		int added_devices = 0;
5299		list_for_each_entry(rdev, &mddev->disks, same_set)
5300			if (rdev->raid_disk < 0 &&
5301			    !test_bit(Faulty, &rdev->flags)) {
5302				if (raid5_add_disk(mddev, rdev) == 0) {
5303					if (rdev->raid_disk
5304					    >= conf->previous_raid_disks) {
5305						set_bit(In_sync, &rdev->flags);
5306						added_devices++;
5307					} else
5308						rdev->recovery_offset = 0;
5309
5310					if (sysfs_link_rdev(mddev, rdev))
5311						/* Failure here is OK */;
5312				}
5313			} else if (rdev->raid_disk >= conf->previous_raid_disks
5314				   && !test_bit(Faulty, &rdev->flags)) {
5315				/* This is a spare that was manually added */
5316				set_bit(In_sync, &rdev->flags);
5317				added_devices++;
5318			}
5319
5320		/* When a reshape changes the number of devices,
5321		 * ->degraded is measured against the larger of the
5322		 * pre and post number of devices.
5323		 */
5324		spin_lock_irqsave(&conf->device_lock, flags);
5325		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5326			- added_devices;
5327		spin_unlock_irqrestore(&conf->device_lock, flags);
5328	}
5329	mddev->raid_disks = conf->raid_disks;
5330	mddev->reshape_position = conf->reshape_progress;
5331	set_bit(MD_CHANGE_DEVS, &mddev->flags);
5332
5333	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5334	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5335	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5336	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5337	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5338						"reshape");
5339	if (!mddev->sync_thread) {
5340		mddev->recovery = 0;
5341		spin_lock_irq(&conf->device_lock);
5342		mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
5343		conf->reshape_progress = MaxSector;
5344		spin_unlock_irq(&conf->device_lock);
5345		return -EAGAIN;
5346	}
5347	conf->reshape_checkpoint = jiffies;
5348	md_wakeup_thread(mddev->sync_thread);
5349	md_new_event(mddev);
5350	return 0;
5351}
5352
5353/* This is called from the reshape thread and should make any
5354 * changes needed in 'conf'
5355 */
5356static void end_reshape(raid5_conf_t *conf)
5357{
5358
5359	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
5360
5361		spin_lock_irq(&conf->device_lock);
5362		conf->previous_raid_disks = conf->raid_disks;
5363		conf->reshape_progress = MaxSector;
5364		spin_unlock_irq(&conf->device_lock);
5365		wake_up(&conf->wait_for_overlap);
5366
5367		/* read-ahead size must cover two whole stripes, which is
5368		 * 2 * (n - max_degraded) * chunksize, where 'n' is the number of raid devices
5369		 */
5370		if (conf->mddev->queue) {
5371			int data_disks = conf->raid_disks - conf->max_degraded;
5372			int stripe = data_disks * ((conf->chunk_sectors << 9)
5373						   / PAGE_SIZE);
5374			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5375				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5376		}
5377	}
5378}
5379
5380/* This is called from the raid5d thread with mddev_lock held.
5381 * It makes config changes to the device.
5382 */
5383static void raid5_finish_reshape(mddev_t *mddev)
5384{
5385	raid5_conf_t *conf = mddev->private;
5386
5387	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5388
5389		if (mddev->delta_disks > 0) {
5390			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5391			set_capacity(mddev->gendisk, mddev->array_sectors);
5392			revalidate_disk(mddev->gendisk);
5393		} else {
5394			int d;
5395			mddev->degraded = conf->raid_disks;
5396			for (d = 0; d < conf->raid_disks ; d++)
5397				if (conf->disks[d].rdev &&
5398				    test_bit(In_sync,
5399					     &conf->disks[d].rdev->flags))
5400					mddev->degraded--;
5401			for (d = conf->raid_disks ;
5402			     d < conf->raid_disks - mddev->delta_disks;
5403			     d++) {
5404				mdk_rdev_t *rdev = conf->disks[d].rdev;
5405				if (rdev && raid5_remove_disk(mddev, d) == 0) {
5406					sysfs_unlink_rdev(mddev, rdev);
5407					rdev->raid_disk = -1;
5408				}
5409			}
5410		}
5411		mddev->layout = conf->algorithm;
5412		mddev->chunk_sectors = conf->chunk_sectors;
5413		mddev->reshape_position = MaxSector;
5414		mddev->delta_disks = 0;
5415	}
5416}
5417
5418static void raid5_quiesce(mddev_t *mddev, int state)
5419{
5420	raid5_conf_t *conf = mddev->private;
5421
5422	switch(state) {
5423	case 2: /* resume for a suspend */
5424		wake_up(&conf->wait_for_overlap);
5425		break;
5426
5427	case 1: /* stop all writes */
5428		spin_lock_irq(&conf->device_lock);
5429		/* '2' tells resync/reshape to pause so that all
5430		 * active stripes can drain
5431		 */
5432		conf->quiesce = 2;
5433		wait_event_lock_irq(conf->wait_for_stripe,
5434				    atomic_read(&conf->active_stripes) == 0 &&
5435				    atomic_read(&conf->active_aligned_reads) == 0,
5436				    conf->device_lock, /* nothing */);
5437		conf->quiesce = 1;
5438		spin_unlock_irq(&conf->device_lock);
5439		/* allow reshape to continue */
5440		wake_up(&conf->wait_for_overlap);
5441		break;
5442
5443	case 0: /* re-enable writes */
5444		spin_lock_irq(&conf->device_lock);
5445		conf->quiesce = 0;
5446		wake_up(&conf->wait_for_stripe);
5447		wake_up(&conf->wait_for_overlap);
5448		spin_unlock_irq(&conf->device_lock);
5449		break;
5450	}
5451}
5452
5453
5454static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5455{
5456	struct raid0_private_data *raid0_priv = mddev->private;
5457	sector_t sectors;
5458
5459	/* for raid0 takeover only one zone is supported */
5460	if (raid0_priv->nr_strip_zones > 1) {
5461		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
5462		       mdname(mddev));
5463		return ERR_PTR(-EINVAL);
5464	}
5465
5466	sectors = raid0_priv->strip_zone[0].zone_end;
5467	sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
5468	mddev->dev_sectors = sectors;
5469	mddev->new_level = level;
5470	mddev->new_layout = ALGORITHM_PARITY_N;
5471	mddev->new_chunk_sectors = mddev->chunk_sectors;
5472	mddev->raid_disks += 1;
5473	mddev->delta_disks = 1;
5474	/* make sure it will be not marked as dirty */
5475	mddev->recovery_cp = MaxSector;
5476
5477	return setup_conf(mddev);
5478}
5479
5480
5481static void *raid5_takeover_raid1(mddev_t *mddev)
5482{
5483	int chunksect;
5484
5485	if (mddev->raid_disks != 2 ||
5486	    mddev->degraded > 1)
5487		return ERR_PTR(-EINVAL);
5488
5489	/* Should check if there are write-behind devices? */
5490
5491	chunksect = 64*2; /* 64K by default */
5492
5493	/* The array must be an exact multiple of chunksize */
5494	while (chunksect && (mddev->array_sectors & (chunksect-1)))
5495		chunksect >>= 1;
5496
5497	if ((chunksect<<9) < STRIPE_SIZE)
5498		/* array size does not allow a suitable chunk size */
5499		return ERR_PTR(-EINVAL);
5500
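	/* Editorial example (hypothetical numbers): for a raid1 array of
	 * 1048640 sectors, 128-sector (64K) chunks do not divide the size
	 * evenly (1048640 & 127 == 64), so chunksect is halved to 64 sectors
	 * (32K), which divides evenly and still satisfies STRIPE_SIZE.
	 */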
5501	mddev->new_level = 5;
5502	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5503	mddev->new_chunk_sectors = chunksect;
5504
5505	return setup_conf(mddev);
5506}
5507
5508static void *raid5_takeover_raid6(mddev_t *mddev)
5509{
5510	int new_layout;
5511
5512	switch (mddev->layout) {
5513	case ALGORITHM_LEFT_ASYMMETRIC_6:
5514		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5515		break;
5516	case ALGORITHM_RIGHT_ASYMMETRIC_6:
5517		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5518		break;
5519	case ALGORITHM_LEFT_SYMMETRIC_6:
5520		new_layout = ALGORITHM_LEFT_SYMMETRIC;
5521		break;
5522	case ALGORITHM_RIGHT_SYMMETRIC_6:
5523		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5524		break;
5525	case ALGORITHM_PARITY_0_6:
5526		new_layout = ALGORITHM_PARITY_0;
5527		break;
5528	case ALGORITHM_PARITY_N:
5529		new_layout = ALGORITHM_PARITY_N;
5530		break;
5531	default:
5532		return ERR_PTR(-EINVAL);
5533	}
5534	mddev->new_level = 5;
5535	mddev->new_layout = new_layout;
5536	mddev->delta_disks = -1;
5537	mddev->raid_disks -= 1;
5538	return setup_conf(mddev);
5539}
5540
5541
5542static int raid5_check_reshape(mddev_t *mddev)
5543{
5544	/* For a 2-drive array, the layout and chunk size can be changed
5545	 * immediately as no restriping is needed.
5546	 * For larger arrays we record the new value - after validation
5547	 * to be used by a reshape pass.
5548	 */
5549	raid5_conf_t *conf = mddev->private;
5550	int new_chunk = mddev->new_chunk_sectors;
5551
5552	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
5553		return -EINVAL;
5554	if (new_chunk > 0) {
5555		if (!is_power_of_2(new_chunk))
5556			return -EINVAL;
5557		if (new_chunk < (PAGE_SIZE>>9))
5558			return -EINVAL;
5559		if (mddev->array_sectors & (new_chunk-1))
5560			/* not factor of array size */
5561			return -EINVAL;
5562	}
5563
5564	/* They look valid */
5565
5566	if (mddev->raid_disks == 2) {
5567		/* can make the change immediately */
5568		if (mddev->new_layout >= 0) {
5569			conf->algorithm = mddev->new_layout;
5570			mddev->layout = mddev->new_layout;
5571		}
5572		if (new_chunk > 0) {
5573			conf->chunk_sectors = new_chunk ;
5574			mddev->chunk_sectors = new_chunk;
5575		}
5576		set_bit(MD_CHANGE_DEVS, &mddev->flags);
5577		md_wakeup_thread(mddev->thread);
5578	}
5579	return check_reshape(mddev);
5580}
5581
5582static int raid6_check_reshape(mddev_t *mddev)
5583{
5584	int new_chunk = mddev->new_chunk_sectors;
5585
5586	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
5587		return -EINVAL;
5588	if (new_chunk > 0) {
5589		if (!is_power_of_2(new_chunk))
5590			return -EINVAL;
5591		if (new_chunk < (PAGE_SIZE >> 9))
5592			return -EINVAL;
5593		if (mddev->array_sectors & (new_chunk-1))
5594			/* not factor of array size */
5595			return -EINVAL;
5596	}
5597
5598	/* They look valid */
5599	return check_reshape(mddev);
5600}
5601
5602static void *raid5_takeover(mddev_t *mddev)
5603{
5604	/* raid5 can take over:
5605	 *  raid0 - if there is only one strip zone - make it a raid4 layout
5606	 *  raid1 - if there are two drives.  We need to know the chunk size
5607	 *  raid4 - trivial - just use a raid4 layout.
5608	 *  raid6 - Providing it is a *_6 layout
5609	 */
5610	if (mddev->level == 0)
5611		return raid45_takeover_raid0(mddev, 5);
5612	if (mddev->level == 1)
5613		return raid5_takeover_raid1(mddev);
5614	if (mddev->level == 4) {
5615		mddev->new_layout = ALGORITHM_PARITY_N;
5616		mddev->new_level = 5;
5617		return setup_conf(mddev);
5618	}
5619	if (mddev->level == 6)
5620		return raid5_takeover_raid6(mddev);
5621
5622	return ERR_PTR(-EINVAL);
5623}
5624
5625static void *raid4_takeover(mddev_t *mddev)
5626{
5627	/* raid4 can take over:
5628	 *  raid0 - if there is only one strip zone
5629	 *  raid5 - if layout is right
5630	 */
5631	if (mddev->level == 0)
5632		return raid45_takeover_raid0(mddev, 4);
5633	if (mddev->level == 5 &&
5634	    mddev->layout == ALGORITHM_PARITY_N) {
5635		mddev->new_layout = 0;
5636		mddev->new_level = 4;
5637		return setup_conf(mddev);
5638	}
5639	return ERR_PTR(-EINVAL);
5640}
5641
5642static struct mdk_personality raid5_personality;
5643
5644static void *raid6_takeover(mddev_t *mddev)
5645{
5646	/* Currently can only take over a raid5.  We map the
5647	 * personality to an equivalent raid6 personality
5648	 * with the Q block at the end.
5649	 */
5650	int new_layout;
5651
5652	if (mddev->pers != &raid5_personality)
5653		return ERR_PTR(-EINVAL);
5654	if (mddev->degraded > 1)
5655		return ERR_PTR(-EINVAL);
5656	if (mddev->raid_disks > 253)
5657		return ERR_PTR(-EINVAL);
5658	if (mddev->raid_disks < 3)
5659		return ERR_PTR(-EINVAL);
5660
5661	switch (mddev->layout) {
5662	case ALGORITHM_LEFT_ASYMMETRIC:
5663		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5664		break;
5665	case ALGORITHM_RIGHT_ASYMMETRIC:
5666		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5667		break;
5668	case ALGORITHM_LEFT_SYMMETRIC:
5669		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5670		break;
5671	case ALGORITHM_RIGHT_SYMMETRIC:
5672		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5673		break;
5674	case ALGORITHM_PARITY_0:
5675		new_layout = ALGORITHM_PARITY_0_6;
5676		break;
5677	case ALGORITHM_PARITY_N:
5678		new_layout = ALGORITHM_PARITY_N;
5679		break;
5680	default:
5681		return ERR_PTR(-EINVAL);
5682	}
5683	mddev->new_level = 6;
5684	mddev->new_layout = new_layout;
5685	mddev->delta_disks = 1;
5686	mddev->raid_disks += 1;
5687	return setup_conf(mddev);
5688}
5689
5690
5691static struct mdk_personality raid6_personality =
5692{
5693	.name		= "raid6",
5694	.level		= 6,
5695	.owner		= THIS_MODULE,
5696	.make_request	= make_request,
5697	.run		= run,
5698	.stop		= stop,
5699	.status		= status,
5700	.error_handler	= error,
5701	.hot_add_disk	= raid5_add_disk,
5702	.hot_remove_disk= raid5_remove_disk,
5703	.spare_active	= raid5_spare_active,
5704	.sync_request	= sync_request,
5705	.resize		= raid5_resize,
5706	.size		= raid5_size,
5707	.check_reshape	= raid6_check_reshape,
5708	.start_reshape  = raid5_start_reshape,
5709	.finish_reshape = raid5_finish_reshape,
5710	.quiesce	= raid5_quiesce,
5711	.takeover	= raid6_takeover,
5712};
5713static struct mdk_personality raid5_personality =
5714{
5715	.name		= "raid5",
5716	.level		= 5,
5717	.owner		= THIS_MODULE,
5718	.make_request	= make_request,
5719	.run		= run,
5720	.stop		= stop,
5721	.status		= status,
5722	.error_handler	= error,
5723	.hot_add_disk	= raid5_add_disk,
5724	.hot_remove_disk= raid5_remove_disk,
5725	.spare_active	= raid5_spare_active,
5726	.sync_request	= sync_request,
5727	.resize		= raid5_resize,
5728	.size		= raid5_size,
5729	.check_reshape	= raid5_check_reshape,
5730	.start_reshape  = raid5_start_reshape,
5731	.finish_reshape = raid5_finish_reshape,
5732	.quiesce	= raid5_quiesce,
5733	.takeover	= raid5_takeover,
5734};
5735
5736static struct mdk_personality raid4_personality =
5737{
5738	.name		= "raid4",
5739	.level		= 4,
5740	.owner		= THIS_MODULE,
5741	.make_request	= make_request,
5742	.run		= run,
5743	.stop		= stop,
5744	.status		= status,
5745	.error_handler	= error,
5746	.hot_add_disk	= raid5_add_disk,
5747	.hot_remove_disk= raid5_remove_disk,
5748	.spare_active	= raid5_spare_active,
5749	.sync_request	= sync_request,
5750	.resize		= raid5_resize,
5751	.size		= raid5_size,
5752	.check_reshape	= raid5_check_reshape,
5753	.start_reshape  = raid5_start_reshape,
5754	.finish_reshape = raid5_finish_reshape,
5755	.quiesce	= raid5_quiesce,
5756	.takeover	= raid4_takeover,
5757};
5758
5759static int __init raid5_init(void)
5760{
5761	register_md_personality(&raid6_personality);
5762	register_md_personality(&raid5_personality);
5763	register_md_personality(&raid4_personality);
5764	return 0;
5765}
5766
5767static void raid5_exit(void)
5768{
5769	unregister_md_personality(&raid6_personality);
5770	unregister_md_personality(&raid5_personality);
5771	unregister_md_personality(&raid4_personality);
5772}
5773
5774module_init(raid5_init);
5775module_exit(raid5_exit);
5776MODULE_LICENSE("GPL");
5777MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
5778MODULE_ALIAS("md-personality-4"); /* RAID5 */
5779MODULE_ALIAS("md-raid5");
5780MODULE_ALIAS("md-raid4");
5781MODULE_ALIAS("md-level-5");
5782MODULE_ALIAS("md-level-4");
5783MODULE_ALIAS("md-personality-8"); /* RAID6 */
5784MODULE_ALIAS("md-raid6");
5785MODULE_ALIAS("md-level-6");
5786
5787/* This used to be two separate modules, they were: */
5788MODULE_ALIAS("raid5");
5789MODULE_ALIAS("raid6");