   1/*
   2 * Copyright (C) 2012 Fusion-io  All rights reserved.
   3 * Copyright (C) 2012 Intel Corp. All rights reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public
   7 * License v2 as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  12 * General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public
  15 * License along with this program; if not, write to the
  16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17 * Boston, MA 02111-1307, USA.
  18 */
  19#include <linux/sched.h>
  20#include <linux/wait.h>
  21#include <linux/bio.h>
  22#include <linux/slab.h>
  23#include <linux/buffer_head.h>
  24#include <linux/blkdev.h>
  25#include <linux/random.h>
  26#include <linux/iocontext.h>
  27#include <linux/capability.h>
  28#include <linux/ratelimit.h>
  29#include <linux/kthread.h>
  30#include <linux/raid/pq.h>
  31#include <linux/hash.h>
  32#include <linux/list_sort.h>
  33#include <linux/raid/xor.h>
  34#include <linux/vmalloc.h>
  35#include <asm/div64.h>
  36#include "ctree.h"
  37#include "extent_map.h"
  38#include "disk-io.h"
  39#include "transaction.h"
  40#include "print-tree.h"
  41#include "volumes.h"
  42#include "raid56.h"
  43#include "async-thread.h"
  44#include "check-integrity.h"
  45#include "rcu-string.h"
  46
  47/* set when additional merges to this rbio are not allowed */
  48#define RBIO_RMW_LOCKED_BIT	1
  49
  50/*
  51 * set when this rbio is sitting in the hash, but it is just a cache
  52 * of past RMW
  53 */
  54#define RBIO_CACHE_BIT		2
  55
  56/*
  57 * set when it is safe to trust the stripe_pages for caching
  58 */
  59#define RBIO_CACHE_READY_BIT	3
  60
  61#define RBIO_CACHE_SIZE 1024
  62
  63enum btrfs_rbio_ops {
  64	BTRFS_RBIO_WRITE,
  65	BTRFS_RBIO_READ_REBUILD,
  66	BTRFS_RBIO_PARITY_SCRUB,
  67	BTRFS_RBIO_REBUILD_MISSING,
  68};
  69
  70struct btrfs_raid_bio {
  71	struct btrfs_fs_info *fs_info;
  72	struct btrfs_bio *bbio;
  73
  74	/* while we're doing rmw on a stripe
  75	 * we put it into a hash table so we can
  76	 * lock the stripe and merge more rbios
  77	 * into it.
  78	 */
  79	struct list_head hash_list;
  80
  81	/*
  82	 * LRU list for the stripe cache
  83	 */
  84	struct list_head stripe_cache;
  85
  86	/*
  87	 * for scheduling work in the helper threads
  88	 */
  89	struct btrfs_work work;
  90
  91	/*
  92	 * bio list and bio_list_lock are used
  93	 * to add more bios into the stripe
  94	 * in hopes of avoiding the full rmw
  95	 */
  96	struct bio_list bio_list;
  97	spinlock_t bio_list_lock;
  98
  99	/* also protected by the bio_list_lock, the
 100	 * plug list is used by the plugging code
 101	 * to collect partial bios while plugged.  The
 102	 * stripe locking code also uses it to hand off
 103	 * the stripe lock to the next pending IO
 104	 */
 105	struct list_head plug_list;
 106
 107	/*
 108	 * flags that tell us if it is safe to
 109	 * merge with this bio
 110	 */
 111	unsigned long flags;
 112
 113	/* size of each individual stripe on disk */
 114	int stripe_len;
 115
 116	/* number of data stripes (no p/q) */
 117	int nr_data;
 118
 119	int real_stripes;
 120
 121	int stripe_npages;
 122	/*
 123	 * set if we're doing a parity rebuild
 124	 * for a read from higher up, which is handled
 125	 * differently from a parity rebuild as part of
 126	 * rmw
 127	 */
 128	enum btrfs_rbio_ops operation;
 129
 130	/* first bad stripe */
 131	int faila;
 132
 133	/* second bad stripe (for raid6 use) */
 134	int failb;
 135
 136	int scrubp;
 137	/*
 138	 * number of pages needed to represent the full
 139	 * stripe
 140	 */
 141	int nr_pages;
 142
 143	/*
 144	 * size of all the bios in the bio_list.  This
 145	 * helps us decide if the rbio maps to a full
 146	 * stripe or not
 147	 */
 148	int bio_list_bytes;
 149
 150	int generic_bio_cnt;
 151
 152	atomic_t refs;
 153
 154	atomic_t stripes_pending;
 155
 156	atomic_t error;
 157	/*
 158	 * these are two arrays of pointers.  We allocate the
  159	 * rbio big enough to hold them both and set up their
 160	 * locations when the rbio is allocated
 161	 */
 162
 163	/* pointers to pages that we allocated for
 164	 * reading/writing stripes directly from the disk (including P/Q)
 165	 */
 166	struct page **stripe_pages;
 167
 168	/*
 169	 * pointers to the pages in the bio_list.  Stored
 170	 * here for faster lookup
 171	 */
 172	struct page **bio_pages;
 173
 174	/*
 175	 * bitmap to record which horizontal stripe has data
 176	 */
 177	unsigned long *dbitmap;
 178};
 179
 180static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 181static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 182static void rmw_work(struct btrfs_work *work);
 183static void read_rebuild_work(struct btrfs_work *work);
 184static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
 185static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 186static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 187static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 188static void __free_raid_bio(struct btrfs_raid_bio *rbio);
 189static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 190static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 191
 192static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 193					 int need_check);
 194static void async_scrub_parity(struct btrfs_raid_bio *rbio);
 195
 196/*
 197 * the stripe hash table is used for locking, and to collect
 198 * bios in hopes of making a full stripe
 199 */
 200int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 201{
 202	struct btrfs_stripe_hash_table *table;
 203	struct btrfs_stripe_hash_table *x;
 204	struct btrfs_stripe_hash *cur;
 205	struct btrfs_stripe_hash *h;
 206	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 207	int i;
 208	int table_size;
 209
 210	if (info->stripe_hash_table)
 211		return 0;
 212
 213	/*
 214	 * The table is large, starting with order 4 and can go as high as
 215	 * order 7 in case lock debugging is turned on.
 216	 *
 217	 * Try harder to allocate and fallback to vmalloc to lower the chance
 218	 * of a failing mount.
 219	 */
 220	table_size = sizeof(*table) + sizeof(*h) * num_entries;
 221	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
 222	if (!table) {
 223		table = vzalloc(table_size);
 224		if (!table)
 225			return -ENOMEM;
 226	}
 227
 228	spin_lock_init(&table->cache_lock);
 229	INIT_LIST_HEAD(&table->stripe_cache);
 230
 231	h = table->table;
 232
 233	for (i = 0; i < num_entries; i++) {
 234		cur = h + i;
 235		INIT_LIST_HEAD(&cur->hash_list);
 236		spin_lock_init(&cur->lock);
 237		init_waitqueue_head(&cur->wait);
 238	}
 239
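	/*
	 * publish the table: cmpxchg only installs ours if nobody else
	 * has set one up in the meantime
	 */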
 240	x = cmpxchg(&info->stripe_hash_table, NULL, table);
 241	if (x)
 242		kvfree(x);
 243	return 0;
 244}
 245
 246/*
 247 * caching an rbio means to copy anything from the
 248 * bio_pages array into the stripe_pages array.  We
 249 * use the page uptodate bit in the stripe cache array
 250 * to indicate if it has valid data
 251 *
 252 * once the caching is done, we set the cache ready
 253 * bit.
 254 */
 255static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 256{
 257	int i;
 258	char *s;
 259	char *d;
 260	int ret;
 261
 262	ret = alloc_rbio_pages(rbio);
 263	if (ret)
 264		return;
 265
 266	for (i = 0; i < rbio->nr_pages; i++) {
 267		if (!rbio->bio_pages[i])
 268			continue;
 269
 270		s = kmap(rbio->bio_pages[i]);
 271		d = kmap(rbio->stripe_pages[i]);
 272
 273		memcpy(d, s, PAGE_SIZE);
 274
 275		kunmap(rbio->bio_pages[i]);
 276		kunmap(rbio->stripe_pages[i]);
 277		SetPageUptodate(rbio->stripe_pages[i]);
 278	}
 279	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 280}
 281
 282/*
 283 * we hash on the first logical address of the stripe
 284 */
 285static int rbio_bucket(struct btrfs_raid_bio *rbio)
 286{
 287	u64 num = rbio->bbio->raid_map[0];
 288
 289	/*
 290	 * we shift down quite a bit.  We're using byte
 291	 * addressing, and most of the lower bits are zeros.
 292	 * This tends to upset hash_64, and it consistently
 293	 * returns just one or two different values.
 294	 *
 295	 * shifting off the lower bits fixes things.
 296	 */
 297	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 298}
 299
 300/*
 301 * stealing an rbio means taking all the uptodate pages from the stripe
 302 * array in the source rbio and putting them into the destination rbio
 303 */
 304static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 305{
 306	int i;
 307	struct page *s;
 308	struct page *d;
 309
 310	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 311		return;
 312
 313	for (i = 0; i < dest->nr_pages; i++) {
 314		s = src->stripe_pages[i];
 315		if (!s || !PageUptodate(s)) {
 316			continue;
 317		}
 318
 319		d = dest->stripe_pages[i];
 320		if (d)
 321			__free_page(d);
 322
 323		dest->stripe_pages[i] = s;
 324		src->stripe_pages[i] = NULL;
 325	}
 326}
 327
 328/*
 329 * merging means we take the bio_list from the victim and
 330 * splice it into the destination.  The victim should
 331 * be discarded afterwards.
 332 *
 333 * must be called with dest->rbio_list_lock held
 334 */
 335static void merge_rbio(struct btrfs_raid_bio *dest,
 336		       struct btrfs_raid_bio *victim)
 337{
 338	bio_list_merge(&dest->bio_list, &victim->bio_list);
 339	dest->bio_list_bytes += victim->bio_list_bytes;
 340	dest->generic_bio_cnt += victim->generic_bio_cnt;
 341	bio_list_init(&victim->bio_list);
 342}
 343
 344/*
 345 * used to prune items that are in the cache.  The caller
 346 * must hold the hash table lock.
 347 */
 348static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 349{
 350	int bucket = rbio_bucket(rbio);
 351	struct btrfs_stripe_hash_table *table;
 352	struct btrfs_stripe_hash *h;
 353	int freeit = 0;
 354
 355	/*
 356	 * check the bit again under the hash table lock.
 357	 */
 358	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 359		return;
 360
 361	table = rbio->fs_info->stripe_hash_table;
 362	h = table->table + bucket;
 363
 364	/* hold the lock for the bucket because we may be
 365	 * removing it from the hash table
 366	 */
 367	spin_lock(&h->lock);
 368
 369	/*
 370	 * hold the lock for the bio list because we need
 371	 * to make sure the bio list is empty
 372	 */
 373	spin_lock(&rbio->bio_list_lock);
 374
 375	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 376		list_del_init(&rbio->stripe_cache);
 377		table->cache_size -= 1;
 378		freeit = 1;
 379
 380		/* if the bio list isn't empty, this rbio is
 381		 * still involved in an IO.  We take it out
 382		 * of the cache list, and drop the ref that
 383		 * was held for the list.
 384		 *
 385		 * If the bio_list was empty, we also remove
 386		 * the rbio from the hash_table, and drop
 387		 * the corresponding ref
 388		 */
 389		if (bio_list_empty(&rbio->bio_list)) {
 390			if (!list_empty(&rbio->hash_list)) {
 391				list_del_init(&rbio->hash_list);
 392				atomic_dec(&rbio->refs);
 393				BUG_ON(!list_empty(&rbio->plug_list));
 394			}
 395		}
 396	}
 397
 398	spin_unlock(&rbio->bio_list_lock);
 399	spin_unlock(&h->lock);
 400
 401	if (freeit)
 402		__free_raid_bio(rbio);
 403}
 404
 405/*
 406 * prune a given rbio from the cache
 407 */
 408static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 409{
 410	struct btrfs_stripe_hash_table *table;
 411	unsigned long flags;
 412
 413	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 414		return;
 415
 416	table = rbio->fs_info->stripe_hash_table;
 417
 418	spin_lock_irqsave(&table->cache_lock, flags);
 419	__remove_rbio_from_cache(rbio);
 420	spin_unlock_irqrestore(&table->cache_lock, flags);
 421}
 422
 423/*
 424 * remove everything in the cache
 425 */
 426static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 427{
 428	struct btrfs_stripe_hash_table *table;
 429	unsigned long flags;
 430	struct btrfs_raid_bio *rbio;
 431
 432	table = info->stripe_hash_table;
 433
 434	spin_lock_irqsave(&table->cache_lock, flags);
 435	while (!list_empty(&table->stripe_cache)) {
 436		rbio = list_entry(table->stripe_cache.next,
 437				  struct btrfs_raid_bio,
 438				  stripe_cache);
 439		__remove_rbio_from_cache(rbio);
 440	}
 441	spin_unlock_irqrestore(&table->cache_lock, flags);
 442}
 443
 444/*
 445 * remove all cached entries and free the hash table
 446 * used by unmount
 447 */
 448void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 449{
 450	if (!info->stripe_hash_table)
 451		return;
 452	btrfs_clear_rbio_cache(info);
 453	kvfree(info->stripe_hash_table);
 454	info->stripe_hash_table = NULL;
 455}
 456
 457/*
 458 * insert an rbio into the stripe cache.  It
 459 * must have already been prepared by calling
 460 * cache_rbio_pages
 461 *
 462 * If this rbio was already cached, it gets
 463 * moved to the front of the lru.
 464 *
 465 * If the size of the rbio cache is too big, we
 466 * prune an item.
 467 */
 468static void cache_rbio(struct btrfs_raid_bio *rbio)
 469{
 470	struct btrfs_stripe_hash_table *table;
 471	unsigned long flags;
 472
 473	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 474		return;
 475
 476	table = rbio->fs_info->stripe_hash_table;
 477
 478	spin_lock_irqsave(&table->cache_lock, flags);
 479	spin_lock(&rbio->bio_list_lock);
 480
 481	/* bump our ref if we were not in the list before */
 482	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 483		atomic_inc(&rbio->refs);
 484
  485	if (!list_empty(&rbio->stripe_cache)) {
 486		list_move(&rbio->stripe_cache, &table->stripe_cache);
 487	} else {
 488		list_add(&rbio->stripe_cache, &table->stripe_cache);
 489		table->cache_size += 1;
 490	}
 491
 492	spin_unlock(&rbio->bio_list_lock);
 493
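	/* past the size limit, evict the least recently used rbio */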
 494	if (table->cache_size > RBIO_CACHE_SIZE) {
 495		struct btrfs_raid_bio *found;
 496
 497		found = list_entry(table->stripe_cache.prev,
 498				  struct btrfs_raid_bio,
 499				  stripe_cache);
 500
 501		if (found != rbio)
 502			__remove_rbio_from_cache(found);
 503	}
 504
 505	spin_unlock_irqrestore(&table->cache_lock, flags);
 506}
 507
 508/*
 509 * helper function to run the xor_blocks api.  It is only
 510 * able to do MAX_XOR_BLOCKS at a time, so we need to
 511 * loop through.
 512 */
 513static void run_xor(void **pages, int src_cnt, ssize_t len)
 514{
 515	int src_off = 0;
 516	int xor_src_cnt = 0;
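	/* the destination buffer comes right after the source pointers */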
 517	void *dest = pages[src_cnt];
 518
  519	while (src_cnt > 0) {
 520		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 521		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 522
 523		src_cnt -= xor_src_cnt;
 524		src_off += xor_src_cnt;
 525	}
 526}
 527
 528/*
 529 * returns true if the bio list inside this rbio
 530 * covers an entire stripe (no rmw required).
 531 * Must be called with the bio list lock held, or
 532 * at a time when you know it is impossible to add
 533 * new bios into the list
 534 */
 535static int __rbio_is_full(struct btrfs_raid_bio *rbio)
 536{
 537	unsigned long size = rbio->bio_list_bytes;
 538	int ret = 1;
 539
 540	if (size != rbio->nr_data * rbio->stripe_len)
 541		ret = 0;
 542
 543	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 544	return ret;
 545}
 546
 547static int rbio_is_full(struct btrfs_raid_bio *rbio)
 548{
 549	unsigned long flags;
 550	int ret;
 551
 552	spin_lock_irqsave(&rbio->bio_list_lock, flags);
 553	ret = __rbio_is_full(rbio);
 554	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 555	return ret;
 556}
 557
 558/*
 559 * returns 1 if it is safe to merge two rbios together.
 560 * The merging is safe if the two rbios correspond to
 561 * the same stripe and if they are both going in the same
 562 * direction (read vs write), and if neither one is
 563 * locked for final IO
 564 *
 565 * The caller is responsible for locking such that
 566 * rmw_locked is safe to test
 567 */
 568static int rbio_can_merge(struct btrfs_raid_bio *last,
 569			  struct btrfs_raid_bio *cur)
 570{
 571	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 572	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 573		return 0;
 574
 575	/*
 576	 * we can't merge with cached rbios, since the
 577	 * idea is that when we merge the destination
 578	 * rbio is going to run our IO for us.  We can
 579	 * steal from cached rbios though, other functions
 580	 * handle that.
 581	 */
 582	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 583	    test_bit(RBIO_CACHE_BIT, &cur->flags))
 584		return 0;
 585
 586	if (last->bbio->raid_map[0] !=
 587	    cur->bbio->raid_map[0])
 588		return 0;
 589
 590	/* we can't merge with different operations */
 591	if (last->operation != cur->operation)
 592		return 0;
 593	/*
  594	 * A parity scrub has to read the full stripe from the drive,
  595	 * then check and repair the parity and write back the new results.
 596	 *
 597	 * We're not allowed to add any new bios to the
 598	 * bio list here, anyone else that wants to
 599	 * change this stripe needs to do their own rmw.
 600	 */
 601	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
 602	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
 603		return 0;
 604
 605	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
 606	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
 607		return 0;
 608
 609	return 1;
 610}
 611
 612static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
 613				  int index)
 614{
 615	return stripe * rbio->stripe_npages + index;
 616}
 617
 618/*
 619 * these are just the pages from the rbio array, not from anything
 620 * the FS sent down to us
 621 */
 622static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
 623				     int index)
 624{
 625	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
 626}
 627
 628/*
 629 * helper to index into the pstripe
 630 */
 631static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 632{
 633	return rbio_stripe_page(rbio, rbio->nr_data, index);
 634}
 635
 636/*
 637 * helper to index into the qstripe, returns null
 638 * if there is no qstripe
 639 */
 640static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 641{
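	/* raid5 profiles have no q stripe, just data plus p */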
 642	if (rbio->nr_data + 1 == rbio->real_stripes)
 643		return NULL;
 644	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 645}
 646
 647/*
 648 * The first stripe in the table for a logical address
 649 * has the lock.  rbios are added in one of three ways:
 650 *
 651 * 1) Nobody has the stripe locked yet.  The rbio is given
 652 * the lock and 0 is returned.  The caller must start the IO
 653 * themselves.
 654 *
 655 * 2) Someone has the stripe locked, but we're able to merge
 656 * with the lock owner.  The rbio is freed and the IO will
 657 * start automatically along with the existing rbio.  1 is returned.
 658 *
 659 * 3) Someone has the stripe locked, but we're not able to merge.
 660 * The rbio is added to the lock owner's plug list, or merged into
 661 * an rbio already on the plug list.  When the lock owner unlocks,
 662 * the next rbio on the list is run and the IO is started automatically.
 663 * 1 is returned
 664 *
 665 * If we return 0, the caller still owns the rbio and must continue with
 666 * IO submission.  If we return 1, the caller must assume the rbio has
 667 * already been freed.
 668 */
 669static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 670{
 671	int bucket = rbio_bucket(rbio);
 672	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
 673	struct btrfs_raid_bio *cur;
 674	struct btrfs_raid_bio *pending;
 675	unsigned long flags;
 676	DEFINE_WAIT(wait);
 677	struct btrfs_raid_bio *freeit = NULL;
 678	struct btrfs_raid_bio *cache_drop = NULL;
 679	int ret = 0;
 680	int walk = 0;
 681
 682	spin_lock_irqsave(&h->lock, flags);
 683	list_for_each_entry(cur, &h->hash_list, hash_list) {
 684		walk++;
 685		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
 686			spin_lock(&cur->bio_list_lock);
 687
 688			/* can we steal this cached rbio's pages? */
 689			if (bio_list_empty(&cur->bio_list) &&
 690			    list_empty(&cur->plug_list) &&
 691			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 692			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 693				list_del_init(&cur->hash_list);
 694				atomic_dec(&cur->refs);
 695
 696				steal_rbio(cur, rbio);
 697				cache_drop = cur;
 698				spin_unlock(&cur->bio_list_lock);
 699
 700				goto lockit;
 701			}
 702
 703			/* can we merge into the lock owner? */
 704			if (rbio_can_merge(cur, rbio)) {
 705				merge_rbio(cur, rbio);
 706				spin_unlock(&cur->bio_list_lock);
 707				freeit = rbio;
 708				ret = 1;
 709				goto out;
 710			}
 711
 712
 713			/*
 714			 * we couldn't merge with the running
 715			 * rbio, see if we can merge with the
 716			 * pending ones.  We don't have to
 717			 * check for rmw_locked because there
 718			 * is no way they are inside finish_rmw
 719			 * right now
 720			 */
 721			list_for_each_entry(pending, &cur->plug_list,
 722					    plug_list) {
 723				if (rbio_can_merge(pending, rbio)) {
 724					merge_rbio(pending, rbio);
 725					spin_unlock(&cur->bio_list_lock);
 726					freeit = rbio;
 727					ret = 1;
 728					goto out;
 729				}
 730			}
 731
 732			/* no merging, put us on the tail of the plug list,
  733	 * our rbio will be started when the currently
 734			 * running rbio unlocks
 735			 */
 736			list_add_tail(&rbio->plug_list, &cur->plug_list);
 737			spin_unlock(&cur->bio_list_lock);
 738			ret = 1;
 739			goto out;
 740		}
 741	}
 742lockit:
 743	atomic_inc(&rbio->refs);
 744	list_add(&rbio->hash_list, &h->hash_list);
 745out:
 746	spin_unlock_irqrestore(&h->lock, flags);
 747	if (cache_drop)
 748		remove_rbio_from_cache(cache_drop);
 749	if (freeit)
 750		__free_raid_bio(freeit);
 751	return ret;
 752}
 753
 754/*
 755 * called as rmw or parity rebuild is completed.  If the plug list has more
 756 * rbios waiting for this stripe, the next one on the list will be started
 757 */
 758static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 759{
 760	int bucket;
 761	struct btrfs_stripe_hash *h;
 762	unsigned long flags;
 763	int keep_cache = 0;
 764
 765	bucket = rbio_bucket(rbio);
 766	h = rbio->fs_info->stripe_hash_table->table + bucket;
 767
 768	if (list_empty(&rbio->plug_list))
 769		cache_rbio(rbio);
 770
 771	spin_lock_irqsave(&h->lock, flags);
 772	spin_lock(&rbio->bio_list_lock);
 773
 774	if (!list_empty(&rbio->hash_list)) {
 775		/*
 776		 * if we're still cached and there is no other IO
 777		 * to perform, just leave this rbio here for others
 778		 * to steal from later
 779		 */
 780		if (list_empty(&rbio->plug_list) &&
 781		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 782			keep_cache = 1;
 783			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 784			BUG_ON(!bio_list_empty(&rbio->bio_list));
 785			goto done;
 786		}
 787
 788		list_del_init(&rbio->hash_list);
 789		atomic_dec(&rbio->refs);
 790
 791		/*
 792		 * we use the plug list to hold all the rbios
 793		 * waiting for the chance to lock this stripe.
 794		 * hand the lock over to one of them.
 795		 */
 796		if (!list_empty(&rbio->plug_list)) {
 797			struct btrfs_raid_bio *next;
 798			struct list_head *head = rbio->plug_list.next;
 799
 800			next = list_entry(head, struct btrfs_raid_bio,
 801					  plug_list);
 802
 803			list_del_init(&rbio->plug_list);
 804
 805			list_add(&next->hash_list, &h->hash_list);
 806			atomic_inc(&next->refs);
 807			spin_unlock(&rbio->bio_list_lock);
 808			spin_unlock_irqrestore(&h->lock, flags);
 809
 810			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 811				async_read_rebuild(next);
 812			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
 813				steal_rbio(rbio, next);
 814				async_read_rebuild(next);
 815			} else if (next->operation == BTRFS_RBIO_WRITE) {
 816				steal_rbio(rbio, next);
 817				async_rmw_stripe(next);
 818			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 819				steal_rbio(rbio, next);
 820				async_scrub_parity(next);
 821			}
 822
 823			goto done_nolock;
 824			/*
 825			 * The barrier for this waitqueue_active is not needed,
 826			 * we're protected by h->lock and can't miss a wakeup.
 827			 */
 828		} else if (waitqueue_active(&h->wait)) {
 829			spin_unlock(&rbio->bio_list_lock);
 830			spin_unlock_irqrestore(&h->lock, flags);
 831			wake_up(&h->wait);
 832			goto done_nolock;
 833		}
 834	}
 835done:
 836	spin_unlock(&rbio->bio_list_lock);
 837	spin_unlock_irqrestore(&h->lock, flags);
 838
 839done_nolock:
 840	if (!keep_cache)
 841		remove_rbio_from_cache(rbio);
 842}
 843
 844static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 845{
 846	int i;
 847
 848	WARN_ON(atomic_read(&rbio->refs) < 0);
 849	if (!atomic_dec_and_test(&rbio->refs))
 850		return;
 851
 852	WARN_ON(!list_empty(&rbio->stripe_cache));
 853	WARN_ON(!list_empty(&rbio->hash_list));
 854	WARN_ON(!bio_list_empty(&rbio->bio_list));
 855
 856	for (i = 0; i < rbio->nr_pages; i++) {
 857		if (rbio->stripe_pages[i]) {
 858			__free_page(rbio->stripe_pages[i]);
 859			rbio->stripe_pages[i] = NULL;
 860		}
 861	}
 862
 863	btrfs_put_bbio(rbio->bbio);
 864	kfree(rbio);
 865}
 866
 867static void free_raid_bio(struct btrfs_raid_bio *rbio)
 868{
 869	unlock_stripe(rbio);
 870	__free_raid_bio(rbio);
 871}
 872
 873/*
 874 * this frees the rbio and runs through all the bios in the
 875 * bio_list and calls end_io on them
 876 */
 877static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 878{
 879	struct bio *cur = bio_list_get(&rbio->bio_list);
 880	struct bio *next;
 881
 882	if (rbio->generic_bio_cnt)
 883		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
 884
 885	free_raid_bio(rbio);
 886
 887	while (cur) {
 888		next = cur->bi_next;
 889		cur->bi_next = NULL;
 890		cur->bi_error = err;
 891		bio_endio(cur);
 892		cur = next;
 893	}
 894}
 895
 896/*
 897 * end io function used by finish_rmw.  When we finally
 898 * get here, we've written a full stripe
 899 */
 900static void raid_write_end_io(struct bio *bio)
 901{
 902	struct btrfs_raid_bio *rbio = bio->bi_private;
 903	int err = bio->bi_error;
 904	int max_errors;
 905
 906	if (err)
 907		fail_bio_stripe(rbio, bio);
 908
 909	bio_put(bio);
 910
 911	if (!atomic_dec_and_test(&rbio->stripes_pending))
 912		return;
 913
 914	err = 0;
 915
  916	/* OK, we have finished writing all the stripes we need to. */
 917	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
 918		     0 : rbio->bbio->max_errors;
 919	if (atomic_read(&rbio->error) > max_errors)
 920		err = -EIO;
 921
 922	rbio_orig_end_io(rbio, err);
 923}
 924
 925/*
 926 * the read/modify/write code wants to use the original bio for
 927 * any pages it included, and then use the rbio for everything
 928 * else.  This function decides if a given index (stripe number)
 929 * and page number in that stripe fall inside the original bio
 930 * or the rbio.
 931 *
 932 * if you set bio_list_only, you'll get a NULL back for any ranges
 933 * that are outside the bio_list
 934 *
 935 * This doesn't take any refs on anything, you get a bare page pointer
 936 * and the caller must bump refs as required.
 937 *
 938 * You must call index_rbio_pages once before you can trust
 939 * the answers from this function.
 940 */
 941static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 942				 int index, int pagenr, int bio_list_only)
 943{
 944	int chunk_page;
 945	struct page *p = NULL;
 946
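	/* stripe number times pages per stripe, plus the page within it */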
 947	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
 948
 949	spin_lock_irq(&rbio->bio_list_lock);
 950	p = rbio->bio_pages[chunk_page];
 951	spin_unlock_irq(&rbio->bio_list_lock);
 952
 953	if (p || bio_list_only)
 954		return p;
 955
 956	return rbio->stripe_pages[chunk_page];
 957}
 958
 959/*
 960 * number of pages we need for the entire stripe across all the
 961 * drives
 962 */
 963static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 964{
 965	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 966}
 967
 968/*
  969 * allocation and initial setup for the btrfs_raid_bio.  Note that
  970 * this does not allocate any pages for rbio->stripe_pages.
 971 */
 972static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 973					 struct btrfs_bio *bbio,
 974					 u64 stripe_len)
 975{
 976	struct btrfs_raid_bio *rbio;
 977	int nr_data = 0;
 978	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 979	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 980	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 981	void *p;
 982
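	/*
	 * a single allocation holds the rbio plus the two page pointer
	 * arrays and the dbitmap that live past the end of the struct
	 */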
 983	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
 984		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
 985		       sizeof(long), GFP_NOFS);
 986	if (!rbio)
 987		return ERR_PTR(-ENOMEM);
 988
 989	bio_list_init(&rbio->bio_list);
 990	INIT_LIST_HEAD(&rbio->plug_list);
 991	spin_lock_init(&rbio->bio_list_lock);
 992	INIT_LIST_HEAD(&rbio->stripe_cache);
 993	INIT_LIST_HEAD(&rbio->hash_list);
 994	rbio->bbio = bbio;
 995	rbio->fs_info = fs_info;
 996	rbio->stripe_len = stripe_len;
 997	rbio->nr_pages = num_pages;
 998	rbio->real_stripes = real_stripes;
 999	rbio->stripe_npages = stripe_npages;
1000	rbio->faila = -1;
1001	rbio->failb = -1;
1002	atomic_set(&rbio->refs, 1);
1003	atomic_set(&rbio->error, 0);
1004	atomic_set(&rbio->stripes_pending, 0);
1005
1006	/*
1007	 * the stripe_pages and bio_pages array point to the extra
1008	 * memory we allocated past the end of the rbio
1009	 */
1010	p = rbio + 1;
1011	rbio->stripe_pages = p;
1012	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1013	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1014
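	/* raid5 reserves one trailing stripe for p, raid6 two for p and q */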
1015	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1016		nr_data = real_stripes - 1;
1017	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1018		nr_data = real_stripes - 2;
1019	else
1020		BUG();
1021
1022	rbio->nr_data = nr_data;
1023	return rbio;
1024}
1025
1026/* allocate pages for all the stripes in the bio, including parity */
1027static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1028{
1029	int i;
1030	struct page *page;
1031
1032	for (i = 0; i < rbio->nr_pages; i++) {
1033		if (rbio->stripe_pages[i])
1034			continue;
1035		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1036		if (!page)
1037			return -ENOMEM;
1038		rbio->stripe_pages[i] = page;
1039	}
1040	return 0;
1041}
1042
1043/* only allocate pages for p/q stripes */
1044static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1045{
1046	int i;
1047	struct page *page;
1048
1049	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1050
1051	for (; i < rbio->nr_pages; i++) {
1052		if (rbio->stripe_pages[i])
1053			continue;
1054		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1055		if (!page)
1056			return -ENOMEM;
1057		rbio->stripe_pages[i] = page;
1058	}
1059	return 0;
1060}
1061
1062/*
1063 * add a single page from a specific stripe into our list of bios for IO
1064 * this will try to merge into existing bios if possible, and returns
1065 * zero if all went well.
1066 */
1067static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1068			    struct bio_list *bio_list,
1069			    struct page *page,
1070			    int stripe_nr,
1071			    unsigned long page_index,
1072			    unsigned long bio_max_len)
1073{
1074	struct bio *last = bio_list->tail;
1075	u64 last_end = 0;
1076	int ret;
1077	struct bio *bio;
1078	struct btrfs_bio_stripe *stripe;
1079	u64 disk_start;
1080
1081	stripe = &rbio->bbio->stripes[stripe_nr];
1082	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1083
1084	/* if the device is missing, just fail this stripe */
1085	if (!stripe->dev->bdev)
1086		return fail_rbio_index(rbio, stripe_nr);
1087
1088	/* see if we can add this page onto our existing bio */
1089	if (last) {
1090		last_end = (u64)last->bi_iter.bi_sector << 9;
1091		last_end += last->bi_iter.bi_size;
1092
1093		/*
1094		 * we can't merge these if they are from different
1095		 * devices or if they are not contiguous
1096		 */
1097		if (last_end == disk_start && stripe->dev->bdev &&
1098		    !last->bi_error &&
1099		    last->bi_bdev == stripe->dev->bdev) {
1100			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1101			if (ret == PAGE_SIZE)
1102				return 0;
1103		}
1104	}
1105
1106	/* put a new bio on the list */
 1107	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
1108	if (!bio)
1109		return -ENOMEM;
1110
1111	bio->bi_iter.bi_size = 0;
1112	bio->bi_bdev = stripe->dev->bdev;
1113	bio->bi_iter.bi_sector = disk_start >> 9;
1114
1115	bio_add_page(bio, page, PAGE_SIZE, 0);
1116	bio_list_add(bio_list, bio);
1117	return 0;
1118}
1119
1120/*
1121 * while we're doing the read/modify/write cycle, we could
1122 * have errors in reading pages off the disk.  This checks
1123 * for errors and if we're not able to read the page it'll
1124 * trigger parity reconstruction.  The rmw will be finished
1125 * after we've reconstructed the failed stripes
1126 */
1127static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1128{
1129	if (rbio->faila >= 0 || rbio->failb >= 0) {
1130		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1131		__raid56_parity_recover(rbio);
1132	} else {
1133		finish_rmw(rbio);
1134	}
1135}
1136
1137/*
1138 * helper function to walk our bio list and populate the bio_pages array with
1139 * the result.  This seems expensive, but it is faster than constantly
1140 * searching through the bio list as we setup the IO in finish_rmw or stripe
1141 * reconstruction.
1142 *
1143 * This must be called before you trust the answers from page_in_rbio
1144 */
1145static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1146{
1147	struct bio *bio;
1148	struct bio_vec *bvec;
1149	u64 start;
1150	unsigned long stripe_offset;
1151	unsigned long page_index;
1152	int i;
1153
1154	spin_lock_irq(&rbio->bio_list_lock);
1155	bio_list_for_each(bio, &rbio->bio_list) {
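		/* raid_map[0] is the logical start of this full stripe */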
1156		start = (u64)bio->bi_iter.bi_sector << 9;
1157		stripe_offset = start - rbio->bbio->raid_map[0];
1158		page_index = stripe_offset >> PAGE_SHIFT;
1159
1160		bio_for_each_segment_all(bvec, bio, i)
1161			rbio->bio_pages[page_index + i] = bvec->bv_page;
1162	}
1163	spin_unlock_irq(&rbio->bio_list_lock);
1164}
1165
1166/*
1167 * this is called from one of two situations.  We either
1168 * have a full stripe from the higher layers, or we've read all
1169 * the missing bits off disk.
1170 *
1171 * This will calculate the parity and then send down any
1172 * changed blocks.
1173 */
1174static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1175{
1176	struct btrfs_bio *bbio = rbio->bbio;
1177	void *pointers[rbio->real_stripes];
1178	int nr_data = rbio->nr_data;
1179	int stripe;
1180	int pagenr;
1181	int p_stripe = -1;
1182	int q_stripe = -1;
1183	struct bio_list bio_list;
1184	struct bio *bio;
1185	int ret;
1186
1187	bio_list_init(&bio_list);
1188
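	/* one stripe past the data means raid5 (p only), two means raid6 */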
1189	if (rbio->real_stripes - rbio->nr_data == 1) {
1190		p_stripe = rbio->real_stripes - 1;
1191	} else if (rbio->real_stripes - rbio->nr_data == 2) {
1192		p_stripe = rbio->real_stripes - 2;
1193		q_stripe = rbio->real_stripes - 1;
1194	} else {
1195		BUG();
1196	}
1197
1198	/* at this point we either have a full stripe,
1199	 * or we've read the full stripe from the drive.
1200	 * recalculate the parity and write the new results.
1201	 *
1202	 * We're not allowed to add any new bios to the
1203	 * bio list here, anyone else that wants to
1204	 * change this stripe needs to do their own rmw.
1205	 */
1206	spin_lock_irq(&rbio->bio_list_lock);
1207	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1208	spin_unlock_irq(&rbio->bio_list_lock);
1209
1210	atomic_set(&rbio->error, 0);
1211
1212	/*
1213	 * now that we've set rmw_locked, run through the
1214	 * bio list one last time and map the page pointers
1215	 *
1216	 * We don't cache full rbios because we're assuming
1217	 * the higher layers are unlikely to use this area of
1218	 * the disk again soon.  If they do use it again,
1219	 * hopefully they will send another full bio.
1220	 */
1221	index_rbio_pages(rbio);
1222	if (!rbio_is_full(rbio))
1223		cache_rbio_pages(rbio);
1224	else
1225		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1226
1227	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1228		struct page *p;
1229		/* first collect one page from each data stripe */
1230		for (stripe = 0; stripe < nr_data; stripe++) {
1231			p = page_in_rbio(rbio, stripe, pagenr, 0);
1232			pointers[stripe] = kmap(p);
1233		}
1234
1235		/* then add the parity stripe */
1236		p = rbio_pstripe_page(rbio, pagenr);
1237		SetPageUptodate(p);
1238		pointers[stripe++] = kmap(p);
1239
1240		if (q_stripe != -1) {
1241
1242			/*
1243			 * raid6, add the qstripe and call the
1244			 * library function to fill in our p/q
1245			 */
1246			p = rbio_qstripe_page(rbio, pagenr);
1247			SetPageUptodate(p);
1248			pointers[stripe++] = kmap(p);
1249
1250			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1251						pointers);
1252		} else {
1253			/* raid5 */
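			/* seed p with the first data page, then xor in the rest */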
1254			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1255			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1256		}
1257
1258
1259		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1260			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1261	}
1262
1263	/*
1264	 * time to start writing.  Make bios for everything from the
1265	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1266	 * everything else.
1267	 */
1268	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1269		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1270			struct page *page;
1271			if (stripe < rbio->nr_data) {
1272				page = page_in_rbio(rbio, stripe, pagenr, 1);
1273				if (!page)
1274					continue;
1275			} else {
 1276				page = rbio_stripe_page(rbio, stripe, pagenr);
1277			}
1278
1279			ret = rbio_add_io_page(rbio, &bio_list,
1280				       page, stripe, pagenr, rbio->stripe_len);
1281			if (ret)
1282				goto cleanup;
1283		}
1284	}
1285
1286	if (likely(!bbio->num_tgtdevs))
1287		goto write_data;
1288
1289	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1290		if (!bbio->tgtdev_map[stripe])
1291			continue;
1292
1293		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1294			struct page *page;
1295			if (stripe < rbio->nr_data) {
1296				page = page_in_rbio(rbio, stripe, pagenr, 1);
1297				if (!page)
1298					continue;
1299			} else {
 1300				page = rbio_stripe_page(rbio, stripe, pagenr);
1301			}
1302
1303			ret = rbio_add_io_page(rbio, &bio_list, page,
1304					       rbio->bbio->tgtdev_map[stripe],
1305					       pagenr, rbio->stripe_len);
1306			if (ret)
1307				goto cleanup;
1308		}
1309	}
1310
1311write_data:
1312	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1313	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1314
1315	while (1) {
1316		bio = bio_list_pop(&bio_list);
1317		if (!bio)
1318			break;
1319
1320		bio->bi_private = rbio;
1321		bio->bi_end_io = raid_write_end_io;
1322		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1323
1324		submit_bio(bio);
1325	}
1326	return;
1327
1328cleanup:
1329	rbio_orig_end_io(rbio, -EIO);
1330}
1331
1332/*
1333 * helper to find the stripe number for a given bio.  Used to figure out which
1334 * stripe has failed.  This expects the bio to correspond to a physical disk,
1335 * so it looks up based on physical sector numbers.
1336 */
1337static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1338			   struct bio *bio)
1339{
1340	u64 physical = bio->bi_iter.bi_sector;
1341	u64 stripe_start;
1342	int i;
1343	struct btrfs_bio_stripe *stripe;
1344
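	/* bi_sector counts 512 byte sectors, convert to bytes */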
1345	physical <<= 9;
1346
1347	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1348		stripe = &rbio->bbio->stripes[i];
1349		stripe_start = stripe->physical;
1350		if (physical >= stripe_start &&
1351		    physical < stripe_start + rbio->stripe_len &&
1352		    bio->bi_bdev == stripe->dev->bdev) {
1353			return i;
1354		}
1355	}
1356	return -1;
1357}
1358
1359/*
1360 * helper to find the stripe number for a given
1361 * bio (before mapping).  Used to figure out which stripe has
1362 * failed.  This looks up based on logical block numbers.
1363 */
1364static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1365				   struct bio *bio)
1366{
1367	u64 logical = bio->bi_iter.bi_sector;
1368	u64 stripe_start;
1369	int i;
1370
1371	logical <<= 9;
1372
1373	for (i = 0; i < rbio->nr_data; i++) {
1374		stripe_start = rbio->bbio->raid_map[i];
1375		if (logical >= stripe_start &&
1376		    logical < stripe_start + rbio->stripe_len) {
1377			return i;
1378		}
1379	}
1380	return -1;
1381}
1382
1383/*
1384 * returns -EIO if we had too many failures
1385 */
1386static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1387{
1388	unsigned long flags;
1389	int ret = 0;
1390
1391	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1392
1393	/* we already know this stripe is bad, move on */
1394	if (rbio->faila == failed || rbio->failb == failed)
1395		goto out;
1396
1397	if (rbio->faila == -1) {
1398		/* first failure on this rbio */
1399		rbio->faila = failed;
1400		atomic_inc(&rbio->error);
1401	} else if (rbio->failb == -1) {
1402		/* second failure on this rbio */
1403		rbio->failb = failed;
1404		atomic_inc(&rbio->error);
1405	} else {
1406		ret = -EIO;
1407	}
1408out:
1409	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1410
1411	return ret;
1412}
1413
1414/*
1415 * helper to fail a stripe based on a physical disk
1416 * bio.
1417 */
1418static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1419			   struct bio *bio)
1420{
1421	int failed = find_bio_stripe(rbio, bio);
1422
1423	if (failed < 0)
1424		return -EIO;
1425
1426	return fail_rbio_index(rbio, failed);
1427}
1428
1429/*
1430 * this sets each page in the bio uptodate.  It should only be used on private
1431 * rbio pages, nothing that comes in from the higher layers
1432 */
1433static void set_bio_pages_uptodate(struct bio *bio)
1434{
1435	struct bio_vec *bvec;
1436	int i;
1437
1438	bio_for_each_segment_all(bvec, bio, i)
1439		SetPageUptodate(bvec->bv_page);
1440}
1441
1442/*
1443 * end io for the read phase of the rmw cycle.  All the bios here are physical
1444 * stripe bios we've read from the disk so we can recalculate the parity of the
1445 * stripe.
1446 *
1447 * This will usually kick off finish_rmw once all the bios are read in, but it
1448 * may trigger parity reconstruction if we had any errors along the way
1449 */
1450static void raid_rmw_end_io(struct bio *bio)
1451{
1452	struct btrfs_raid_bio *rbio = bio->bi_private;
1453
1454	if (bio->bi_error)
1455		fail_bio_stripe(rbio, bio);
1456	else
1457		set_bio_pages_uptodate(bio);
1458
1459	bio_put(bio);
1460
1461	if (!atomic_dec_and_test(&rbio->stripes_pending))
1462		return;
1463
1464	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1465		goto cleanup;
1466
1467	/*
1468	 * this will normally call finish_rmw to start our write
1469	 * but if there are any failed stripes we'll reconstruct
1470	 * from parity first
1471	 */
1472	validate_rbio_for_rmw(rbio);
1473	return;
1474
1475cleanup:
1476
1477	rbio_orig_end_io(rbio, -EIO);
1478}
1479
1480static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1481{
1482	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1483	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1484}
1485
1486static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1487{
1488	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1489			read_rebuild_work, NULL, NULL);
1490
1491	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1492}
1493
1494/*
1495 * the stripe must be locked by the caller.  It will
1496 * unlock after all the writes are done
1497 */
1498static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1499{
1500	int bios_to_read = 0;
1501	struct bio_list bio_list;
1502	int ret;
1503	int pagenr;
1504	int stripe;
1505	struct bio *bio;
1506
1507	bio_list_init(&bio_list);
1508
1509	ret = alloc_rbio_pages(rbio);
1510	if (ret)
1511		goto cleanup;
1512
1513	index_rbio_pages(rbio);
1514
1515	atomic_set(&rbio->error, 0);
1516	/*
1517	 * build a list of bios to read all the missing parts of this
1518	 * stripe
1519	 */
1520	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1521		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1522			struct page *page;
1523			/*
1524			 * we want to find all the pages missing from
1525			 * the rbio and read them from the disk.  If
1526			 * page_in_rbio finds a page in the bio list
1527			 * we don't need to read it off the stripe.
1528			 */
1529			page = page_in_rbio(rbio, stripe, pagenr, 1);
1530			if (page)
1531				continue;
1532
1533			page = rbio_stripe_page(rbio, stripe, pagenr);
1534			/*
1535			 * the bio cache may have handed us an uptodate
1536			 * page.  If so, be happy and use it
1537			 */
1538			if (PageUptodate(page))
1539				continue;
1540
1541			ret = rbio_add_io_page(rbio, &bio_list, page,
1542				       stripe, pagenr, rbio->stripe_len);
1543			if (ret)
1544				goto cleanup;
1545		}
1546	}
1547
1548	bios_to_read = bio_list_size(&bio_list);
1549	if (!bios_to_read) {
1550		/*
1551		 * this can happen if others have merged with
 1552		 * us; it means there is nothing left to read.
1553		 * But if there are missing devices it may not be
1554		 * safe to do the full stripe write yet.
1555		 */
1556		goto finish;
1557	}
1558
1559	/*
1560	 * the bbio may be freed once we submit the last bio.  Make sure
1561	 * not to touch it after that
1562	 */
1563	atomic_set(&rbio->stripes_pending, bios_to_read);
1564	while (1) {
1565		bio = bio_list_pop(&bio_list);
1566		if (!bio)
1567			break;
1568
1569		bio->bi_private = rbio;
1570		bio->bi_end_io = raid_rmw_end_io;
1571		bio_set_op_attrs(bio, REQ_OP_READ, 0);
1572
1573		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1574
1575		submit_bio(bio);
1576	}
1577	/* the actual write will happen once the reads are done */
1578	return 0;
1579
1580cleanup:
1581	rbio_orig_end_io(rbio, -EIO);
1582	return -EIO;
1583
1584finish:
1585	validate_rbio_for_rmw(rbio);
1586	return 0;
1587}
1588
1589/*
1590 * if the upper layers pass in a full stripe, we thank them by only allocating
1591 * enough pages to hold the parity, and sending it all down quickly.
1592 */
1593static int full_stripe_write(struct btrfs_raid_bio *rbio)
1594{
1595	int ret;
1596
1597	ret = alloc_rbio_parity_pages(rbio);
1598	if (ret) {
1599		__free_raid_bio(rbio);
1600		return ret;
1601	}
1602
1603	ret = lock_stripe_add(rbio);
1604	if (ret == 0)
1605		finish_rmw(rbio);
1606	return 0;
1607}
1608
1609/*
1610 * partial stripe writes get handed over to async helpers.
1611 * We're really hoping to merge a few more writes into this
1612 * rbio before calculating new parity
1613 */
1614static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1615{
1616	int ret;
1617
1618	ret = lock_stripe_add(rbio);
1619	if (ret == 0)
1620		async_rmw_stripe(rbio);
1621	return 0;
1622}
1623
1624/*
1625 * sometimes while we were reading from the drive to
 1626 * recalculate parity, enough new bios come in to create
1627 * a full stripe.  So we do a check here to see if we can
1628 * go directly to finish_rmw
1629 */
1630static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1631{
1632	/* head off into rmw land if we don't have a full stripe */
1633	if (!rbio_is_full(rbio))
1634		return partial_stripe_write(rbio);
1635	return full_stripe_write(rbio);
1636}
1637
1638/*
1639 * We use plugging call backs to collect full stripes.
1640 * Any time we get a partial stripe write while plugged
1641 * we collect it into a list.  When the unplug comes down,
1642 * we sort the list by logical block number and merge
1643 * everything we can into the same rbios
1644 */
1645struct btrfs_plug_cb {
1646	struct blk_plug_cb cb;
1647	struct btrfs_fs_info *info;
1648	struct list_head rbio_list;
1649	struct btrfs_work work;
1650};
1651
1652/*
1653 * rbios on the plug list are sorted for easier merging.
1654 */
1655static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1656{
1657	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1658						 plug_list);
1659	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1660						 plug_list);
1661	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1662	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1663
1664	if (a_sector < b_sector)
1665		return -1;
1666	if (a_sector > b_sector)
1667		return 1;
1668	return 0;
1669}
1670
1671static void run_plug(struct btrfs_plug_cb *plug)
1672{
1673	struct btrfs_raid_bio *cur;
1674	struct btrfs_raid_bio *last = NULL;
1675
1676	/*
1677	 * sort our plug list then try to merge
1678	 * everything we can in hopes of creating full
1679	 * stripes.
1680	 */
1681	list_sort(NULL, &plug->rbio_list, plug_cmp);
1682	while (!list_empty(&plug->rbio_list)) {
1683		cur = list_entry(plug->rbio_list.next,
1684				 struct btrfs_raid_bio, plug_list);
1685		list_del_init(&cur->plug_list);
1686
1687		if (rbio_is_full(cur)) {
1688			/* we have a full stripe, send it down */
1689			full_stripe_write(cur);
1690			continue;
1691		}
1692		if (last) {
1693			if (rbio_can_merge(last, cur)) {
1694				merge_rbio(last, cur);
1695				__free_raid_bio(cur);
1696				continue;
1697
1698			}
1699			__raid56_parity_write(last);
1700		}
1701		last = cur;
1702	}
1703	if (last) {
1704		__raid56_parity_write(last);
1705	}
1706	kfree(plug);
1707}
1708
1709/*
1710 * if the unplug comes from schedule, we have to push the
1711 * work off to a helper thread
1712 */
1713static void unplug_work(struct btrfs_work *work)
1714{
1715	struct btrfs_plug_cb *plug;
1716	plug = container_of(work, struct btrfs_plug_cb, work);
1717	run_plug(plug);
1718}
1719
1720static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1721{
1722	struct btrfs_plug_cb *plug;
1723	plug = container_of(cb, struct btrfs_plug_cb, cb);
1724
1725	if (from_schedule) {
1726		btrfs_init_work(&plug->work, btrfs_rmw_helper,
1727				unplug_work, NULL, NULL);
1728		btrfs_queue_work(plug->info->rmw_workers,
1729				 &plug->work);
1730		return;
1731	}
1732	run_plug(plug);
1733}
1734
1735/*
1736 * our main entry point for writes from the rest of the FS.
1737 */
1738int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1739			struct btrfs_bio *bbio, u64 stripe_len)
1740{
1741	struct btrfs_raid_bio *rbio;
1742	struct btrfs_plug_cb *plug = NULL;
1743	struct blk_plug_cb *cb;
1744	int ret;
1745
1746	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1747	if (IS_ERR(rbio)) {
1748		btrfs_put_bbio(bbio);
1749		return PTR_ERR(rbio);
1750	}
1751	bio_list_add(&rbio->bio_list, bio);
1752	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1753	rbio->operation = BTRFS_RBIO_WRITE;
1754
1755	btrfs_bio_counter_inc_noblocked(fs_info);
1756	rbio->generic_bio_cnt = 1;
1757
1758	/*
1759	 * don't plug on full rbios, just get them out the door
1760	 * as quickly as we can
1761	 */
1762	if (rbio_is_full(rbio)) {
1763		ret = full_stripe_write(rbio);
1764		if (ret)
1765			btrfs_bio_counter_dec(fs_info);
1766		return ret;
1767	}
1768
1769	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1770	if (cb) {
1771		plug = container_of(cb, struct btrfs_plug_cb, cb);
1772		if (!plug->info) {
1773			plug->info = fs_info;
1774			INIT_LIST_HEAD(&plug->rbio_list);
1775		}
1776		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1777		ret = 0;
1778	} else {
1779		ret = __raid56_parity_write(rbio);
1780		if (ret)
1781			btrfs_bio_counter_dec(fs_info);
1782	}
1783	return ret;
1784}
1785
1786/*
1787 * all parity reconstruction happens here.  We've read in everything
1788 * we can find from the drives and this does the heavy lifting of
1789 * sorting the good from the bad.
1790 */
1791static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1792{
1793	int pagenr, stripe;
1794	void **pointers;
1795	int faila = -1, failb = -1;
1796	struct page *page;
1797	int err;
1798	int i;
1799
1800	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1801	if (!pointers) {
1802		err = -ENOMEM;
1803		goto cleanup_io;
1804	}
1805
1806	faila = rbio->faila;
1807	failb = rbio->failb;
1808
1809	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1810	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1811		spin_lock_irq(&rbio->bio_list_lock);
1812		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1813		spin_unlock_irq(&rbio->bio_list_lock);
1814	}
1815
1816	index_rbio_pages(rbio);
1817
1818	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1819		/*
1820		 * Now we just use bitmap to mark the horizontal stripes in
1821		 * which we have data when doing parity scrub.
1822		 */
1823		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1824		    !test_bit(pagenr, rbio->dbitmap))
1825			continue;
1826
1827		/* setup our array of pointers with pages
1828		 * from each stripe
1829		 */
1830		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1831			/*
1832			 * if we're rebuilding a read, we have to use
1833			 * pages from the bio list
1834			 */
1835			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1836			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1837			    (stripe == faila || stripe == failb)) {
1838				page = page_in_rbio(rbio, stripe, pagenr, 0);
1839			} else {
1840				page = rbio_stripe_page(rbio, stripe, pagenr);
1841			}
1842			pointers[stripe] = kmap(page);
1843		}
1844
1845		/* all raid6 handling here */
1846		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1847			/*
1848			 * single failure, rebuild from parity raid5
1849			 * style
1850			 */
1851			if (failb < 0) {
1852				if (faila == rbio->nr_data) {
1853					/*
1854					 * Just the P stripe has failed, without
1855					 * a bad data or Q stripe.
1856					 * TODO, we should redo the xor here.
1857					 */
1858					err = -EIO;
1859					goto cleanup;
1860				}
1861				/*
1862				 * a single failure in raid6 is rebuilt
1863				 * in the pstripe code below
1864				 */
1865				goto pstripe;
1866			}
1867
1868			/* make sure our ps and qs are in order */
1869			if (faila > failb) {
1870				int tmp = failb;
1871				failb = faila;
1872				faila = tmp;
1873			}
1874
1875			/* if the q stripe is failed, do a pstripe reconstruction
1876			 * from the xors.
1877			 * If both the q stripe and the P stripe are failed, we're
1878			 * here due to a crc mismatch and we can't give them the
1879			 * data they want
1880			 */
1881			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1882				if (rbio->bbio->raid_map[faila] ==
1883				    RAID5_P_STRIPE) {
1884					err = -EIO;
1885					goto cleanup;
1886				}
1887				/*
1888				 * otherwise we have one bad data stripe and
1889				 * a good P stripe.  raid5!
1890				 */
1891				goto pstripe;
1892			}
1893
1894			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1895				raid6_datap_recov(rbio->real_stripes,
1896						  PAGE_SIZE, faila, pointers);
1897			} else {
1898				raid6_2data_recov(rbio->real_stripes,
1899						  PAGE_SIZE, faila, failb,
1900						  pointers);
1901			}
1902		} else {
1903			void *p;
1904
1905			/* rebuild from P stripe here (raid5 or raid6) */
1906			BUG_ON(failb != -1);
1907pstripe:
1908			/* Copy parity block into failed block to start with */
1909			memcpy(pointers[faila],
1910			       pointers[rbio->nr_data],
1911			       PAGE_SIZE);
1912
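			/* the failed page now holds a copy of parity; xoring
			 * the other data pages into it rebuilds the lost data
			 */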
1913			/* rearrange the pointer array */
1914			p = pointers[faila];
1915			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1916				pointers[stripe] = pointers[stripe + 1];
1917			pointers[rbio->nr_data - 1] = p;
1918
1919			/* xor in the rest */
1920			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1921		}
1922		/* if we're doing this rebuild as part of an rmw, go through
1923		 * and set all of our private rbio pages in the
1924		 * failed stripes as uptodate.  This way finish_rmw will
1925		 * know they can be trusted.  If this was a read reconstruction,
1926		 * other endio functions will fiddle the uptodate bits
1927		 */
1928		if (rbio->operation == BTRFS_RBIO_WRITE) {
1929			for (i = 0;  i < rbio->stripe_npages; i++) {
1930				if (faila != -1) {
1931					page = rbio_stripe_page(rbio, faila, i);
1932					SetPageUptodate(page);
1933				}
1934				if (failb != -1) {
1935					page = rbio_stripe_page(rbio, failb, i);
1936					SetPageUptodate(page);
1937				}
1938			}
1939		}
1940		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1941			/*
1942			 * if we're rebuilding a read, we have to use
1943			 * pages from the bio list
1944			 */
1945			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1946			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1947			    (stripe == faila || stripe == failb)) {
1948				page = page_in_rbio(rbio, stripe, pagenr, 0);
1949			} else {
1950				page = rbio_stripe_page(rbio, stripe, pagenr);
1951			}
1952			kunmap(page);
1953		}
1954	}
1955
1956	err = 0;
1957cleanup:
1958	kfree(pointers);
1959
1960cleanup_io:
1961	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1962		if (err == 0)
1963			cache_rbio_pages(rbio);
1964		else
1965			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1966
1967		rbio_orig_end_io(rbio, err);
1968	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1969		rbio_orig_end_io(rbio, err);
1970	} else if (err == 0) {
1971		rbio->faila = -1;
1972		rbio->failb = -1;
1973
1974		if (rbio->operation == BTRFS_RBIO_WRITE)
1975			finish_rmw(rbio);
1976		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1977			finish_parity_scrub(rbio, 0);
1978		else
1979			BUG();
1980	} else {
1981		rbio_orig_end_io(rbio, err);
1982	}
1983}
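
/*
 * Illustrative sketch (not part of this file): a minimal userspace
 * version of the RAID5 rebuild performed above.  The failed block is
 * seeded with parity, the pointer array is rotated so that block sits
 * in the XOR destination slot, and the surviving data blocks are XORed
 * in.  All names below are hypothetical.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_BLOCK 4096	/* assumes 4K pages, as this file does */

/* XOR src_cnt source blocks into ptrs[src_cnt], mirroring run_xor() */
static void sketch_xor(uint8_t **ptrs, int src_cnt, size_t len)
{
	uint8_t *dest = ptrs[src_cnt];
	size_t i;
	int s;

	for (s = 0; s < src_cnt; s++)
		for (i = 0; i < len; i++)
			dest[i] ^= ptrs[s][i];
}

/* rebuild data block 'faila'; pointers[nr_data] holds the parity block */
static void sketch_raid5_rebuild(uint8_t **pointers, int nr_data, int faila)
{
	uint8_t *p = pointers[faila];
	int stripe;

	/* copy parity into the failed block to start with */
	memcpy(pointers[faila], pointers[nr_data], SKETCH_BLOCK);

	/* rotate so the block being rebuilt becomes the XOR destination */
	for (stripe = faila; stripe < nr_data - 1; stripe++)
		pointers[stripe] = pointers[stripe + 1];
	pointers[nr_data - 1] = p;

	/* XOR the nr_data - 1 surviving data blocks into it */
	sketch_xor(pointers, nr_data - 1, SKETCH_BLOCK);
}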
1984
1985/*
1986 * This is called only for stripes we've read from disk to
1987 * reconstruct the parity.
1988 */
1989static void raid_recover_end_io(struct bio *bio)
1990{
1991	struct btrfs_raid_bio *rbio = bio->bi_private;
1992
1993	/*
1994	 * we only read stripe pages off the disk, set them
1995	 * up to date if there were no errors
1996	 */
1997	if (bio->bi_error)
1998		fail_bio_stripe(rbio, bio);
1999	else
2000		set_bio_pages_uptodate(bio);
2001	bio_put(bio);
2002
2003	if (!atomic_dec_and_test(&rbio->stripes_pending))
2004		return;
2005
2006	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2007		rbio_orig_end_io(rbio, -EIO);
2008	else
2009		__raid_recover_end_io(rbio);
2010}
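
/*
 * Illustrative sketch (not part of this file) of the completion
 * pattern used above: every bio completion decrements stripes_pending,
 * and exactly one caller, the one that drops it to zero, goes on to
 * run the rebuild or to fail the rbio.  A hypothetical C11 version:
 */
#include <stdatomic.h>
#include <stdbool.h>

struct sketch_rbio {
	atomic_int stripes_pending;
	atomic_int error;
	int max_errors;
};

/* returns true iff this caller finished the last IO and recovery can run */
static bool sketch_io_done(struct sketch_rbio *r, bool failed)
{
	if (failed)
		atomic_fetch_add(&r->error, 1);

	if (atomic_fetch_sub(&r->stripes_pending, 1) != 1)
		return false;	/* other IOs still in flight */

	/* last completion: recovery only works within max_errors */
	return atomic_load(&r->error) <= r->max_errors;
}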
2011
2012/*
2013 * reads everything we need off the disk to reconstruct the
2014 * failed stripes. endio handlers trigger final reconstruction
2015 * when the IO is done.
2016 *
2017 * This is used both for reads from the higher layers and for
2018 * parity construction required to finish a rmw cycle.
2019 */
2020static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2021{
2022	int bios_to_read = 0;
2023	struct bio_list bio_list;
2024	int ret;
2025	int pagenr;
2026	int stripe;
2027	struct bio *bio;
2028
2029	bio_list_init(&bio_list);
2030
2031	ret = alloc_rbio_pages(rbio);
2032	if (ret)
2033		goto cleanup;
2034
2035	atomic_set(&rbio->error, 0);
2036
2037	/*
2038	 * read everything that hasn't failed.  Thanks to the
2039	 * stripe cache, it is possible that some or all of these
2040	 * pages are going to be uptodate.
2041	 */
2042	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2043		if (rbio->faila == stripe || rbio->failb == stripe) {
2044			atomic_inc(&rbio->error);
2045			continue;
2046		}
2047
2048		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2049			struct page *p;
2050
2051			/*
2052			 * the rmw code may have already read this
2053			 * page in
2054			 */
2055			p = rbio_stripe_page(rbio, stripe, pagenr);
2056			if (PageUptodate(p))
2057				continue;
2058
2059			ret = rbio_add_io_page(rbio, &bio_list,
2060				       rbio_stripe_page(rbio, stripe, pagenr),
2061				       stripe, pagenr, rbio->stripe_len);
2062			if (ret < 0)
2063				goto cleanup;
2064		}
2065	}
2066
2067	bios_to_read = bio_list_size(&bio_list);
2068	if (!bios_to_read) {
2069		/*
2070		 * we might have no bios to read just because the pages
2071		 * were up to date, or we might have no bios to read because
2072		 * the devices were gone.
2073		 */
2074		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2075			__raid_recover_end_io(rbio);
2076			goto out;
2077		} else {
2078			goto cleanup;
2079		}
2080	}
2081
2082	/*
2083	 * the bbio may be freed once we submit the last bio.  Make sure
2084	 * not to touch it after that
2085	 */
2086	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
2091
2092		bio->bi_private = rbio;
2093		bio->bi_end_io = raid_recover_end_io;
2094		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2095
2096		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2097
2098		submit_bio(bio);
2099	}
2100out:
2101	return 0;
2102
2103cleanup:
2104	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2105	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2106		rbio_orig_end_io(rbio, -EIO);
2107	return -EIO;
2108}
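
/*
 * Illustrative sketch (not part of this file) of the read-avoidance
 * logic above: known-failed stripes are skipped entirely (they were
 * already counted as errors), and pages the stripe cache marked
 * uptodate need no bio.  The helpers are hypothetical stand-ins for
 * rbio->faila/failb and PageUptodate().
 */
#include <stdbool.h>

static bool sketch_stripe_failed(int stripe)	{ return stripe == 2; }
static bool sketch_page_cached(int stripe, int pagenr) { return pagenr == 0; }

static int sketch_count_reads(int real_stripes, int stripe_npages)
{
	int stripe, pagenr, queued = 0;

	for (stripe = 0; stripe < real_stripes; stripe++) {
		if (sketch_stripe_failed(stripe))
			continue;	/* counted as an error, never read */
		for (pagenr = 0; pagenr < stripe_npages; pagenr++) {
			if (sketch_page_cached(stripe, pagenr))
				continue;	/* stripe cache hit */
			queued++;	/* would become a read bio */
		}
	}
	/* zero here means either all pages cached or all devices gone */
	return queued;
}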
2109
2110/*
2111 * the main entry point for reads from the higher layers.  This
2112 * is really only called when the normal read path had a failure,
2113 * so we assume the bio they send down corresponds to a failed part
2114 * of the drive.
2115 */
2116int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2117			  struct btrfs_bio *bbio, u64 stripe_len,
2118			  int mirror_num, int generic_io)
2119{
2120	struct btrfs_raid_bio *rbio;
2121	int ret;
2122
2123	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2124	if (IS_ERR(rbio)) {
2125		if (generic_io)
2126			btrfs_put_bbio(bbio);
2127		return PTR_ERR(rbio);
2128	}
2129
2130	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2131	bio_list_add(&rbio->bio_list, bio);
2132	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2133
2134	rbio->faila = find_logical_bio_stripe(rbio, bio);
2135	if (rbio->faila == -1) {
2136		btrfs_warn(fs_info,
2137	"%s could not find the bad stripe in raid56, unable to recover (bio has logical %llu len %llu, bbio has map_type %llu)",
2138			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2139			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2140		if (generic_io)
2141			btrfs_put_bbio(bbio);
2142		kfree(rbio);
2143		return -EIO;
2144	}
2145
2146	if (generic_io) {
2147		btrfs_bio_counter_inc_noblocked(fs_info);
2148		rbio->generic_bio_cnt = 1;
2149	} else {
2150		btrfs_get_bbio(bbio);
2151	}
2152
2153	/*
2154	 * reconstruct from the q stripe if they are
2155	 * asking for mirror 3
2156	 */
2157	if (mirror_num == 3)
2158		rbio->failb = rbio->real_stripes - 2;
2159
2160	ret = lock_stripe_add(rbio);
2161
2162	/*
2163	 * __raid56_parity_recover will end the bio with
2164	 * any errors it hits.  We don't want to return
2165	 * its error value up the stack because our caller
2166	 * will end up calling bio_endio with any nonzero
2167	 * return
2168	 */
2169	if (ret == 0)
2170		__raid56_parity_recover(rbio);
2171	/*
2172	 * our rbio has been added to the list of
2173	 * rbios that will be handled after the
2174	 * current lock owner is done
2175	 */
2176	return 0;
2177
2178}
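
/*
 * Illustrative sketch (not part of this file) of the mirror_num
 * convention as this function applies it: the stripe the failed bio
 * maps to is always marked failed, and mirror 3 additionally marks
 * the P slot (real_stripes - 2) failed so reconstruction is forced to
 * use the Q stripe.
 */
static void sketch_apply_mirror(int mirror_num, int real_stripes,
				int faila_from_bio, int *faila, int *failb)
{
	*faila = faila_from_bio;	/* from find_logical_bio_stripe() */
	*failb = -1;
	if (mirror_num == 3)
		*failb = real_stripes - 2;	/* pretend P is gone too */
}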
2179
2180static void rmw_work(struct btrfs_work *work)
2181{
2182	struct btrfs_raid_bio *rbio;
2183
2184	rbio = container_of(work, struct btrfs_raid_bio, work);
2185	raid56_rmw_stripe(rbio);
2186}
2187
2188static void read_rebuild_work(struct btrfs_work *work)
2189{
2190	struct btrfs_raid_bio *rbio;
2191
2192	rbio = container_of(work, struct btrfs_raid_bio, work);
2193	__raid56_parity_recover(rbio);
2194}
2195
2196/*
2197 * The following code is used to scrub/replace the parity stripe
2198 *
2199 * Note: We need to make sure that all the pages added to the scrub/replace
2200 * raid bio are correct and will not be changed during the scrub/replace.
2201 * That is, those pages hold only metadata or file data with checksums.
2202 */
2203
2204struct btrfs_raid_bio *
2205raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2206			       struct btrfs_bio *bbio, u64 stripe_len,
2207			       struct btrfs_device *scrub_dev,
2208			       unsigned long *dbitmap, int stripe_nsectors)
2209{
2210	struct btrfs_raid_bio *rbio;
2211	int i;
2212
2213	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2214	if (IS_ERR(rbio))
2215		return NULL;
2216	bio_list_add(&rbio->bio_list, bio);
2217	/*
2218	 * This is a special bio which is used to hold the completion handler
2219	 * and make the scrub rbio look similar to the other types
2220	 */
2221	ASSERT(!bio->bi_iter.bi_size);
2222	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2223
2224	for (i = 0; i < rbio->real_stripes; i++) {
2225		if (bbio->stripes[i].dev == scrub_dev) {
2226			rbio->scrubp = i;
2227			break;
2228		}
2229	}
2230
2231	/* for now we only support the case where sectorsize == PAGE_SIZE */
2232	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2233	ASSERT(rbio->stripe_npages == stripe_nsectors);
2234	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2235
2236	return rbio;
2237}
2238
2239/* Used for both parity scrub and missing-device rebuild. */
2240void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2241			    u64 logical)
2242{
2243	int stripe_offset;
2244	int index;
2245
2246	ASSERT(logical >= rbio->bbio->raid_map[0]);
2247	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2248				rbio->stripe_len * rbio->nr_data);
2249	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2250	index = stripe_offset >> PAGE_SHIFT;
2251	rbio->bio_pages[index] = page;
2252}
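
/*
 * Illustrative sketch (not part of this file) of the index math above,
 * assuming 4K pages: a scrub page is filed by its logical offset from
 * the start of the full stripe, one slot per page.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12	/* 4K pages */

static int sketch_scrub_page_index(uint64_t logical, uint64_t raid_map0,
				   uint64_t stripe_len, int nr_data)
{
	assert(logical >= raid_map0);
	assert(logical + (1ULL << SKETCH_PAGE_SHIFT) <=
	       raid_map0 + stripe_len * (uint64_t)nr_data);

	return (int)((logical - raid_map0) >> SKETCH_PAGE_SHIFT);
}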
2253
2254/*
2255 * We only scrub the parity for which we have correct data on the same
2256 * horizontal stripe, so we don't need to allocate pages for all the stripes.
2257 */
2258static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2259{
2260	int i;
2261	int bit;
2262	int index;
2263	struct page *page;
2264
2265	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2266		for (i = 0; i < rbio->real_stripes; i++) {
2267			index = i * rbio->stripe_npages + bit;
2268			if (rbio->stripe_pages[index])
2269				continue;
2270
2271			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2272			if (!page)
2273				return -ENOMEM;
2274			rbio->stripe_pages[index] = page;
2275		}
2276	}
2277	return 0;
2278}
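
/*
 * Illustrative sketch (not part of this file): stripe_pages is
 * stripe-major, so page 'pagenr' of stripe 'stripe' lives at
 * stripe * stripe_npages + pagenr, and alloc_rbio_essential_pages()
 * above only fills the slots whose page number is set in dbitmap.
 */
static inline int sketch_stripe_page_index(int stripe, int pagenr,
					   int stripe_npages)
{
	return stripe * stripe_npages + pagenr;
}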
2279
2280static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2281					 int need_check)
2282{
2283	struct btrfs_bio *bbio = rbio->bbio;
2284	void *pointers[rbio->real_stripes];
2285	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2286	int nr_data = rbio->nr_data;
2287	int stripe;
2288	int pagenr;
2289	int p_stripe = -1;
2290	int q_stripe = -1;
2291	struct page *p_page = NULL;
2292	struct page *q_page = NULL;
2293	struct bio_list bio_list;
2294	struct bio *bio;
2295	int is_replace = 0;
2296	int ret;
2297
2298	bio_list_init(&bio_list);
2299
2300	if (rbio->real_stripes - rbio->nr_data == 1) {
2301		p_stripe = rbio->real_stripes - 1;
2302	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2303		p_stripe = rbio->real_stripes - 2;
2304		q_stripe = rbio->real_stripes - 1;
2305	} else {
2306		BUG();
2307	}
2308
2309	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2310		is_replace = 1;
2311		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2312	}
2313
2314	/*
	 * The higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, so don't cache it.
2318	 */
2319	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2320
2321	if (!need_check)
2322		goto writeback;
2323
2324	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2325	if (!p_page)
2326		goto cleanup;
2327	SetPageUptodate(p_page);
2328
2329	if (q_stripe != -1) {
2330		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2331		if (!q_page) {
2332			__free_page(p_page);
2333			goto cleanup;
2334		}
2335		SetPageUptodate(q_page);
2336	}
2337
2338	atomic_set(&rbio->error, 0);
2339
2340	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2341		struct page *p;
2342		void *parity;
2343		/* first collect one page from each data stripe */
2344		for (stripe = 0; stripe < nr_data; stripe++) {
2345			p = page_in_rbio(rbio, stripe, pagenr, 0);
2346			pointers[stripe] = kmap(p);
2347		}
2348
2349		/* then add the parity stripe */
2350		pointers[stripe++] = kmap(p_page);
2351
2352		if (q_stripe != -1) {
2353
2354			/*
2355			 * raid6, add the qstripe and call the
2356			 * library function to fill in our p/q
2357			 */
2358			pointers[stripe++] = kmap(q_page);
2359
			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
			kunmap(q_page);	/* mapped above only for the syndrome */
2362		} else {
2363			/* raid5 */
2364			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2365			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2366		}
2367
2368		/* check the parity being scrubbed and repair it if it differs */
2369		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2370		parity = kmap(p);
2371		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2372			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2373		else
2374			/* the parity is right, no need to write it back */
2375			bitmap_clear(rbio->dbitmap, pagenr, 1);
2376		kunmap(p);
2377
		/* only the data stripes were mapped via page_in_rbio() */
		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
2380	}
2381
2382	__free_page(p_page);
2383	if (q_page)
2384		__free_page(q_page);
2385
2386writeback:
2387	/*
2388	 * time to start writing.  Make bios for everything from the
2389	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2390	 * everything else.
2391	 */
2392	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2393		struct page *page;
2394
2395		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2396		ret = rbio_add_io_page(rbio, &bio_list,
2397			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2398		if (ret)
2399			goto cleanup;
2400	}
2401
2402	if (!is_replace)
2403		goto submit_write;
2404
2405	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2406		struct page *page;
2407
2408		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2409		ret = rbio_add_io_page(rbio, &bio_list, page,
2410				       bbio->tgtdev_map[rbio->scrubp],
2411				       pagenr, rbio->stripe_len);
2412		if (ret)
2413			goto cleanup;
2414	}
2415
2416submit_write:
2417	nr_data = bio_list_size(&bio_list);
2418	if (!nr_data) {
2419		/* every parity was correct, nothing to write back */
2420		rbio_orig_end_io(rbio, 0);
2421		return;
2422	}
2423
2424	atomic_set(&rbio->stripes_pending, nr_data);
2425
	while ((bio = bio_list_pop(&bio_list))) {
2430
2431		bio->bi_private = rbio;
2432		bio->bi_end_io = raid_write_end_io;
2433		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2434
2435		submit_bio(bio);
2436	}
2437	return;
2438
2439cleanup:
2440	rbio_orig_end_io(rbio, -EIO);
2441}
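
/*
 * Illustrative sketch (not part of this file) of the check-and-repair
 * step above, reduced to RAID5: recompute the expected parity from the
 * data blocks, and only leave the page marked for writeback when the
 * on-disk parity actually differs.  Names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_BLOCK 4096	/* assumes 4K pages */

/* returns true if the on-disk parity was wrong and has been rewritten */
static bool sketch_check_parity(uint8_t **data, int nr_data,
				uint8_t *ondisk_parity)
{
	uint8_t expected[SKETCH_BLOCK];
	size_t i;
	int d;

	memcpy(expected, data[0], SKETCH_BLOCK);
	for (d = 1; d < nr_data; d++)
		for (i = 0; i < SKETCH_BLOCK; i++)
			expected[i] ^= data[d][i];

	if (!memcmp(ondisk_parity, expected, SKETCH_BLOCK))
		return false;	/* parity correct: clear the dbitmap bit */

	memcpy(ondisk_parity, expected, SKETCH_BLOCK);
	return true;		/* repaired: keep the bit, write it back */
}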
2442
2443static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2444{
2445	if (stripe >= 0 && stripe < rbio->nr_data)
2446		return 1;
2447	return 0;
2448}
2449
2450/*
2451 * While we're doing the parity check and repair, we could have errors
2452 * in reading pages off the disk.  This checks for errors and if we're
2453 * not able to read the page it'll trigger parity reconstruction.  The
2454 * parity scrub will be finished after we've reconstructed the failed
2455 * stripes
2456 */
2457static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2458{
2459	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2460		goto cleanup;
2461
2462	if (rbio->faila >= 0 || rbio->failb >= 0) {
2463		int dfail = 0, failp = -1;
2464
2465		if (is_data_stripe(rbio, rbio->faila))
2466			dfail++;
2467		else if (is_parity_stripe(rbio->faila))
2468			failp = rbio->faila;
2469
2470		if (is_data_stripe(rbio, rbio->failb))
2471			dfail++;
2472		else if (is_parity_stripe(rbio->failb))
2473			failp = rbio->failb;
2474
2475		/*
2476		 * We cannot use the parity being scrubbed to repair the
2477		 * data, so our repair capability is reduced by one.
2478		 * (In the case of RAID5, we cannot repair anything.)
2479		 */
2480		if (dfail > rbio->bbio->max_errors - 1)
2481			goto cleanup;
2482
2483		/*
2484		 * If all the data is good and only the parity is bad,
2485		 * just repair the parity.
2486		 */
2487		if (dfail == 0) {
2488			finish_parity_scrub(rbio, 0);
2489			return;
2490		}
2491
2492		/*
2493		 * At this point we have one corrupted data stripe and one
2494		 * corrupted parity on RAID6.  If the corrupted parity is
2495		 * the one being scrubbed we can use the other parity to
2496		 * repair the data; otherwise the data stripe cannot be repaired.
2497		 */
2498		if (failp != rbio->scrubp)
2499			goto cleanup;
2500
2501		__raid_recover_end_io(rbio);
2502	} else {
2503		finish_parity_scrub(rbio, 1);
2504	}
2505	return;
2506
2507cleanup:
2508	rbio_orig_end_io(rbio, -EIO);
2509}
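
/*
 * Illustrative sketch (not part of this file) of the decision just
 * made: a parity that is itself being scrubbed cannot be trusted for
 * repair, so one level of redundancy is effectively lost.
 */
enum sketch_verdict { SKETCH_FAIL, SKETCH_REPAIR_PARITY, SKETCH_RECOVER };

static enum sketch_verdict sketch_scrub_verdict(int dfail, int failp,
						int scrubp, int max_errors)
{
	if (dfail > max_errors - 1)
		return SKETCH_FAIL;		/* too many data failures */
	if (dfail == 0)
		return SKETCH_REPAIR_PARITY;	/* data ok, rewrite parity */
	if (failp != scrubp)
		return SKETCH_FAIL;		/* the good parity is gone */
	return SKETCH_RECOVER;			/* rebuild, then re-scrub */
}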
2510
2511/*
2512 * end io for the read phase of the scrub cycle.  All the bios here are
2513 * physical stripe bios we've read from the disk so we can recalculate
2514 * the parity of the stripe.
2515 *
2516 * This will usually kick off finish_parity_scrub once all the bios are read
2517 * in, but it may trigger parity reconstruction if we had any errors along the way
2518 */
2519static void raid56_parity_scrub_end_io(struct bio *bio)
2520{
2521	struct btrfs_raid_bio *rbio = bio->bi_private;
2522
2523	if (bio->bi_error)
2524		fail_bio_stripe(rbio, bio);
2525	else
2526		set_bio_pages_uptodate(bio);
2527
2528	bio_put(bio);
2529
2530	if (!atomic_dec_and_test(&rbio->stripes_pending))
2531		return;
2532
2533	/*
2534	 * this will normally call finish_parity_scrub to start our write
2535	 * but if there are any failed stripes we'll reconstruct
2536	 * from parity first
2537	 */
2538	validate_rbio_for_parity_scrub(rbio);
2539}
2540
2541static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2542{
2543	int bios_to_read = 0;
2544	struct bio_list bio_list;
2545	int ret;
2546	int pagenr;
2547	int stripe;
2548	struct bio *bio;
2549
2550	ret = alloc_rbio_essential_pages(rbio);
2551	if (ret)
2552		goto cleanup;
2553
2554	bio_list_init(&bio_list);
2555
2556	atomic_set(&rbio->error, 0);
2557	/*
2558	 * build a list of bios to read all the missing parts of this
2559	 * stripe
2560	 */
2561	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2562		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2563			struct page *page;
2564			/*
2565			 * we want to find all the pages missing from
2566			 * the rbio and read them from the disk.  If
2567			 * page_in_rbio finds a page in the bio list
2568			 * we don't need to read it off the stripe.
2569			 */
2570			page = page_in_rbio(rbio, stripe, pagenr, 1);
2571			if (page)
2572				continue;
2573
2574			page = rbio_stripe_page(rbio, stripe, pagenr);
2575			/*
2576			 * the bio cache may have handed us an uptodate
2577			 * page.  If so, be happy and use it
2578			 */
2579			if (PageUptodate(page))
2580				continue;
2581
2582			ret = rbio_add_io_page(rbio, &bio_list, page,
2583				       stripe, pagenr, rbio->stripe_len);
2584			if (ret)
2585				goto cleanup;
2586		}
2587	}
2588
2589	bios_to_read = bio_list_size(&bio_list);
2590	if (!bios_to_read) {
2591		/*
2592		 * this can happen if others have merged with us;
2593		 * it means there is nothing left to read.
2594		 * But if there are missing devices it may not be
2595		 * safe to do the full stripe write yet.
2596		 */
2597		goto finish;
2598	}
2599
2600	/*
2601	 * the bbio may be freed once we submit the last bio.  Make sure
2602	 * not to touch it after that
2603	 */
2604	atomic_set(&rbio->stripes_pending, bios_to_read);
	while ((bio = bio_list_pop(&bio_list))) {
2609
2610		bio->bi_private = rbio;
2611		bio->bi_end_io = raid56_parity_scrub_end_io;
2612		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2613
2614		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2615
2616		submit_bio(bio);
2617	}
2618	/* the actual write will happen once the reads are done */
2619	return;
2620
2621cleanup:
2622	rbio_orig_end_io(rbio, -EIO);
2623	return;
2624
2625finish:
2626	validate_rbio_for_parity_scrub(rbio);
2627}
2628
2629static void scrub_parity_work(struct btrfs_work *work)
2630{
2631	struct btrfs_raid_bio *rbio;
2632
2633	rbio = container_of(work, struct btrfs_raid_bio, work);
2634	raid56_parity_scrub_stripe(rbio);
2635}
2636
2637static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2638{
2639	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2640			scrub_parity_work, NULL, NULL);
2641
2642	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2643}
2644
2645void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2646{
2647	if (!lock_stripe_add(rbio))
2648		async_scrub_parity(rbio);
2649}
2650
2651/* The following code is used for dev replace of a missing RAID 5/6 device. */
2652
2653struct btrfs_raid_bio *
2654raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2655			  struct btrfs_bio *bbio, u64 length)
2656{
2657	struct btrfs_raid_bio *rbio;
2658
2659	rbio = alloc_rbio(fs_info, bbio, length);
2660	if (IS_ERR(rbio))
2661		return NULL;
2662
2663	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2664	bio_list_add(&rbio->bio_list, bio);
2665	/*
2666	 * This is a special bio which is used to hold the completion handler
	 * and make the missing rbio look similar to the other types
2668	 */
2669	ASSERT(!bio->bi_iter.bi_size);
2670
2671	rbio->faila = find_logical_bio_stripe(rbio, bio);
2672	if (rbio->faila == -1) {
2673		BUG();
2674		kfree(rbio);
2675		return NULL;
2676	}
2677
2678	return rbio;
2679}
2680
2681static void missing_raid56_work(struct btrfs_work *work)
2682{
2683	struct btrfs_raid_bio *rbio;
2684
2685	rbio = container_of(work, struct btrfs_raid_bio, work);
2686	__raid56_parity_recover(rbio);
2687}
2688
2689static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2690{
2691	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2692			missing_raid56_work, NULL, NULL);
2693
2694	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2695}
2696
2697void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2698{
2699	if (!lock_stripe_add(rbio))
2700		async_missing_raid56(rbio);
2701}