v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Fusion-io  All rights reserved.
   4 * Copyright (C) 2012 Intel Corp. All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/bio.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/raid/pq.h>
  12#include <linux/hash.h>
  13#include <linux/list_sort.h>
  14#include <linux/raid/xor.h>
  15#include <linux/mm.h>
  16#include "ctree.h"
  17#include "disk-io.h"
  18#include "volumes.h"
  19#include "raid56.h"
  20#include "async-thread.h"
  21
  22/* set when additional merges to this rbio are not allowed */
  23#define RBIO_RMW_LOCKED_BIT	1
  24
  25/*
  26 * set when this rbio is sitting in the hash, but it is just a cache
  27 * of past RMW
  28 */
  29#define RBIO_CACHE_BIT		2
  30
  31/*
  32 * set when it is safe to trust the stripe_pages for caching
  33 */
  34#define RBIO_CACHE_READY_BIT	3
  35
  36#define RBIO_CACHE_SIZE 1024
  37
  38#define BTRFS_STRIPE_HASH_TABLE_BITS				11
  39
  40/* Used by the raid56 code to lock stripes for read/modify/write */
  41struct btrfs_stripe_hash {
  42	struct list_head hash_list;
  43	spinlock_t lock;
  44};
  45
  46/* Used by the raid56 code to lock stripes for read/modify/write */
  47struct btrfs_stripe_hash_table {
  48	struct list_head stripe_cache;
  49	spinlock_t cache_lock;
  50	int cache_size;
  51	struct btrfs_stripe_hash table[];
  52};
  53
  54enum btrfs_rbio_ops {
  55	BTRFS_RBIO_WRITE,
  56	BTRFS_RBIO_READ_REBUILD,
  57	BTRFS_RBIO_PARITY_SCRUB,
  58	BTRFS_RBIO_REBUILD_MISSING,
  59};
  60
  61struct btrfs_raid_bio {
  62	struct btrfs_fs_info *fs_info;
  63	struct btrfs_bio *bbio;
  64
  65	/* while we're doing rmw on a stripe
  66	 * we put it into a hash table so we can
  67	 * lock the stripe and merge more rbios
  68	 * into it.
  69	 */
  70	struct list_head hash_list;
  71
  72	/*
  73	 * LRU list for the stripe cache
  74	 */
  75	struct list_head stripe_cache;
  76
  77	/*
  78	 * for scheduling work in the helper threads
  79	 */
  80	struct btrfs_work work;
  81
  82	/*
  83	 * bio list and bio_list_lock are used
  84	 * to add more bios into the stripe
  85	 * in hopes of avoiding the full rmw
  86	 */
  87	struct bio_list bio_list;
  88	spinlock_t bio_list_lock;
  89
  90	/* also protected by the bio_list_lock, the
  91	 * plug list is used by the plugging code
  92	 * to collect partial bios while plugged.  The
  93	 * stripe locking code also uses it to hand off
  94	 * the stripe lock to the next pending IO
  95	 */
  96	struct list_head plug_list;
  97
  98	/*
  99	 * flags that tell us if it is safe to
 100	 * merge with this bio
 101	 */
 102	unsigned long flags;
 103
 104	/* size of each individual stripe on disk */
 105	int stripe_len;
 106
 107	/* number of data stripes (no p/q) */
 108	int nr_data;
 109
 110	int real_stripes;
 111
 112	int stripe_npages;
 113	/*
 114	 * set if we're doing a parity rebuild
 115	 * for a read from higher up, which is handled
 116	 * differently from a parity rebuild as part of
 117	 * rmw
 118	 */
 119	enum btrfs_rbio_ops operation;
 120
 121	/* first bad stripe */
 122	int faila;
 123
 124	/* second bad stripe (for raid6 use) */
 125	int failb;
 126
 127	int scrubp;
 128	/*
 129	 * number of pages needed to represent the full
 130	 * stripe
 131	 */
 132	int nr_pages;
 133
 134	/*
 135	 * size of all the bios in the bio_list.  This
 136	 * helps us decide if the rbio maps to a full
 137	 * stripe or not
 138	 */
 139	int bio_list_bytes;
 140
 141	int generic_bio_cnt;
 142
 143	refcount_t refs;
 144
 145	atomic_t stripes_pending;
 146
 147	atomic_t error;
 148	/*
 149	 * these are two arrays of pointers.  We allocate the
 150	 * rbio big enough to hold them both and setup their
 151	 * locations when the rbio is allocated
 152	 */
 153
 154	/* pointers to pages that we allocated for
 155	 * reading/writing stripes directly from the disk (including P/Q)
 156	 */
 157	struct page **stripe_pages;
 158
 159	/*
 160	 * pointers to the pages in the bio_list.  Stored
 161	 * here for faster lookup
 162	 */
 163	struct page **bio_pages;
 164
 165	/*
 166	 * bitmap to record which horizontal stripe has data
 167	 */
 168	unsigned long *dbitmap;
 169
 170	/* allocated with real_stripes-many pointers for finish_*() calls */
 171	void **finish_pointers;
 172
 173	/* allocated with stripe_npages-many bits for finish_*() calls */
 174	unsigned long *finish_pbitmap;
 175};
 176
 177static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 178static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 179static void rmw_work(struct btrfs_work *work);
 180static void read_rebuild_work(struct btrfs_work *work);
 181static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 182static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 183static void __free_raid_bio(struct btrfs_raid_bio *rbio);
 184static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 185static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 186
 187static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 188					 int need_check);
 189static void scrub_parity_work(struct btrfs_work *work);
 190
 191static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
 192{
 193	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
 194	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
 195}
 196
 197/*
 198 * the stripe hash table is used for locking, and to collect
 199 * bios in hopes of making a full stripe
 200 */
 201int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 202{
 203	struct btrfs_stripe_hash_table *table;
 204	struct btrfs_stripe_hash_table *x;
 205	struct btrfs_stripe_hash *cur;
 206	struct btrfs_stripe_hash *h;
 207	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 208	int i;
 209
 210	if (info->stripe_hash_table)
 211		return 0;
 212
 213	/*
 214	 * The table is large, starting with order 4 and can go as high as
 215	 * order 7 in case lock debugging is turned on.
 216	 *
 217	 * Try harder to allocate and fallback to vmalloc to lower the chance
 218	 * of a failing mount.
 219	 */
 220	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
 221	if (!table)
 222		return -ENOMEM;
 223
 224	spin_lock_init(&table->cache_lock);
 225	INIT_LIST_HEAD(&table->stripe_cache);
 226
 227	h = table->table;
 228
 229	for (i = 0; i < num_entries; i++) {
 230		cur = h + i;
 231		INIT_LIST_HEAD(&cur->hash_list);
 232		spin_lock_init(&cur->lock);
 233	}
 234
 235	x = cmpxchg(&info->stripe_hash_table, NULL, table);
 236	if (x)
 237		kvfree(x);
 238	return 0;
 239}
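    /*
     * Sizing sketch (approximate, config dependent): BTRFS_STRIPE_HASH_TABLE_BITS
     * is 11, so the table holds 1 << 11 == 2048 buckets and struct_size() asks
     * for the header plus 2048 copies of struct btrfs_stripe_hash.  Each bucket
     * is a list_head plus a spinlock_t -- roughly 24 bytes on a common 64-bit
     * build, noticeably more with lock debugging -- which is where the
     * "order 4 ... order 7" range in the comment above comes from, and why
     * kvzalloc() is used so the allocation can fall back to vmalloc.
     */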
 240
 241/*
 242 * caching an rbio means to copy anything from the
 243 * bio_pages array into the stripe_pages array.  We
 244 * use the page uptodate bit in the stripe cache array
 245 * to indicate if it has valid data
 246 *
 247 * once the caching is done, we set the cache ready
 248 * bit.
 249 */
 250static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 251{
 252	int i;
 253	char *s;
 254	char *d;
 255	int ret;
 256
 257	ret = alloc_rbio_pages(rbio);
 258	if (ret)
 259		return;
 260
 261	for (i = 0; i < rbio->nr_pages; i++) {
 262		if (!rbio->bio_pages[i])
 263			continue;
 264
 265		s = kmap(rbio->bio_pages[i]);
 266		d = kmap(rbio->stripe_pages[i]);
 267
 268		copy_page(d, s);
 269
 270		kunmap(rbio->bio_pages[i]);
 271		kunmap(rbio->stripe_pages[i]);
 272		SetPageUptodate(rbio->stripe_pages[i]);
 273	}
 274	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 275}
 276
 277/*
 278 * we hash on the first logical address of the stripe
 279 */
 280static int rbio_bucket(struct btrfs_raid_bio *rbio)
 281{
 282	u64 num = rbio->bbio->raid_map[0];
 283
 284	/*
 285	 * we shift down quite a bit.  We're using byte
 286	 * addressing, and most of the lower bits are zeros.
 287	 * This tends to upset hash_64, and it consistently
 288	 * returns just one or two different values.
 289	 *
 290	 * shifting off the lower bits fixes things.
 291	 */
 292	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 293}
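    /*
     * Worked example (assuming the usual 64KiB stripe length): full stripe
     * starts are multiples of nr_data * 64KiB, so the low 16 bits of
     * raid_map[0] are always zero and hash_64() sees very little variation.
     * After the ">> 16", logical addresses such as 0x40000000 and 0x40030000
     * become 0x4000 and 0x4003, which hash_64(..., 11) spreads evenly across
     * the 2048 buckets.
     */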
 294
 295/*
 296 * stealing an rbio means taking all the uptodate pages from the stripe
 297 * array in the source rbio and putting them into the destination rbio
 298 */
 299static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 300{
 301	int i;
 302	struct page *s;
 303	struct page *d;
 304
 305	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 306		return;
 307
 308	for (i = 0; i < dest->nr_pages; i++) {
 309		s = src->stripe_pages[i];
 310		if (!s || !PageUptodate(s)) {
 311			continue;
 312		}
 313
 314		d = dest->stripe_pages[i];
 315		if (d)
 316			__free_page(d);
 317
 318		dest->stripe_pages[i] = s;
 319		src->stripe_pages[i] = NULL;
 320	}
 321}
 322
 323/*
 324 * merging means we take the bio_list from the victim and
 325 * splice it into the destination.  The victim should
 326 * be discarded afterwards.
 327 *
 328 * must be called with dest->bio_list_lock held
 329 */
 330static void merge_rbio(struct btrfs_raid_bio *dest,
 331		       struct btrfs_raid_bio *victim)
 332{
 333	bio_list_merge(&dest->bio_list, &victim->bio_list);
 334	dest->bio_list_bytes += victim->bio_list_bytes;
 335	dest->generic_bio_cnt += victim->generic_bio_cnt;
 336	bio_list_init(&victim->bio_list);
 337}
 338
 339/*
 340 * used to prune items that are in the cache.  The caller
 341 * must hold the hash table lock.
 342 */
 343static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 344{
 345	int bucket = rbio_bucket(rbio);
 346	struct btrfs_stripe_hash_table *table;
 347	struct btrfs_stripe_hash *h;
 348	int freeit = 0;
 349
 350	/*
 351	 * check the bit again under the hash table lock.
 352	 */
 353	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 354		return;
 355
 356	table = rbio->fs_info->stripe_hash_table;
 357	h = table->table + bucket;
 358
 359	/* hold the lock for the bucket because we may be
 360	 * removing it from the hash table
 361	 */
 362	spin_lock(&h->lock);
 363
 364	/*
 365	 * hold the lock for the bio list because we need
 366	 * to make sure the bio list is empty
 367	 */
 368	spin_lock(&rbio->bio_list_lock);
 369
 370	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 371		list_del_init(&rbio->stripe_cache);
 372		table->cache_size -= 1;
 373		freeit = 1;
 374
 375		/* if the bio list isn't empty, this rbio is
 376		 * still involved in an IO.  We take it out
 377		 * of the cache list, and drop the ref that
 378		 * was held for the list.
 379		 *
 380		 * If the bio_list was empty, we also remove
 381		 * the rbio from the hash_table, and drop
 382		 * the corresponding ref
 383		 */
 384		if (bio_list_empty(&rbio->bio_list)) {
 385			if (!list_empty(&rbio->hash_list)) {
 386				list_del_init(&rbio->hash_list);
 387				refcount_dec(&rbio->refs);
 388				BUG_ON(!list_empty(&rbio->plug_list));
 389			}
 390		}
 391	}
 392
 393	spin_unlock(&rbio->bio_list_lock);
 394	spin_unlock(&h->lock);
 395
 396	if (freeit)
 397		__free_raid_bio(rbio);
 398}
 399
 400/*
 401 * prune a given rbio from the cache
 402 */
 403static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 404{
 405	struct btrfs_stripe_hash_table *table;
 406	unsigned long flags;
 407
 408	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 409		return;
 410
 411	table = rbio->fs_info->stripe_hash_table;
 412
 413	spin_lock_irqsave(&table->cache_lock, flags);
 414	__remove_rbio_from_cache(rbio);
 415	spin_unlock_irqrestore(&table->cache_lock, flags);
 416}
 417
 418/*
 419 * remove everything in the cache
 420 */
 421static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 422{
 423	struct btrfs_stripe_hash_table *table;
 424	unsigned long flags;
 425	struct btrfs_raid_bio *rbio;
 426
 427	table = info->stripe_hash_table;
 428
 429	spin_lock_irqsave(&table->cache_lock, flags);
 430	while (!list_empty(&table->stripe_cache)) {
 431		rbio = list_entry(table->stripe_cache.next,
 432				  struct btrfs_raid_bio,
 433				  stripe_cache);
 434		__remove_rbio_from_cache(rbio);
 435	}
 436	spin_unlock_irqrestore(&table->cache_lock, flags);
 437}
 438
 439/*
 440 * remove all cached entries and free the hash table
 441 * used by unmount
 442 */
 443void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 444{
 445	if (!info->stripe_hash_table)
 446		return;
 447	btrfs_clear_rbio_cache(info);
 448	kvfree(info->stripe_hash_table);
 449	info->stripe_hash_table = NULL;
 450}
 451
 452/*
 453 * insert an rbio into the stripe cache.  It
 454 * must have already been prepared by calling
 455 * cache_rbio_pages
 456 *
 457 * If this rbio was already cached, it gets
 458 * moved to the front of the lru.
 459 *
 460 * If the size of the rbio cache is too big, we
 461 * prune an item.
 462 */
 463static void cache_rbio(struct btrfs_raid_bio *rbio)
 464{
 465	struct btrfs_stripe_hash_table *table;
 466	unsigned long flags;
 467
 468	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 469		return;
 470
 471	table = rbio->fs_info->stripe_hash_table;
 472
 473	spin_lock_irqsave(&table->cache_lock, flags);
 474	spin_lock(&rbio->bio_list_lock);
 475
 476	/* bump our ref if we were not in the list before */
 477	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 478		refcount_inc(&rbio->refs);
 479
 480	if (!list_empty(&rbio->stripe_cache)){
 481		list_move(&rbio->stripe_cache, &table->stripe_cache);
 482	} else {
 483		list_add(&rbio->stripe_cache, &table->stripe_cache);
 484		table->cache_size += 1;
 485	}
 486
 487	spin_unlock(&rbio->bio_list_lock);
 488
 489	if (table->cache_size > RBIO_CACHE_SIZE) {
 490		struct btrfs_raid_bio *found;
 491
 492		found = list_entry(table->stripe_cache.prev,
 493				  struct btrfs_raid_bio,
 494				  stripe_cache);
 495
 496		if (found != rbio)
 497			__remove_rbio_from_cache(found);
 498	}
 499
 500	spin_unlock_irqrestore(&table->cache_lock, flags);
 501}
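    /*
     * LRU behaviour sketch: list_add()/list_move() above keep the most
     * recently cached rbio at the head of table->stripe_cache, so
     * stripe_cache.prev is always the oldest entry.  Once cache_size exceeds
     * RBIO_CACHE_SIZE (1024) that tail entry is evicted, unless it happens
     * to be the rbio that was just inserted.
     */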
 502
 503/*
 504 * helper function to run the xor_blocks api.  It is only
 505 * able to do MAX_XOR_BLOCKS at a time, so we need to
 506 * loop through.
 507 */
 508static void run_xor(void **pages, int src_cnt, ssize_t len)
 509{
 510	int src_off = 0;
 511	int xor_src_cnt = 0;
 512	void *dest = pages[src_cnt];
 513
 514	while(src_cnt > 0) {
 515		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 516		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 517
 518		src_cnt -= xor_src_cnt;
 519		src_off += xor_src_cnt;
 520	}
 521}
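    /*
     * Example (assuming MAX_XOR_BLOCKS is 4, its usual value in
     * <linux/raid/xor.h>): for src_cnt == 6 the destination is pages[6] and
     * the loop calls xor_blocks(4, len, dest, pages + 0) followed by
     * xor_blocks(2, len, dest, pages + 4), accumulating all six source pages
     * into dest.  Note dest is read before src_cnt is consumed, so the slot
     * just past the sources really is the parity buffer.
     */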
 522
 523/*
 524 * Returns true if the bio list inside this rbio covers an entire stripe (no
 525 * rmw required).
 526 */
 527static int rbio_is_full(struct btrfs_raid_bio *rbio)
 528{
 529	unsigned long flags;
 530	unsigned long size = rbio->bio_list_bytes;
 531	int ret = 1;
 532
 533	spin_lock_irqsave(&rbio->bio_list_lock, flags);
 534	if (size != rbio->nr_data * rbio->stripe_len)
 535		ret = 0;
 536	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 537	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 538
 539	return ret;
 540}
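    /*
     * Example: on a 3-device RAID5 (nr_data == 2) with the usual 64KiB
     * stripe_len, the rbio is "full" only once the queued bios cover
     * 2 * 64KiB == 128KiB, i.e. every data page of the stripe is being
     * rewritten and no read-modify-write cycle is needed.
     */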
 541
 542/*
 543 * returns 1 if it is safe to merge two rbios together.
 544 * The merging is safe if the two rbios correspond to
 545 * the same stripe and if they are both going in the same
 546 * direction (read vs write), and if neither one is
 547 * locked for final IO
 548 *
 549 * The caller is responsible for locking such that
 550 * rmw_locked is safe to test
 551 */
 552static int rbio_can_merge(struct btrfs_raid_bio *last,
 553			  struct btrfs_raid_bio *cur)
 554{
 555	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 556	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 557		return 0;
 558
 559	/*
 560	 * we can't merge with cached rbios, since the
 561	 * idea is that when we merge the destination
 562	 * rbio is going to run our IO for us.  We can
 563	 * steal from cached rbios though, other functions
 564	 * handle that.
 565	 */
 566	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 567	    test_bit(RBIO_CACHE_BIT, &cur->flags))
 568		return 0;
 569
 570	if (last->bbio->raid_map[0] !=
 571	    cur->bbio->raid_map[0])
 572		return 0;
 573
 574	/* we can't merge with different operations */
 575	if (last->operation != cur->operation)
 576		return 0;
 577	/*
 578	 * We need to read the full stripe from the drive,
 579	 * check and repair the parity and write the new results.
 580	 *
 581	 * We're not allowed to add any new bios to the
 582	 * bio list here, anyone else that wants to
 583	 * change this stripe needs to do their own rmw.
 584	 */
 585	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
 586		return 0;
 587
 588	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
 589		return 0;
 590
 591	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
 592		int fa = last->faila;
 593		int fb = last->failb;
 594		int cur_fa = cur->faila;
 595		int cur_fb = cur->failb;
 596
 597		if (last->faila >= last->failb) {
 598			fa = last->failb;
 599			fb = last->faila;
 600		}
 601
 602		if (cur->faila >= cur->failb) {
 603			cur_fa = cur->failb;
 604			cur_fb = cur->faila;
 605		}
 606
 607		if (fa != cur_fa || fb != cur_fb)
 608			return 0;
 609	}
 610	return 1;
 611}
 612
 613static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
 614				  int index)
 615{
 616	return stripe * rbio->stripe_npages + index;
 617}
 618
 619/*
 620 * these are just the pages from the rbio array, not from anything
 621 * the FS sent down to us
 622 */
 623static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
 624				     int index)
 625{
 626	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
 627}
 628
 629/*
 630 * helper to index into the pstripe
 631 */
 632static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 633{
 634	return rbio_stripe_page(rbio, rbio->nr_data, index);
 635}
 636
 637/*
 638 * helper to index into the qstripe, returns null
 639 * if there is no qstripe
 640 */
 641static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 642{
 643	if (rbio->nr_data + 1 == rbio->real_stripes)
 644		return NULL;
 645	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 646}
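    /*
     * stripe_pages layout sketch, assuming 64KiB stripes and 4KiB pages
     * (stripe_npages == 16), for a 4-device RAID6 with nr_data == 2:
     *
     *   index  0..15   data stripe 0
     *   index 16..31   data stripe 1
     *   index 32..47   P stripe (stripe nr_data)
     *   index 48..63   Q stripe (stripe nr_data + 1)
     *
     * which is exactly what rbio_stripe_page_index() computes with
     * stripe * stripe_npages + index.
     */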
 647
 648/*
 649 * The first stripe in the table for a logical address
 650 * has the lock.  rbios are added in one of three ways:
 651 *
 652 * 1) Nobody has the stripe locked yet.  The rbio is given
 653 * the lock and 0 is returned.  The caller must start the IO
 654 * themselves.
 655 *
 656 * 2) Someone has the stripe locked, but we're able to merge
 657 * with the lock owner.  The rbio is freed and the IO will
 658 * start automatically along with the existing rbio.  1 is returned.
 659 *
 660 * 3) Someone has the stripe locked, but we're not able to merge.
 661 * The rbio is added to the lock owner's plug list, or merged into
 662 * an rbio already on the plug list.  When the lock owner unlocks,
 663 * the next rbio on the list is run and the IO is started automatically.
 664 * 1 is returned
 665 *
 666 * If we return 0, the caller still owns the rbio and must continue with
 667 * IO submission.  If we return 1, the caller must assume the rbio has
 668 * already been freed.
 669 */
 670static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 671{
 672	struct btrfs_stripe_hash *h;
 673	struct btrfs_raid_bio *cur;
 674	struct btrfs_raid_bio *pending;
 675	unsigned long flags;
 676	struct btrfs_raid_bio *freeit = NULL;
 677	struct btrfs_raid_bio *cache_drop = NULL;
 678	int ret = 0;
 679
 680	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
 681
 682	spin_lock_irqsave(&h->lock, flags);
 683	list_for_each_entry(cur, &h->hash_list, hash_list) {
 684		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
 685			continue;
 686
 687		spin_lock(&cur->bio_list_lock);
 688
 689		/* Can we steal this cached rbio's pages? */
 690		if (bio_list_empty(&cur->bio_list) &&
 691		    list_empty(&cur->plug_list) &&
 692		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 693		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 694			list_del_init(&cur->hash_list);
 695			refcount_dec(&cur->refs);
 696
 697			steal_rbio(cur, rbio);
 698			cache_drop = cur;
 699			spin_unlock(&cur->bio_list_lock);
 700
 701			goto lockit;
 702		}
 703
 704		/* Can we merge into the lock owner? */
 705		if (rbio_can_merge(cur, rbio)) {
 706			merge_rbio(cur, rbio);
 707			spin_unlock(&cur->bio_list_lock);
 708			freeit = rbio;
 709			ret = 1;
 710			goto out;
 711		}
 712
 713
 714		/*
 715		 * We couldn't merge with the running rbio, see if we can merge
 716		 * with the pending ones.  We don't have to check for rmw_locked
 717		 * because there is no way they are inside finish_rmw right now
 718		 */
 719		list_for_each_entry(pending, &cur->plug_list, plug_list) {
 720			if (rbio_can_merge(pending, rbio)) {
 721				merge_rbio(pending, rbio);
 722				spin_unlock(&cur->bio_list_lock);
 723				freeit = rbio;
 724				ret = 1;
 725				goto out;
 726			}
 727		}
 728
 729		/*
 730		 * No merging, put us on the tail of the plug list, our rbio
 731		 * will be started when the currently running rbio unlocks
 732		 */
 733		list_add_tail(&rbio->plug_list, &cur->plug_list);
 734		spin_unlock(&cur->bio_list_lock);
 735		ret = 1;
 736		goto out;
 737	}
 738lockit:
 739	refcount_inc(&rbio->refs);
 740	list_add(&rbio->hash_list, &h->hash_list);
 741out:
 742	spin_unlock_irqrestore(&h->lock, flags);
 743	if (cache_drop)
 744		remove_rbio_from_cache(cache_drop);
 745	if (freeit)
 746		__free_raid_bio(freeit);
 747	return ret;
 748}
 749
 750/*
 751 * called as rmw or parity rebuild is completed.  If the plug list has more
 752 * rbios waiting for this stripe, the next one on the list will be started
 753 */
 754static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 755{
 756	int bucket;
 757	struct btrfs_stripe_hash *h;
 758	unsigned long flags;
 759	int keep_cache = 0;
 760
 761	bucket = rbio_bucket(rbio);
 762	h = rbio->fs_info->stripe_hash_table->table + bucket;
 763
 764	if (list_empty(&rbio->plug_list))
 765		cache_rbio(rbio);
 766
 767	spin_lock_irqsave(&h->lock, flags);
 768	spin_lock(&rbio->bio_list_lock);
 769
 770	if (!list_empty(&rbio->hash_list)) {
 771		/*
 772		 * if we're still cached and there is no other IO
 773		 * to perform, just leave this rbio here for others
 774		 * to steal from later
 775		 */
 776		if (list_empty(&rbio->plug_list) &&
 777		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 778			keep_cache = 1;
 779			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 780			BUG_ON(!bio_list_empty(&rbio->bio_list));
 781			goto done;
 782		}
 783
 784		list_del_init(&rbio->hash_list);
 785		refcount_dec(&rbio->refs);
 786
 787		/*
 788		 * we use the plug list to hold all the rbios
 789		 * waiting for the chance to lock this stripe.
 790		 * hand the lock over to one of them.
 791		 */
 792		if (!list_empty(&rbio->plug_list)) {
 793			struct btrfs_raid_bio *next;
 794			struct list_head *head = rbio->plug_list.next;
 795
 796			next = list_entry(head, struct btrfs_raid_bio,
 797					  plug_list);
 798
 799			list_del_init(&rbio->plug_list);
 800
 801			list_add(&next->hash_list, &h->hash_list);
 802			refcount_inc(&next->refs);
 803			spin_unlock(&rbio->bio_list_lock);
 804			spin_unlock_irqrestore(&h->lock, flags);
 805
 806			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 807				start_async_work(next, read_rebuild_work);
 808			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
 809				steal_rbio(rbio, next);
 810				start_async_work(next, read_rebuild_work);
 811			} else if (next->operation == BTRFS_RBIO_WRITE) {
 812				steal_rbio(rbio, next);
 813				start_async_work(next, rmw_work);
 814			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 815				steal_rbio(rbio, next);
 816				start_async_work(next, scrub_parity_work);
 817			}
 818
 819			goto done_nolock;
 820		}
 821	}
 822done:
 823	spin_unlock(&rbio->bio_list_lock);
 824	spin_unlock_irqrestore(&h->lock, flags);
 825
 826done_nolock:
 827	if (!keep_cache)
 828		remove_rbio_from_cache(rbio);
 829}
 830
 831static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 832{
 833	int i;
 834
 835	if (!refcount_dec_and_test(&rbio->refs))
 836		return;
 837
 838	WARN_ON(!list_empty(&rbio->stripe_cache));
 839	WARN_ON(!list_empty(&rbio->hash_list));
 840	WARN_ON(!bio_list_empty(&rbio->bio_list));
 841
 842	for (i = 0; i < rbio->nr_pages; i++) {
 843		if (rbio->stripe_pages[i]) {
 844			__free_page(rbio->stripe_pages[i]);
 845			rbio->stripe_pages[i] = NULL;
 846		}
 847	}
 848
 849	btrfs_put_bbio(rbio->bbio);
 850	kfree(rbio);
 851}
 852
 853static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
 854{
 855	struct bio *next;
 856
 857	while (cur) {
 858		next = cur->bi_next;
 859		cur->bi_next = NULL;
 860		cur->bi_status = err;
 861		bio_endio(cur);
 862		cur = next;
 863	}
 864}
 865
 866/*
 867 * this frees the rbio and runs through all the bios in the
 868 * bio_list and calls end_io on them
 869 */
 870static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 871{
 872	struct bio *cur = bio_list_get(&rbio->bio_list);
 873	struct bio *extra;
 874
 875	if (rbio->generic_bio_cnt)
 876		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
 877
 878	/*
 879	 * At this moment, rbio->bio_list is empty, however since rbio does not
 880	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 881	 * hash list, rbio may be merged with others so that rbio->bio_list
 882	 * becomes non-empty.
 883	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
 884	 * more and we can call bio_endio() on all queued bios.
 885	 */
 886	unlock_stripe(rbio);
 887	extra = bio_list_get(&rbio->bio_list);
 888	__free_raid_bio(rbio);
 889
 890	rbio_endio_bio_list(cur, err);
 891	if (extra)
 892		rbio_endio_bio_list(extra, err);
 893}
 894
 895/*
 896 * end io function used by finish_rmw.  When we finally
 897 * get here, we've written a full stripe
 898 */
 899static void raid_write_end_io(struct bio *bio)
 900{
 901	struct btrfs_raid_bio *rbio = bio->bi_private;
 902	blk_status_t err = bio->bi_status;
 903	int max_errors;
 904
 905	if (err)
 906		fail_bio_stripe(rbio, bio);
 907
 908	bio_put(bio);
 909
 910	if (!atomic_dec_and_test(&rbio->stripes_pending))
 911		return;
 912
 913	err = BLK_STS_OK;
 914
 915	/* OK, we have written all the stripes we need to. */
 916	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
 917		     0 : rbio->bbio->max_errors;
 918	if (atomic_read(&rbio->error) > max_errors)
 919		err = BLK_STS_IOERR;
 920
 921	rbio_orig_end_io(rbio, err);
 922}
 923
 924/*
 925 * the read/modify/write code wants to use the original bio for
 926 * any pages it included, and then use the rbio for everything
 927 * else.  This function decides if a given index (stripe number)
 928 * and page number in that stripe fall inside the original bio
 929 * or the rbio.
 930 *
 931 * if you set bio_list_only, you'll get a NULL back for any ranges
 932 * that are outside the bio_list
 933 *
 934 * This doesn't take any refs on anything, you get a bare page pointer
 935 * and the caller must bump refs as required.
 936 *
 937 * You must call index_rbio_pages once before you can trust
 938 * the answers from this function.
 939 */
 940static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 941				 int index, int pagenr, int bio_list_only)
 942{
 943	int chunk_page;
 944	struct page *p = NULL;
 945
 946	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
 947
 948	spin_lock_irq(&rbio->bio_list_lock);
 949	p = rbio->bio_pages[chunk_page];
 950	spin_unlock_irq(&rbio->bio_list_lock);
 951
 952	if (p || bio_list_only)
 953		return p;
 954
 955	return rbio->stripe_pages[chunk_page];
 956}
 957
 958/*
 959 * number of pages we need for the entire stripe across all the
 960 * drives
 961 */
 962static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 963{
 964	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 965}
 966
 967/*
 968 * allocation and initial setup for the btrfs_raid_bio.  Note that
 969 * this does not allocate any pages for rbio->pages.
 970 */
 971static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 972					 struct btrfs_bio *bbio,
 973					 u64 stripe_len)
 974{
 975	struct btrfs_raid_bio *rbio;
 976	int nr_data = 0;
 977	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 978	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 979	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 980	void *p;
 981
 982	rbio = kzalloc(sizeof(*rbio) +
 983		       sizeof(*rbio->stripe_pages) * num_pages +
 984		       sizeof(*rbio->bio_pages) * num_pages +
 985		       sizeof(*rbio->finish_pointers) * real_stripes +
 986		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
 987		       sizeof(*rbio->finish_pbitmap) *
 988				BITS_TO_LONGS(stripe_npages),
 989		       GFP_NOFS);
 990	if (!rbio)
 991		return ERR_PTR(-ENOMEM);
 992
 993	bio_list_init(&rbio->bio_list);
 994	INIT_LIST_HEAD(&rbio->plug_list);
 995	spin_lock_init(&rbio->bio_list_lock);
 996	INIT_LIST_HEAD(&rbio->stripe_cache);
 997	INIT_LIST_HEAD(&rbio->hash_list);
 998	rbio->bbio = bbio;
 999	rbio->fs_info = fs_info;
1000	rbio->stripe_len = stripe_len;
1001	rbio->nr_pages = num_pages;
1002	rbio->real_stripes = real_stripes;
1003	rbio->stripe_npages = stripe_npages;
1004	rbio->faila = -1;
1005	rbio->failb = -1;
1006	refcount_set(&rbio->refs, 1);
1007	atomic_set(&rbio->error, 0);
1008	atomic_set(&rbio->stripes_pending, 0);
1009
1010	/*
1011	 * the stripe_pages, bio_pages, etc arrays point to the extra
1012	 * memory we allocated past the end of the rbio
1013	 */
1014	p = rbio + 1;
1015#define CONSUME_ALLOC(ptr, count)	do {				\
1016		ptr = p;						\
1017		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
1018	} while (0)
1019	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1020	CONSUME_ALLOC(rbio->bio_pages, num_pages);
1021	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1022	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1023	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1024#undef  CONSUME_ALLOC
1025
1026	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1027		nr_data = real_stripes - 1;
1028	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1029		nr_data = real_stripes - 2;
1030	else
1031		BUG();
1032
1033	rbio->nr_data = nr_data;
1034	return rbio;
1035}
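    /*
     * Layout of alloc_rbio()'s single allocation, as carved up by the
     * CONSUME_ALLOC() calls above (everything lives in one kzalloc'd block
     * directly behind the struct):
     *
     *   [struct btrfs_raid_bio]
     *   [stripe_pages:    num_pages    struct page pointers]
     *   [bio_pages:       num_pages    struct page pointers]
     *   [finish_pointers: real_stripes void pointers]
     *   [dbitmap:         BITS_TO_LONGS(stripe_npages) longs]
     *   [finish_pbitmap:  BITS_TO_LONGS(stripe_npages) longs]
     */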
1036
1037/* allocate pages for all the stripes in the bio, including parity */
1038static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1039{
1040	int i;
1041	struct page *page;
1042
1043	for (i = 0; i < rbio->nr_pages; i++) {
1044		if (rbio->stripe_pages[i])
1045			continue;
1046		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1047		if (!page)
1048			return -ENOMEM;
1049		rbio->stripe_pages[i] = page;
1050	}
1051	return 0;
1052}
1053
1054/* only allocate pages for p/q stripes */
1055static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1056{
1057	int i;
1058	struct page *page;
1059
1060	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1061
1062	for (; i < rbio->nr_pages; i++) {
1063		if (rbio->stripe_pages[i])
1064			continue;
1065		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1066		if (!page)
1067			return -ENOMEM;
1068		rbio->stripe_pages[i] = page;
1069	}
1070	return 0;
1071}
1072
1073/*
1074 * add a single page from a specific stripe into our list of bios for IO
1075 * this will try to merge into existing bios if possible, and returns
1076 * zero if all went well.
1077 */
1078static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1079			    struct bio_list *bio_list,
1080			    struct page *page,
1081			    int stripe_nr,
1082			    unsigned long page_index,
1083			    unsigned long bio_max_len)
1084{
1085	struct bio *last = bio_list->tail;
1086	int ret;
1087	struct bio *bio;
1088	struct btrfs_bio_stripe *stripe;
1089	u64 disk_start;
1090
1091	stripe = &rbio->bbio->stripes[stripe_nr];
1092	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1093
1094	/* if the device is missing, just fail this stripe */
1095	if (!stripe->dev->bdev)
1096		return fail_rbio_index(rbio, stripe_nr);
1097
1098	/* see if we can add this page onto our existing bio */
1099	if (last) {
1100		u64 last_end = (u64)last->bi_iter.bi_sector << 9;
1101		last_end += last->bi_iter.bi_size;
1102
1103		/*
1104		 * we can't merge these if they are from different
1105		 * devices or if they are not contiguous
1106		 */
1107		if (last_end == disk_start && !last->bi_status &&
1108		    last->bi_disk == stripe->dev->bdev->bd_disk &&
1109		    last->bi_partno == stripe->dev->bdev->bd_partno) {
1110			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1111			if (ret == PAGE_SIZE)
1112				return 0;
1113		}
1114	}
1115
1116	/* put a new bio on the list */
1117	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1118	btrfs_io_bio(bio)->device = stripe->dev;
1119	bio->bi_iter.bi_size = 0;
1120	bio_set_dev(bio, stripe->dev->bdev);
1121	bio->bi_iter.bi_sector = disk_start >> 9;
1122
1123	bio_add_page(bio, page, PAGE_SIZE, 0);
1124	bio_list_add(bio_list, bio);
1125	return 0;
1126}
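    /*
     * Merge math sketch: last_end is the byte offset just past the previous
     * bio (bi_sector counts 512-byte sectors, hence the << 9), so the page is
     * appended to the existing bio only when it starts exactly at last_end on
     * the same disk and partition.  Otherwise a fresh bio is allocated with
     * room for bio_max_len >> PAGE_SHIFT pages (at least one).
     */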
1127
1128/*
1129 * while we're doing the read/modify/write cycle, we could
1130 * have errors in reading pages off the disk.  This checks
1131 * for errors and if we're not able to read the page it'll
1132 * trigger parity reconstruction.  The rmw will be finished
1133 * after we've reconstructed the failed stripes
1134 */
1135static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1136{
1137	if (rbio->faila >= 0 || rbio->failb >= 0) {
1138		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1139		__raid56_parity_recover(rbio);
1140	} else {
1141		finish_rmw(rbio);
1142	}
1143}
1144
1145/*
1146 * helper function to walk our bio list and populate the bio_pages array with
1147 * the result.  This seems expensive, but it is faster than constantly
1148 * searching through the bio list as we setup the IO in finish_rmw or stripe
1149 * reconstruction.
1150 *
1151 * This must be called before you trust the answers from page_in_rbio
1152 */
1153static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1154{
1155	struct bio *bio;
1156	u64 start;
1157	unsigned long stripe_offset;
1158	unsigned long page_index;
1159
1160	spin_lock_irq(&rbio->bio_list_lock);
1161	bio_list_for_each(bio, &rbio->bio_list) {
1162		struct bio_vec bvec;
1163		struct bvec_iter iter;
1164		int i = 0;
1165
1166		start = (u64)bio->bi_iter.bi_sector << 9;
1167		stripe_offset = start - rbio->bbio->raid_map[0];
1168		page_index = stripe_offset >> PAGE_SHIFT;
1169
1170		if (bio_flagged(bio, BIO_CLONED))
1171			bio->bi_iter = btrfs_io_bio(bio)->iter;
1172
1173		bio_for_each_segment(bvec, bio, iter) {
1174			rbio->bio_pages[page_index + i] = bvec.bv_page;
1175			i++;
1176		}
1177	}
1178	spin_unlock_irq(&rbio->bio_list_lock);
1179}
1180
1181/*
1182 * this is called from one of two situations.  We either
1183 * have a full stripe from the higher layers, or we've read all
1184 * the missing bits off disk.
1185 *
1186 * This will calculate the parity and then send down any
1187 * changed blocks.
1188 */
1189static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1190{
1191	struct btrfs_bio *bbio = rbio->bbio;
1192	void **pointers = rbio->finish_pointers;
1193	int nr_data = rbio->nr_data;
1194	int stripe;
1195	int pagenr;
1196	bool has_qstripe;
1197	struct bio_list bio_list;
1198	struct bio *bio;
1199	int ret;
1200
1201	bio_list_init(&bio_list);
1202
1203	if (rbio->real_stripes - rbio->nr_data == 1)
1204		has_qstripe = false;
1205	else if (rbio->real_stripes - rbio->nr_data == 2)
1206		has_qstripe = true;
1207	else
1208		BUG();
1209
1210	/* at this point we either have a full stripe,
1211	 * or we've read the full stripe from the drive.
1212	 * recalculate the parity and write the new results.
1213	 *
1214	 * We're not allowed to add any new bios to the
1215	 * bio list here, anyone else that wants to
1216	 * change this stripe needs to do their own rmw.
1217	 */
1218	spin_lock_irq(&rbio->bio_list_lock);
1219	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1220	spin_unlock_irq(&rbio->bio_list_lock);
1221
1222	atomic_set(&rbio->error, 0);
1223
1224	/*
1225	 * now that we've set rmw_locked, run through the
1226	 * bio list one last time and map the page pointers
1227	 *
1228	 * We don't cache full rbios because we're assuming
1229	 * the higher layers are unlikely to use this area of
1230	 * the disk again soon.  If they do use it again,
1231	 * hopefully they will send another full bio.
1232	 */
1233	index_rbio_pages(rbio);
1234	if (!rbio_is_full(rbio))
1235		cache_rbio_pages(rbio);
1236	else
1237		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1238
1239	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1240		struct page *p;
1241		/* first collect one page from each data stripe */
1242		for (stripe = 0; stripe < nr_data; stripe++) {
1243			p = page_in_rbio(rbio, stripe, pagenr, 0);
1244			pointers[stripe] = kmap(p);
1245		}
1246
1247		/* then add the parity stripe */
1248		p = rbio_pstripe_page(rbio, pagenr);
1249		SetPageUptodate(p);
1250		pointers[stripe++] = kmap(p);
1251
1252		if (has_qstripe) {
1253
1254			/*
1255			 * raid6, add the qstripe and call the
1256			 * library function to fill in our p/q
1257			 */
1258			p = rbio_qstripe_page(rbio, pagenr);
1259			SetPageUptodate(p);
1260			pointers[stripe++] = kmap(p);
1261
1262			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1263						pointers);
1264		} else {
1265			/* raid5 */
1266			copy_page(pointers[nr_data], pointers[0]);
1267			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1268		}
1269
1270
1271		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1272			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1273	}
1274
1275	/*
1276	 * time to start writing.  Make bios for everything from the
1277	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1278	 * everything else.
1279	 */
1280	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1281		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1282			struct page *page;
1283			if (stripe < rbio->nr_data) {
1284				page = page_in_rbio(rbio, stripe, pagenr, 1);
1285				if (!page)
1286					continue;
1287			} else {
1288			       page = rbio_stripe_page(rbio, stripe, pagenr);
1289			}
1290
1291			ret = rbio_add_io_page(rbio, &bio_list,
1292				       page, stripe, pagenr, rbio->stripe_len);
1293			if (ret)
1294				goto cleanup;
1295		}
1296	}
1297
1298	if (likely(!bbio->num_tgtdevs))
1299		goto write_data;
1300
1301	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1302		if (!bbio->tgtdev_map[stripe])
1303			continue;
1304
1305		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1306			struct page *page;
1307			if (stripe < rbio->nr_data) {
1308				page = page_in_rbio(rbio, stripe, pagenr, 1);
1309				if (!page)
1310					continue;
1311			} else {
1312			       page = rbio_stripe_page(rbio, stripe, pagenr);
1313			}
1314
1315			ret = rbio_add_io_page(rbio, &bio_list, page,
1316					       rbio->bbio->tgtdev_map[stripe],
1317					       pagenr, rbio->stripe_len);
1318			if (ret)
1319				goto cleanup;
1320		}
1321	}
1322
1323write_data:
1324	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1325	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1326
1327	while ((bio = bio_list_pop(&bio_list))) {
1328		bio->bi_private = rbio;
1329		bio->bi_end_io = raid_write_end_io;
1330		bio->bi_opf = REQ_OP_WRITE;
1331
1332		submit_bio(bio);
1333	}
1334	return;
1335
1336cleanup:
1337	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1338
1339	while ((bio = bio_list_pop(&bio_list)))
1340		bio_put(bio);
1341}
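    /*
     * Parity math used above: for RAID5 the P page is the plain XOR of the
     * data pages, built by copying data stripe 0 into P and then XOR-ing in
     * the remaining nr_data - 1 pages with run_xor().  For RAID6,
     * raid6_call.gen_syndrome() fills both P and Q from the same pointer
     * array, Q being the Reed-Solomon syndrome (the sum of g^i * D_i over
     * GF(2^8)) that allows any two missing stripes to be reconstructed.
     */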
1342
1343/*
1344 * helper to find the stripe number for a given bio.  Used to figure out which
1345 * stripe has failed.  This expects the bio to correspond to a physical disk,
1346 * so it looks up based on physical sector numbers.
1347 */
1348static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1349			   struct bio *bio)
1350{
1351	u64 physical = bio->bi_iter.bi_sector;
1352	int i;
1353	struct btrfs_bio_stripe *stripe;
1354
1355	physical <<= 9;
1356
1357	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1358		stripe = &rbio->bbio->stripes[i];
1359		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1360		    stripe->dev->bdev &&
1361		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1362		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1363			return i;
1364		}
1365	}
1366	return -1;
1367}
1368
1369/*
1370 * helper to find the stripe number for a given
1371 * bio (before mapping).  Used to figure out which stripe has
1372 * failed.  This looks up based on logical block numbers.
1373 */
1374static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1375				   struct bio *bio)
1376{
1377	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1378	int i;
1379
1380	for (i = 0; i < rbio->nr_data; i++) {
1381		u64 stripe_start = rbio->bbio->raid_map[i];
1382
1383		if (in_range(logical, stripe_start, rbio->stripe_len))
1384			return i;
1385	}
1386	return -1;
1387}
1388
1389/*
1390 * returns -EIO if we had too many failures
1391 */
1392static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1393{
1394	unsigned long flags;
1395	int ret = 0;
1396
1397	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1398
1399	/* we already know this stripe is bad, move on */
1400	if (rbio->faila == failed || rbio->failb == failed)
1401		goto out;
1402
1403	if (rbio->faila == -1) {
1404		/* first failure on this rbio */
1405		rbio->faila = failed;
1406		atomic_inc(&rbio->error);
1407	} else if (rbio->failb == -1) {
1408		/* second failure on this rbio */
1409		rbio->failb = failed;
1410		atomic_inc(&rbio->error);
1411	} else {
1412		ret = -EIO;
1413	}
1414out:
1415	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1416
1417	return ret;
1418}
1419
1420/*
1421 * helper to fail a stripe based on a physical disk
1422 * bio.
1423 */
1424static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1425			   struct bio *bio)
1426{
1427	int failed = find_bio_stripe(rbio, bio);
1428
1429	if (failed < 0)
1430		return -EIO;
1431
1432	return fail_rbio_index(rbio, failed);
1433}
1434
1435/*
1436 * this sets each page in the bio uptodate.  It should only be used on private
1437 * rbio pages, nothing that comes in from the higher layers
1438 */
1439static void set_bio_pages_uptodate(struct bio *bio)
1440{
1441	struct bio_vec *bvec;
1442	struct bvec_iter_all iter_all;
1443
1444	ASSERT(!bio_flagged(bio, BIO_CLONED));
1445
1446	bio_for_each_segment_all(bvec, bio, iter_all)
1447		SetPageUptodate(bvec->bv_page);
1448}
1449
1450/*
1451 * end io for the read phase of the rmw cycle.  All the bios here are physical
1452 * stripe bios we've read from the disk so we can recalculate the parity of the
1453 * stripe.
1454 *
1455 * This will usually kick off finish_rmw once all the bios are read in, but it
1456 * may trigger parity reconstruction if we had any errors along the way
1457 */
1458static void raid_rmw_end_io(struct bio *bio)
1459{
1460	struct btrfs_raid_bio *rbio = bio->bi_private;
1461
1462	if (bio->bi_status)
1463		fail_bio_stripe(rbio, bio);
1464	else
1465		set_bio_pages_uptodate(bio);
1466
1467	bio_put(bio);
1468
1469	if (!atomic_dec_and_test(&rbio->stripes_pending))
1470		return;
1471
1472	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1473		goto cleanup;
1474
1475	/*
1476	 * this will normally call finish_rmw to start our write
1477	 * but if there are any failed stripes we'll reconstruct
1478	 * from parity first
1479	 */
1480	validate_rbio_for_rmw(rbio);
1481	return;
1482
1483cleanup:
1484
1485	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1486}
1487
1488/*
1489 * the stripe must be locked by the caller.  It will
1490 * unlock after all the writes are done
1491 */
1492static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1493{
1494	int bios_to_read = 0;
1495	struct bio_list bio_list;
1496	int ret;
1497	int pagenr;
1498	int stripe;
1499	struct bio *bio;
1500
1501	bio_list_init(&bio_list);
1502
1503	ret = alloc_rbio_pages(rbio);
1504	if (ret)
1505		goto cleanup;
1506
1507	index_rbio_pages(rbio);
1508
1509	atomic_set(&rbio->error, 0);
1510	/*
1511	 * build a list of bios to read all the missing parts of this
1512	 * stripe
1513	 */
1514	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1515		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1516			struct page *page;
1517			/*
1518			 * we want to find all the pages missing from
1519			 * the rbio and read them from the disk.  If
1520			 * page_in_rbio finds a page in the bio list
1521			 * we don't need to read it off the stripe.
1522			 */
1523			page = page_in_rbio(rbio, stripe, pagenr, 1);
1524			if (page)
1525				continue;
1526
1527			page = rbio_stripe_page(rbio, stripe, pagenr);
1528			/*
1529			 * the bio cache may have handed us an uptodate
1530			 * page.  If so, be happy and use it
1531			 */
1532			if (PageUptodate(page))
1533				continue;
1534
1535			ret = rbio_add_io_page(rbio, &bio_list, page,
1536				       stripe, pagenr, rbio->stripe_len);
1537			if (ret)
1538				goto cleanup;
1539		}
1540	}
1541
1542	bios_to_read = bio_list_size(&bio_list);
1543	if (!bios_to_read) {
1544		/*
1545		 * this can happen if others have merged with
1546		 * us, it means there is nothing left to read.
1547		 * But if there are missing devices it may not be
1548		 * safe to do the full stripe write yet.
1549		 */
1550		goto finish;
1551	}
1552
1553	/*
1554	 * the bbio may be freed once we submit the last bio.  Make sure
1555	 * not to touch it after that
1556	 */
1557	atomic_set(&rbio->stripes_pending, bios_to_read);
1558	while ((bio = bio_list_pop(&bio_list))) {
1559		bio->bi_private = rbio;
1560		bio->bi_end_io = raid_rmw_end_io;
1561		bio->bi_opf = REQ_OP_READ;
1562
1563		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1564
1565		submit_bio(bio);
1566	}
1567	/* the actual write will happen once the reads are done */
1568	return 0;
1569
1570cleanup:
1571	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1572
1573	while ((bio = bio_list_pop(&bio_list)))
1574		bio_put(bio);
1575
1576	return -EIO;
1577
1578finish:
1579	validate_rbio_for_rmw(rbio);
1580	return 0;
1581}
1582
1583/*
1584 * if the upper layers pass in a full stripe, we thank them by only allocating
1585 * enough pages to hold the parity, and sending it all down quickly.
1586 */
1587static int full_stripe_write(struct btrfs_raid_bio *rbio)
1588{
1589	int ret;
1590
1591	ret = alloc_rbio_parity_pages(rbio);
1592	if (ret) {
1593		__free_raid_bio(rbio);
1594		return ret;
1595	}
1596
1597	ret = lock_stripe_add(rbio);
1598	if (ret == 0)
1599		finish_rmw(rbio);
1600	return 0;
1601}
1602
1603/*
1604 * partial stripe writes get handed over to async helpers.
1605 * We're really hoping to merge a few more writes into this
1606 * rbio before calculating new parity
1607 */
1608static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1609{
1610	int ret;
1611
1612	ret = lock_stripe_add(rbio);
1613	if (ret == 0)
1614		start_async_work(rbio, rmw_work);
1615	return 0;
1616}
1617
1618/*
1619 * sometimes while we're reading from the drive to
1620 * recalculate parity, enough new bios come in to create
1621 * a full stripe.  So we do a check here to see if we can
1622 * go directly to finish_rmw
1623 */
1624static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1625{
1626	/* head off into rmw land if we don't have a full stripe */
1627	if (!rbio_is_full(rbio))
1628		return partial_stripe_write(rbio);
1629	return full_stripe_write(rbio);
1630}
1631
1632/*
1633 * We use plugging call backs to collect full stripes.
1634 * Any time we get a partial stripe write while plugged
1635 * we collect it into a list.  When the unplug comes down,
1636 * we sort the list by logical block number and merge
1637 * everything we can into the same rbios
1638 */
1639struct btrfs_plug_cb {
1640	struct blk_plug_cb cb;
1641	struct btrfs_fs_info *info;
1642	struct list_head rbio_list;
1643	struct btrfs_work work;
1644};
1645
1646/*
1647 * rbios on the plug list are sorted for easier merging.
1648 */
1649static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1650{
1651	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1652						 plug_list);
1653	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1654						 plug_list);
1655	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1656	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1657
1658	if (a_sector < b_sector)
1659		return -1;
1660	if (a_sector > b_sector)
1661		return 1;
1662	return 0;
1663}
1664
1665static void run_plug(struct btrfs_plug_cb *plug)
1666{
1667	struct btrfs_raid_bio *cur;
1668	struct btrfs_raid_bio *last = NULL;
1669
1670	/*
1671	 * sort our plug list then try to merge
1672	 * everything we can in hopes of creating full
1673	 * stripes.
1674	 */
1675	list_sort(NULL, &plug->rbio_list, plug_cmp);
1676	while (!list_empty(&plug->rbio_list)) {
1677		cur = list_entry(plug->rbio_list.next,
1678				 struct btrfs_raid_bio, plug_list);
1679		list_del_init(&cur->plug_list);
1680
1681		if (rbio_is_full(cur)) {
1682			int ret;
1683
1684			/* we have a full stripe, send it down */
1685			ret = full_stripe_write(cur);
1686			BUG_ON(ret);
1687			continue;
1688		}
1689		if (last) {
1690			if (rbio_can_merge(last, cur)) {
1691				merge_rbio(last, cur);
1692				__free_raid_bio(cur);
1693				continue;
1694
1695			}
1696			__raid56_parity_write(last);
1697		}
1698		last = cur;
1699	}
1700	if (last) {
1701		__raid56_parity_write(last);
1702	}
1703	kfree(plug);
1704}
1705
1706/*
1707 * if the unplug comes from schedule, we have to push the
1708 * work off to a helper thread
1709 */
1710static void unplug_work(struct btrfs_work *work)
1711{
1712	struct btrfs_plug_cb *plug;
1713	plug = container_of(work, struct btrfs_plug_cb, work);
1714	run_plug(plug);
1715}
1716
1717static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1718{
1719	struct btrfs_plug_cb *plug;
1720	plug = container_of(cb, struct btrfs_plug_cb, cb);
1721
1722	if (from_schedule) {
1723		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1724		btrfs_queue_work(plug->info->rmw_workers,
1725				 &plug->work);
1726		return;
1727	}
1728	run_plug(plug);
1729}
1730
1731/*
1732 * our main entry point for writes from the rest of the FS.
1733 */
1734int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1735			struct btrfs_bio *bbio, u64 stripe_len)
1736{
1737	struct btrfs_raid_bio *rbio;
1738	struct btrfs_plug_cb *plug = NULL;
1739	struct blk_plug_cb *cb;
1740	int ret;
1741
1742	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1743	if (IS_ERR(rbio)) {
1744		btrfs_put_bbio(bbio);
1745		return PTR_ERR(rbio);
1746	}
1747	bio_list_add(&rbio->bio_list, bio);
1748	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1749	rbio->operation = BTRFS_RBIO_WRITE;
1750
1751	btrfs_bio_counter_inc_noblocked(fs_info);
1752	rbio->generic_bio_cnt = 1;
1753
1754	/*
1755	 * don't plug on full rbios, just get them out the door
1756	 * as quickly as we can
1757	 */
1758	if (rbio_is_full(rbio)) {
1759		ret = full_stripe_write(rbio);
1760		if (ret)
1761			btrfs_bio_counter_dec(fs_info);
1762		return ret;
1763	}
1764
1765	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1766	if (cb) {
1767		plug = container_of(cb, struct btrfs_plug_cb, cb);
1768		if (!plug->info) {
1769			plug->info = fs_info;
1770			INIT_LIST_HEAD(&plug->rbio_list);
1771		}
1772		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1773		ret = 0;
1774	} else {
1775		ret = __raid56_parity_write(rbio);
1776		if (ret)
1777			btrfs_bio_counter_dec(fs_info);
1778	}
1779	return ret;
1780}
1781
1782/*
1783 * all parity reconstruction happens here.  We've read in everything
1784 * we can find from the drives and this does the heavy lifting of
1785 * sorting the good from the bad.
1786 */
1787static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1788{
1789	int pagenr, stripe;
1790	void **pointers;
1791	int faila = -1, failb = -1;
1792	struct page *page;
1793	blk_status_t err;
1794	int i;
1795
1796	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1797	if (!pointers) {
1798		err = BLK_STS_RESOURCE;
1799		goto cleanup_io;
1800	}
1801
1802	faila = rbio->faila;
1803	failb = rbio->failb;
1804
1805	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1806	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1807		spin_lock_irq(&rbio->bio_list_lock);
1808		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1809		spin_unlock_irq(&rbio->bio_list_lock);
1810	}
1811
1812	index_rbio_pages(rbio);
1813
1814	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1815		/*
1816		 * Now we just use bitmap to mark the horizontal stripes in
1817		 * which we have data when doing parity scrub.
1818		 */
1819		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1820		    !test_bit(pagenr, rbio->dbitmap))
1821			continue;
1822
1823		/* setup our array of pointers with pages
1824		 * from each stripe
1825		 */
1826		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1827			/*
1828			 * if we're rebuilding a read, we have to use
1829			 * pages from the bio list
1830			 */
1831			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1832			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1833			    (stripe == faila || stripe == failb)) {
1834				page = page_in_rbio(rbio, stripe, pagenr, 0);
1835			} else {
1836				page = rbio_stripe_page(rbio, stripe, pagenr);
1837			}
1838			pointers[stripe] = kmap(page);
1839		}
1840
1841		/* all raid6 handling here */
1842		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1843			/*
1844			 * single failure, rebuild from parity raid5
1845			 * style
1846			 */
1847			if (failb < 0) {
1848				if (faila == rbio->nr_data) {
1849					/*
1850					 * Just the P stripe has failed, without
1851					 * a bad data or Q stripe.
1852					 * TODO, we should redo the xor here.
1853					 */
1854					err = BLK_STS_IOERR;
1855					goto cleanup;
1856				}
1857				/*
1858				 * a single failure in raid6 is rebuilt
1859				 * in the pstripe code below
1860				 */
1861				goto pstripe;
1862			}
1863
1864			/* make sure our ps and qs are in order */
1865			if (faila > failb)
1866				swap(faila, failb);
1867
1868			/* if the q stripe is failed, do a pstripe reconstruction
1869			 * from the xors.
1870			 * If both the q stripe and the P stripe are failed, we're
1871			 * here due to a crc mismatch and we can't give them the
1872			 * data they want
1873			 */
1874			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1875				if (rbio->bbio->raid_map[faila] ==
1876				    RAID5_P_STRIPE) {
1877					err = BLK_STS_IOERR;
1878					goto cleanup;
1879				}
1880				/*
1881				 * otherwise we have one bad data stripe and
1882				 * a good P stripe.  raid5!
1883				 */
1884				goto pstripe;
1885			}
1886
1887			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1888				raid6_datap_recov(rbio->real_stripes,
1889						  PAGE_SIZE, faila, pointers);
1890			} else {
1891				raid6_2data_recov(rbio->real_stripes,
1892						  PAGE_SIZE, faila, failb,
1893						  pointers);
1894			}
1895		} else {
1896			void *p;
1897
1898			/* rebuild from P stripe here (raid5 or raid6) */
1899			BUG_ON(failb != -1);
1900pstripe:
1901			/* Copy parity block into failed block to start with */
1902			copy_page(pointers[faila], pointers[rbio->nr_data]);
1903
1904			/* rearrange the pointer array */
1905			p = pointers[faila];
1906			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1907				pointers[stripe] = pointers[stripe + 1];
1908			pointers[rbio->nr_data - 1] = p;
1909
1910			/* xor in the rest */
1911			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1912		}
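    		/*
    		 * Recovery sketch for the pstripe path above: P is copied over
    		 * the failed page's buffer, the pointer array is rotated so
    		 * that buffer sits in the last data slot, and run_xor() then
    		 * folds the nr_data - 1 surviving data pages into it.  Since
    		 * P = D0 ^ ... ^ Dn-1, the result is the missing data page.
    		 */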
1913		/* if we're doing this rebuild as part of an rmw, go through
1914		 * and set all of our private rbio pages in the
1915		 * failed stripes as uptodate.  This way finish_rmw will
1916		 * know they can be trusted.  If this was a read reconstruction,
1917		 * other endio functions will fiddle the uptodate bits
1918		 */
1919		if (rbio->operation == BTRFS_RBIO_WRITE) {
1920			for (i = 0;  i < rbio->stripe_npages; i++) {
1921				if (faila != -1) {
1922					page = rbio_stripe_page(rbio, faila, i);
1923					SetPageUptodate(page);
1924				}
1925				if (failb != -1) {
1926					page = rbio_stripe_page(rbio, failb, i);
1927					SetPageUptodate(page);
1928				}
1929			}
1930		}
1931		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1932			/*
1933			 * if we're rebuilding a read, we have to use
1934			 * pages from the bio list
1935			 */
1936			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1937			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1938			    (stripe == faila || stripe == failb)) {
1939				page = page_in_rbio(rbio, stripe, pagenr, 0);
1940			} else {
1941				page = rbio_stripe_page(rbio, stripe, pagenr);
1942			}
1943			kunmap(page);
1944		}
1945	}
1946
1947	err = BLK_STS_OK;
1948cleanup:
1949	kfree(pointers);
1950
1951cleanup_io:
1952	/*
1953	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1954	 * valid rbio which is consistent with ondisk content, thus such a
1955	 * valid rbio can be cached to avoid further disk reads.
1956	 */
1957	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1958	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1959		/*
1960		 * - In case of two failures, where rbio->failb != -1:
1961		 *
1962		 *   Do not cache this rbio since the above read reconstruction
1963		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1964		 *   changed some content of stripes which are not identical to
1965		 *   on-disk content any more, otherwise, a later write/recover
1966		 *   may steal stripe_pages from this rbio and end up with
1967		 *   corruptions or rebuild failures.
1968		 *
1969		 * - In case of single failure, where rbio->failb == -1:
1970		 *
1971		 *   Cache this rbio iff the above read reconstruction is
1972		 *   executed without problems.
1973		 */
1974		if (err == BLK_STS_OK && rbio->failb < 0)
1975			cache_rbio_pages(rbio);
1976		else
1977			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1978
1979		rbio_orig_end_io(rbio, err);
1980	} else if (err == BLK_STS_OK) {
1981		rbio->faila = -1;
1982		rbio->failb = -1;
1983
1984		if (rbio->operation == BTRFS_RBIO_WRITE)
1985			finish_rmw(rbio);
1986		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1987			finish_parity_scrub(rbio, 0);
1988		else
1989			BUG();
1990	} else {
1991		rbio_orig_end_io(rbio, err);
1992	}
1993}
1994
1995/*
1996 * This is called only for stripes we've read from disk to
1997 * reconstruct the parity.
1998 */
1999static void raid_recover_end_io(struct bio *bio)
2000{
2001	struct btrfs_raid_bio *rbio = bio->bi_private;
2002
2003	/*
2004	 * we only read stripe pages off the disk, set them
2005	 * up to date if there were no errors
2006	 */
2007	if (bio->bi_status)
2008		fail_bio_stripe(rbio, bio);
2009	else
2010		set_bio_pages_uptodate(bio);
2011	bio_put(bio);
2012
2013	if (!atomic_dec_and_test(&rbio->stripes_pending))
2014		return;
2015
2016	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2017		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2018	else
2019		__raid_recover_end_io(rbio);
2020}
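/*
 * For reference (hedged, based on how btrfs maps raid56 block groups):
 * bbio->max_errors is the number of stripe failures the profile can absorb,
 * typically 1 for raid5 and 2 for raid6.  The atomic stripes_pending counter
 * above makes the last completing read either fail the whole rbio, when the
 * error count exceeds that budget, or kick off the actual reconstruction.
 */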
2021
2022/*
2023 * reads everything we need off the disk to reconstruct
2024 * the parity. endio handlers trigger final reconstruction
2025 * when the IO is done.
2026 *
2027 * This is used both for reads from the higher layers and for
2028 * parity construction required to finish a rmw cycle.
2029 */
2030static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2031{
2032	int bios_to_read = 0;
2033	struct bio_list bio_list;
2034	int ret;
2035	int pagenr;
2036	int stripe;
2037	struct bio *bio;
2038
2039	bio_list_init(&bio_list);
2040
2041	ret = alloc_rbio_pages(rbio);
2042	if (ret)
2043		goto cleanup;
2044
2045	atomic_set(&rbio->error, 0);
2046
2047	/*
2048	 * read everything that hasn't failed.  Thanks to the
2049	 * stripe cache, it is possible that some or all of these
2050	 * pages are going to be uptodate.
2051	 */
2052	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2053		if (rbio->faila == stripe || rbio->failb == stripe) {
2054			atomic_inc(&rbio->error);
2055			continue;
2056		}
2057
2058		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2059			struct page *p;
2060
2061			/*
2062			 * the rmw code may have already read this
2063			 * page in
2064			 */
2065			p = rbio_stripe_page(rbio, stripe, pagenr);
2066			if (PageUptodate(p))
2067				continue;
2068
2069			ret = rbio_add_io_page(rbio, &bio_list,
2070				       rbio_stripe_page(rbio, stripe, pagenr),
2071				       stripe, pagenr, rbio->stripe_len);
2072			if (ret < 0)
2073				goto cleanup;
2074		}
2075	}
2076
2077	bios_to_read = bio_list_size(&bio_list);
2078	if (!bios_to_read) {
2079		/*
2080		 * we might have no bios to read just because the pages
2081		 * were up to date, or we might have no bios to read because
2082		 * the devices were gone.
2083		 */
2084		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2085			__raid_recover_end_io(rbio);
2086			return 0;
2087		} else {
2088			goto cleanup;
2089		}
2090	}
2091
2092	/*
2093	 * the bbio may be freed once we submit the last bio.  Make sure
2094	 * not to touch it after that
2095	 */
2096	atomic_set(&rbio->stripes_pending, bios_to_read);
2097	while ((bio = bio_list_pop(&bio_list))) {
2098		bio->bi_private = rbio;
2099		bio->bi_end_io = raid_recover_end_io;
2100		bio->bi_opf = REQ_OP_READ;
2101
2102		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2103
2104		submit_bio(bio);
2105	}
2106
2107	return 0;
2108
2109cleanup:
2110	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2111	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2112		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2113
2114	while ((bio = bio_list_pop(&bio_list)))
2115		bio_put(bio);
2116
2117	return -EIO;
2118}
2119
2120/*
2121 * the main entry point for reads from the higher layers.  This
2122 * is really only called when the normal read path had a failure,
2123 * so we assume the bio they send down corresponds to a failed part
2124 * of the drive.
2125 */
2126int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2127			  struct btrfs_bio *bbio, u64 stripe_len,
2128			  int mirror_num, int generic_io)
2129{
2130	struct btrfs_raid_bio *rbio;
2131	int ret;
2132
2133	if (generic_io) {
2134		ASSERT(bbio->mirror_num == mirror_num);
2135		btrfs_io_bio(bio)->mirror_num = mirror_num;
2136	}
2137
2138	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2139	if (IS_ERR(rbio)) {
2140		if (generic_io)
2141			btrfs_put_bbio(bbio);
2142		return PTR_ERR(rbio);
2143	}
2144
2145	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2146	bio_list_add(&rbio->bio_list, bio);
2147	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2148
2149	rbio->faila = find_logical_bio_stripe(rbio, bio);
2150	if (rbio->faila == -1) {
2151		btrfs_warn(fs_info,
2152	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2153			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2154			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2155		if (generic_io)
2156			btrfs_put_bbio(bbio);
2157		kfree(rbio);
2158		return -EIO;
2159	}
2160
2161	if (generic_io) {
2162		btrfs_bio_counter_inc_noblocked(fs_info);
2163		rbio->generic_bio_cnt = 1;
2164	} else {
2165		btrfs_get_bbio(bbio);
2166	}
2167
2168	/*
2169	 * Loop retry:
2170	 * for 'mirror == 2', reconstruct from all other stripes.
2171	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2172	 */
2173	if (mirror_num > 2) {
2174		/*
2175		 * 'mirror == 3' is to fail the p stripe and
2176		 * reconstruct from the q stripe.  'mirror > 3' is to
2177		 * fail a data stripe and reconstruct from p+q stripe.
2178		 */
2179		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2180		ASSERT(rbio->failb > 0);
2181		if (rbio->failb <= rbio->faila)
2182			rbio->failb--;
2183	}
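	/*
	 * Worked example with hypothetical numbers: a raid6 stripe with four
	 * data stripes has real_stripes == 6 (data 0-3, P at 4, Q at 5).  With
	 * faila == 1, mirror_num == 3 gives failb = 6 - 2 = 4 (fail the P
	 * stripe), mirror_num == 4 gives failb = 3 (fail the last data
	 * stripe), and mirror_num == 6 gives failb = 1, which collides with
	 * faila and is shifted down to 0 by the check above.
	 */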
2184
2185	ret = lock_stripe_add(rbio);
2186
2187	/*
2188	 * __raid56_parity_recover will end the bio with
2189	 * any errors it hits.  We don't want to return
2190	 * its error value up the stack because our caller
2191	 * will end up calling bio_endio with any nonzero
2192	 * return
2193	 */
2194	if (ret == 0)
2195		__raid56_parity_recover(rbio);
2196	/*
2197	 * our rbio has been added to the list of
2198	 * rbios that will be handled after the
 2199	 * current lock owner is done
2200	 */
2201	return 0;
2202
2203}
2204
2205static void rmw_work(struct btrfs_work *work)
2206{
2207	struct btrfs_raid_bio *rbio;
2208
2209	rbio = container_of(work, struct btrfs_raid_bio, work);
2210	raid56_rmw_stripe(rbio);
2211}
2212
2213static void read_rebuild_work(struct btrfs_work *work)
2214{
2215	struct btrfs_raid_bio *rbio;
2216
2217	rbio = container_of(work, struct btrfs_raid_bio, work);
2218	__raid56_parity_recover(rbio);
2219}
2220
2221/*
2222 * The following code is used to scrub/replace the parity stripe
2223 *
2224 * Caller must have already increased bio_counter for getting @bbio.
2225 *
 2226 * Note: We need to make sure that all the pages added into the scrub/replace
 2227 * raid bio are correct and will not be changed during the scrub/replace, i.e.
 2228 * those pages only hold metadata or file data protected by a checksum.
2229 */
2230
2231struct btrfs_raid_bio *
2232raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2233			       struct btrfs_bio *bbio, u64 stripe_len,
2234			       struct btrfs_device *scrub_dev,
2235			       unsigned long *dbitmap, int stripe_nsectors)
2236{
2237	struct btrfs_raid_bio *rbio;
2238	int i;
2239
2240	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2241	if (IS_ERR(rbio))
2242		return NULL;
2243	bio_list_add(&rbio->bio_list, bio);
2244	/*
2245	 * This is a special bio which is used to hold the completion handler
 2246	 * and make the scrub rbio similar to the other types
2247	 */
2248	ASSERT(!bio->bi_iter.bi_size);
2249	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2250
2251	/*
2252	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2253	 * to the end position, so this search can start from the first parity
2254	 * stripe.
2255	 */
2256	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2257		if (bbio->stripes[i].dev == scrub_dev) {
2258			rbio->scrubp = i;
2259			break;
2260		}
2261	}
2262	ASSERT(i < rbio->real_stripes);
2263
 2264	/* For now we only support the case where sectorsize equals page size */
2265	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2266	ASSERT(rbio->stripe_npages == stripe_nsectors);
2267	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2268
2269	/*
2270	 * We have already increased bio_counter when getting bbio, record it
2271	 * so we can free it at rbio_orig_end_io().
2272	 */
2273	rbio->generic_bio_cnt = 1;
2274
2275	return rbio;
2276}
2277
2278/* Used for both parity scrub and missing. */
2279void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2280			    u64 logical)
2281{
2282	int stripe_offset;
2283	int index;
2284
2285	ASSERT(logical >= rbio->bbio->raid_map[0]);
2286	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2287				rbio->stripe_len * rbio->nr_data);
2288	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2289	index = stripe_offset >> PAGE_SHIFT;
2290	rbio->bio_pages[index] = page;
2291}
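/*
 * Illustration with hypothetical numbers: if raid_map[0] is the logical start
 * of the full stripe, stripe_len is 64K and PAGE_SIZE is 4K, then a page at
 * logical raid_map[0] + 72K has stripe_offset 72K and lands at bio_pages
 * index 18, i.e. the third page of the second data stripe.
 */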
2292
2293/*
 2294 * We only scrub the parity for the horizontal stripes where we have correct
 2295 * data, so we don't need to allocate all pages for all the stripes.
2296 */
2297static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2298{
2299	int i;
2300	int bit;
2301	int index;
2302	struct page *page;
2303
2304	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2305		for (i = 0; i < rbio->real_stripes; i++) {
2306			index = i * rbio->stripe_npages + bit;
2307			if (rbio->stripe_pages[index])
2308				continue;
2309
2310			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2311			if (!page)
2312				return -ENOMEM;
2313			rbio->stripe_pages[index] = page;
2314		}
2315	}
2316	return 0;
2317}
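/*
 * The stripe_pages array is laid out stripe-major: page 'bit' of stripe 'i'
 * lives at index i * stripe_npages + bit.  With a hypothetical stripe_npages
 * of 16, page 5 of stripe 2 sits at index 37.
 */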
2318
2319static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2320					 int need_check)
2321{
2322	struct btrfs_bio *bbio = rbio->bbio;
2323	void **pointers = rbio->finish_pointers;
2324	unsigned long *pbitmap = rbio->finish_pbitmap;
2325	int nr_data = rbio->nr_data;
2326	int stripe;
2327	int pagenr;
2328	bool has_qstripe;
 
2329	struct page *p_page = NULL;
2330	struct page *q_page = NULL;
2331	struct bio_list bio_list;
2332	struct bio *bio;
2333	int is_replace = 0;
2334	int ret;
2335
2336	bio_list_init(&bio_list);
2337
2338	if (rbio->real_stripes - rbio->nr_data == 1)
2339		has_qstripe = false;
2340	else if (rbio->real_stripes - rbio->nr_data == 2)
2341		has_qstripe = true;
2342	else
 
2343		BUG();
 
2344
2345	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2346		is_replace = 1;
2347		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2348	}
2349
2350	/*
 2351	 * The higher layers (the scrubber) are unlikely to use
 2352	 * this area of the disk again soon, so don't cache
 2353	 * it.
2354	 */
2355	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2356
2357	if (!need_check)
2358		goto writeback;
2359
2360	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2361	if (!p_page)
2362		goto cleanup;
2363	SetPageUptodate(p_page);
2364
2365	if (has_qstripe) {
2366		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2367		if (!q_page) {
2368			__free_page(p_page);
2369			goto cleanup;
2370		}
2371		SetPageUptodate(q_page);
2372	}
2373
2374	atomic_set(&rbio->error, 0);
2375
2376	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2377		struct page *p;
2378		void *parity;
2379		/* first collect one page from each data stripe */
2380		for (stripe = 0; stripe < nr_data; stripe++) {
2381			p = page_in_rbio(rbio, stripe, pagenr, 0);
2382			pointers[stripe] = kmap(p);
2383		}
2384
2385		/* then add the parity stripe */
2386		pointers[stripe++] = kmap(p_page);
2387
2388		if (has_qstripe) {
 
2389			/*
2390			 * raid6, add the qstripe and call the
2391			 * library function to fill in our p/q
2392			 */
2393			pointers[stripe++] = kmap(q_page);
2394
2395			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2396						pointers);
2397		} else {
2398			/* raid5 */
2399			copy_page(pointers[nr_data], pointers[0]);
2400			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2401		}
2402
2403		/* Check scrubbing parity and repair it */
2404		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2405		parity = kmap(p);
2406		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2407			copy_page(parity, pointers[rbio->scrubp]);
2408		else
 2409			/* Parity is correct, no writeback needed */
2410			bitmap_clear(rbio->dbitmap, pagenr, 1);
2411		kunmap(p);
2412
2413		for (stripe = 0; stripe < nr_data; stripe++)
2414			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2415		kunmap(p_page);
2416	}
2417
2418	__free_page(p_page);
2419	if (q_page)
2420		__free_page(q_page);
2421
2422writeback:
2423	/*
2424	 * time to start writing.  Make bios for everything from the
2425	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2426	 * everything else.
2427	 */
2428	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2429		struct page *page;
2430
2431		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2432		ret = rbio_add_io_page(rbio, &bio_list,
2433			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2434		if (ret)
2435			goto cleanup;
2436	}
2437
2438	if (!is_replace)
2439		goto submit_write;
2440
2441	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2442		struct page *page;
2443
2444		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2445		ret = rbio_add_io_page(rbio, &bio_list, page,
2446				       bbio->tgtdev_map[rbio->scrubp],
2447				       pagenr, rbio->stripe_len);
2448		if (ret)
2449			goto cleanup;
2450	}
2451
2452submit_write:
2453	nr_data = bio_list_size(&bio_list);
2454	if (!nr_data) {
2455		/* Every parity is right */
2456		rbio_orig_end_io(rbio, BLK_STS_OK);
2457		return;
2458	}
2459
2460	atomic_set(&rbio->stripes_pending, nr_data);
2461
2462	while ((bio = bio_list_pop(&bio_list))) {
2463		bio->bi_private = rbio;
2464		bio->bi_end_io = raid_write_end_io;
2465		bio->bi_opf = REQ_OP_WRITE;
2466
2467		submit_bio(bio);
2468	}
2469	return;
2470
2471cleanup:
2472	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2473
2474	while ((bio = bio_list_pop(&bio_list)))
2475		bio_put(bio);
2476}
2477
2478static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2479{
2480	if (stripe >= 0 && stripe < rbio->nr_data)
2481		return 1;
2482	return 0;
2483}
2484
2485/*
2486 * While we're doing the parity check and repair, we could have errors
2487 * in reading pages off the disk.  This checks for errors and if we're
2488 * not able to read the page it'll trigger parity reconstruction.  The
2489 * parity scrub will be finished after we've reconstructed the failed
2490 * stripes
2491 */
2492static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2493{
2494	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2495		goto cleanup;
2496
2497	if (rbio->faila >= 0 || rbio->failb >= 0) {
2498		int dfail = 0, failp = -1;
2499
2500		if (is_data_stripe(rbio, rbio->faila))
2501			dfail++;
2502		else if (is_parity_stripe(rbio->faila))
2503			failp = rbio->faila;
2504
2505		if (is_data_stripe(rbio, rbio->failb))
2506			dfail++;
2507		else if (is_parity_stripe(rbio->failb))
2508			failp = rbio->failb;
2509
2510		/*
 2511		 * Because we can not use the scrubbing parity to repair
 2512		 * the data, our repair capability is reduced by one.
 2513		 * (In the case of RAID5, we can not repair anything.)
2514		 */
2515		if (dfail > rbio->bbio->max_errors - 1)
2516			goto cleanup;
2517
2518		/*
 2519		 * If all the data is good and only the parity is bad,
 2520		 * just repair the parity.
2521		 */
2522		if (dfail == 0) {
2523			finish_parity_scrub(rbio, 0);
2524			return;
2525		}
2526
2527		/*
 2528		 * At this point we have one corrupted data stripe and one
 2529		 * corrupted parity on RAID6.  If the corrupted parity is the
 2530		 * scrubbing parity, we can luckily use the other parity to
 2531		 * repair the data; otherwise the data stripe can not be repaired.
2532		 */
2533		if (failp != rbio->scrubp)
2534			goto cleanup;
2535
2536		__raid_recover_end_io(rbio);
2537	} else {
2538		finish_parity_scrub(rbio, 1);
2539	}
2540	return;
2541
2542cleanup:
2543	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2544}
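/*
 * To make the dfail check above concrete: on raid6 max_errors is 2, but the
 * parity we are scrubbing cannot be trusted as a repair source, so only one
 * additional data stripe failure (dfail <= max_errors - 1) can be tolerated.
 * On raid5 max_errors is 1, so any data stripe failure is already fatal here.
 */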
2545
2546/*
 2547 * end io for the read phase of the parity scrub cycle.  All the bios here are
 2548 * physical stripe bios we've read from the disk so we can recalculate the
 2549 * parity of the stripe.
 2550 *
 2551 * This will usually kick off finish_parity_scrub once all the bios are read
 2552 * in, but it may trigger parity reconstruction if we had any errors on the way
2553 */
2554static void raid56_parity_scrub_end_io(struct bio *bio)
2555{
2556	struct btrfs_raid_bio *rbio = bio->bi_private;
2557
2558	if (bio->bi_status)
2559		fail_bio_stripe(rbio, bio);
2560	else
2561		set_bio_pages_uptodate(bio);
2562
2563	bio_put(bio);
2564
2565	if (!atomic_dec_and_test(&rbio->stripes_pending))
2566		return;
2567
2568	/*
 2569	 * this will normally call finish_parity_scrub to start our
 2570	 * write, but if there are any failed stripes we'll reconstruct
 2571	 * from parity first
2572	 */
2573	validate_rbio_for_parity_scrub(rbio);
2574}
2575
2576static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2577{
2578	int bios_to_read = 0;
2579	struct bio_list bio_list;
2580	int ret;
2581	int pagenr;
2582	int stripe;
2583	struct bio *bio;
2584
2585	bio_list_init(&bio_list);
2586
2587	ret = alloc_rbio_essential_pages(rbio);
2588	if (ret)
2589		goto cleanup;
2590
2591	atomic_set(&rbio->error, 0);
2592	/*
2593	 * build a list of bios to read all the missing parts of this
2594	 * stripe
2595	 */
2596	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2597		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2598			struct page *page;
2599			/*
2600			 * we want to find all the pages missing from
2601			 * the rbio and read them from the disk.  If
2602			 * page_in_rbio finds a page in the bio list
2603			 * we don't need to read it off the stripe.
2604			 */
2605			page = page_in_rbio(rbio, stripe, pagenr, 1);
2606			if (page)
2607				continue;
2608
2609			page = rbio_stripe_page(rbio, stripe, pagenr);
2610			/*
2611			 * the bio cache may have handed us an uptodate
2612			 * page.  If so, be happy and use it
2613			 */
2614			if (PageUptodate(page))
2615				continue;
2616
2617			ret = rbio_add_io_page(rbio, &bio_list, page,
2618				       stripe, pagenr, rbio->stripe_len);
2619			if (ret)
2620				goto cleanup;
2621		}
2622	}
2623
2624	bios_to_read = bio_list_size(&bio_list);
2625	if (!bios_to_read) {
2626		/*
2627		 * this can happen if others have merged with
2628		 * us, it means there is nothing left to read.
2629		 * But if there are missing devices it may not be
2630		 * safe to do the full stripe write yet.
2631		 */
2632		goto finish;
2633	}
2634
2635	/*
2636	 * the bbio may be freed once we submit the last bio.  Make sure
2637	 * not to touch it after that
2638	 */
2639	atomic_set(&rbio->stripes_pending, bios_to_read);
2640	while ((bio = bio_list_pop(&bio_list))) {
2641		bio->bi_private = rbio;
2642		bio->bi_end_io = raid56_parity_scrub_end_io;
2643		bio->bi_opf = REQ_OP_READ;
2644
2645		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2646
2647		submit_bio(bio);
2648	}
2649	/* the actual write will happen once the reads are done */
2650	return;
2651
2652cleanup:
2653	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2654
2655	while ((bio = bio_list_pop(&bio_list)))
2656		bio_put(bio);
2657
2658	return;
2659
2660finish:
2661	validate_rbio_for_parity_scrub(rbio);
2662}
2663
2664static void scrub_parity_work(struct btrfs_work *work)
2665{
2666	struct btrfs_raid_bio *rbio;
2667
2668	rbio = container_of(work, struct btrfs_raid_bio, work);
2669	raid56_parity_scrub_stripe(rbio);
2670}
2671
2672void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2673{
2674	if (!lock_stripe_add(rbio))
2675		start_async_work(rbio, scrub_parity_work);
2676}
2677
2678/* The following code is used for dev replace of a missing RAID 5/6 device. */
2679
2680struct btrfs_raid_bio *
2681raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2682			  struct btrfs_bio *bbio, u64 length)
2683{
2684	struct btrfs_raid_bio *rbio;
2685
2686	rbio = alloc_rbio(fs_info, bbio, length);
2687	if (IS_ERR(rbio))
2688		return NULL;
2689
2690	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2691	bio_list_add(&rbio->bio_list, bio);
2692	/*
2693	 * This is a special bio which is used to hold the completion handler
 2694	 * and make the scrub rbio similar to the other types
2695	 */
2696	ASSERT(!bio->bi_iter.bi_size);
2697
2698	rbio->faila = find_logical_bio_stripe(rbio, bio);
2699	if (rbio->faila == -1) {
2700		BUG();
2701		kfree(rbio);
2702		return NULL;
2703	}
2704
2705	/*
2706	 * When we get bbio, we have already increased bio_counter, record it
2707	 * so we can free it at rbio_orig_end_io()
2708	 */
2709	rbio->generic_bio_cnt = 1;
2710
2711	return rbio;
2712}
2713
2714void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2715{
2716	if (!lock_stripe_add(rbio))
2717		start_async_work(rbio, read_rebuild_work);
2718}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Fusion-io  All rights reserved.
   4 * Copyright (C) 2012 Intel Corp. All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/wait.h>
   9#include <linux/bio.h>
  10#include <linux/slab.h>
  11#include <linux/buffer_head.h>
  12#include <linux/blkdev.h>
  13#include <linux/random.h>
  14#include <linux/iocontext.h>
  15#include <linux/capability.h>
  16#include <linux/ratelimit.h>
  17#include <linux/kthread.h>
  18#include <linux/raid/pq.h>
  19#include <linux/hash.h>
  20#include <linux/list_sort.h>
  21#include <linux/raid/xor.h>
  22#include <linux/mm.h>
  23#include <asm/div64.h>
  24#include "ctree.h"
  25#include "extent_map.h"
  26#include "disk-io.h"
  27#include "transaction.h"
  28#include "print-tree.h"
  29#include "volumes.h"
  30#include "raid56.h"
  31#include "async-thread.h"
  32#include "check-integrity.h"
  33#include "rcu-string.h"
  34
  35/* set when additional merges to this rbio are not allowed */
  36#define RBIO_RMW_LOCKED_BIT	1
  37
  38/*
  39 * set when this rbio is sitting in the hash, but it is just a cache
  40 * of past RMW
  41 */
  42#define RBIO_CACHE_BIT		2
  43
  44/*
  45 * set when it is safe to trust the stripe_pages for caching
  46 */
  47#define RBIO_CACHE_READY_BIT	3
  48
  49#define RBIO_CACHE_SIZE 1024
  50
  51enum btrfs_rbio_ops {
  52	BTRFS_RBIO_WRITE,
  53	BTRFS_RBIO_READ_REBUILD,
  54	BTRFS_RBIO_PARITY_SCRUB,
  55	BTRFS_RBIO_REBUILD_MISSING,
  56};
  57
  58struct btrfs_raid_bio {
  59	struct btrfs_fs_info *fs_info;
  60	struct btrfs_bio *bbio;
  61
  62	/* while we're doing rmw on a stripe
  63	 * we put it into a hash table so we can
  64	 * lock the stripe and merge more rbios
  65	 * into it.
  66	 */
  67	struct list_head hash_list;
  68
  69	/*
  70	 * LRU list for the stripe cache
  71	 */
  72	struct list_head stripe_cache;
  73
  74	/*
  75	 * for scheduling work in the helper threads
  76	 */
  77	struct btrfs_work work;
  78
  79	/*
  80	 * bio list and bio_list_lock are used
  81	 * to add more bios into the stripe
  82	 * in hopes of avoiding the full rmw
  83	 */
  84	struct bio_list bio_list;
  85	spinlock_t bio_list_lock;
  86
  87	/* also protected by the bio_list_lock, the
  88	 * plug list is used by the plugging code
  89	 * to collect partial bios while plugged.  The
  90	 * stripe locking code also uses it to hand off
  91	 * the stripe lock to the next pending IO
  92	 */
  93	struct list_head plug_list;
  94
  95	/*
  96	 * flags that tell us if it is safe to
  97	 * merge with this bio
  98	 */
  99	unsigned long flags;
 100
 101	/* size of each individual stripe on disk */
 102	int stripe_len;
 103
 104	/* number of data stripes (no p/q) */
 105	int nr_data;
 106
 107	int real_stripes;
 108
 109	int stripe_npages;
 110	/*
 111	 * set if we're doing a parity rebuild
 112	 * for a read from higher up, which is handled
 113	 * differently from a parity rebuild as part of
 114	 * rmw
 115	 */
 116	enum btrfs_rbio_ops operation;
 117
 118	/* first bad stripe */
 119	int faila;
 120
 121	/* second bad stripe (for raid6 use) */
 122	int failb;
 123
 124	int scrubp;
 125	/*
 126	 * number of pages needed to represent the full
 127	 * stripe
 128	 */
 129	int nr_pages;
 130
 131	/*
 132	 * size of all the bios in the bio_list.  This
 133	 * helps us decide if the rbio maps to a full
 134	 * stripe or not
 135	 */
 136	int bio_list_bytes;
 137
 138	int generic_bio_cnt;
 139
 140	refcount_t refs;
 141
 142	atomic_t stripes_pending;
 143
 144	atomic_t error;
 145	/*
 146	 * these are two arrays of pointers.  We allocate the
 147	 * rbio big enough to hold them both and setup their
 148	 * locations when the rbio is allocated
 149	 */
 150
 151	/* pointers to pages that we allocated for
 152	 * reading/writing stripes directly from the disk (including P/Q)
 153	 */
 154	struct page **stripe_pages;
 155
 156	/*
 157	 * pointers to the pages in the bio_list.  Stored
 158	 * here for faster lookup
 159	 */
 160	struct page **bio_pages;
 161
 162	/*
 163	 * bitmap to record which horizontal stripe has data
 164	 */
 165	unsigned long *dbitmap;
 166};
 167
 168static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 169static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
 170static void rmw_work(struct btrfs_work *work);
 171static void read_rebuild_work(struct btrfs_work *work);
 172static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
 173static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 174static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
 175static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
 176static void __free_raid_bio(struct btrfs_raid_bio *rbio);
 177static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 178static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 179
 180static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 181					 int need_check);
 182static void async_scrub_parity(struct btrfs_raid_bio *rbio);
 183
 184/*
 185 * the stripe hash table is used for locking, and to collect
 186 * bios in hopes of making a full stripe
 187 */
 188int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 189{
 190	struct btrfs_stripe_hash_table *table;
 191	struct btrfs_stripe_hash_table *x;
 192	struct btrfs_stripe_hash *cur;
 193	struct btrfs_stripe_hash *h;
 194	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 195	int i;
 196	int table_size;
 197
 198	if (info->stripe_hash_table)
 199		return 0;
 200
 201	/*
 202	 * The table is large, starting with order 4 and can go as high as
 203	 * order 7 in case lock debugging is turned on.
 204	 *
 205	 * Try harder to allocate and fallback to vmalloc to lower the chance
 206	 * of a failing mount.
 207	 */
 208	table_size = sizeof(*table) + sizeof(*h) * num_entries;
 209	table = kvzalloc(table_size, GFP_KERNEL);
 210	if (!table)
 211		return -ENOMEM;
 212
 213	spin_lock_init(&table->cache_lock);
 214	INIT_LIST_HEAD(&table->stripe_cache);
 215
 216	h = table->table;
 217
 218	for (i = 0; i < num_entries; i++) {
 219		cur = h + i;
 220		INIT_LIST_HEAD(&cur->hash_list);
 221		spin_lock_init(&cur->lock);
 222	}
 223
 224	x = cmpxchg(&info->stripe_hash_table, NULL, table);
 225	if (x)
 226		kvfree(x);
 227	return 0;
 228}
 229
 230/*
 231 * caching an rbio means to copy anything from the
 232 * bio_pages array into the stripe_pages array.  We
 233 * use the page uptodate bit in the stripe cache array
 234 * to indicate if it has valid data
 235 *
 236 * once the caching is done, we set the cache ready
 237 * bit.
 238 */
 239static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 240{
 241	int i;
 242	char *s;
 243	char *d;
 244	int ret;
 245
 246	ret = alloc_rbio_pages(rbio);
 247	if (ret)
 248		return;
 249
 250	for (i = 0; i < rbio->nr_pages; i++) {
 251		if (!rbio->bio_pages[i])
 252			continue;
 253
 254		s = kmap(rbio->bio_pages[i]);
 255		d = kmap(rbio->stripe_pages[i]);
 256
 257		memcpy(d, s, PAGE_SIZE);
 258
 259		kunmap(rbio->bio_pages[i]);
 260		kunmap(rbio->stripe_pages[i]);
 261		SetPageUptodate(rbio->stripe_pages[i]);
 262	}
 263	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 264}
 265
 266/*
 267 * we hash on the first logical address of the stripe
 268 */
 269static int rbio_bucket(struct btrfs_raid_bio *rbio)
 270{
 271	u64 num = rbio->bbio->raid_map[0];
 272
 273	/*
 274	 * we shift down quite a bit.  We're using byte
 275	 * addressing, and most of the lower bits are zeros.
 276	 * This tends to upset hash_64, and it consistently
 277	 * returns just one or two different values.
 278	 *
 279	 * shifting off the lower bits fixes things.
 280	 */
 281	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 282}
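/*
 * Illustration (hypothetical addresses): full stripe starts are typically
 * 64K aligned, so the low 16 bits of raid_map[0] are zero.  Shifting right
 * by 16 turns e.g. 0x4000000, 0x4010000 and 0x4020000 into 0x400, 0x401 and
 * 0x402 before hashing, which, per the comment above, keeps hash_64() from
 * collapsing them into just a handful of buckets.
 */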
 283
 284/*
 285 * stealing an rbio means taking all the uptodate pages from the stripe
 286 * array in the source rbio and putting them into the destination rbio
 287 */
 288static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 289{
 290	int i;
 291	struct page *s;
 292	struct page *d;
 293
 294	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 295		return;
 296
 297	for (i = 0; i < dest->nr_pages; i++) {
 298		s = src->stripe_pages[i];
 299		if (!s || !PageUptodate(s)) {
 300			continue;
 301		}
 302
 303		d = dest->stripe_pages[i];
 304		if (d)
 305			__free_page(d);
 306
 307		dest->stripe_pages[i] = s;
 308		src->stripe_pages[i] = NULL;
 309	}
 310}
 311
 312/*
 313 * merging means we take the bio_list from the victim and
 314 * splice it into the destination.  The victim should
 315 * be discarded afterwards.
 316 *
 317 * must be called with dest->rbio_list_lock held
 318 */
 319static void merge_rbio(struct btrfs_raid_bio *dest,
 320		       struct btrfs_raid_bio *victim)
 321{
 322	bio_list_merge(&dest->bio_list, &victim->bio_list);
 323	dest->bio_list_bytes += victim->bio_list_bytes;
 324	dest->generic_bio_cnt += victim->generic_bio_cnt;
 325	bio_list_init(&victim->bio_list);
 326}
 327
 328/*
 329 * used to prune items that are in the cache.  The caller
 330 * must hold the hash table lock.
 331 */
 332static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 333{
 334	int bucket = rbio_bucket(rbio);
 335	struct btrfs_stripe_hash_table *table;
 336	struct btrfs_stripe_hash *h;
 337	int freeit = 0;
 338
 339	/*
 340	 * check the bit again under the hash table lock.
 341	 */
 342	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 343		return;
 344
 345	table = rbio->fs_info->stripe_hash_table;
 346	h = table->table + bucket;
 347
 348	/* hold the lock for the bucket because we may be
 349	 * removing it from the hash table
 350	 */
 351	spin_lock(&h->lock);
 352
 353	/*
 354	 * hold the lock for the bio list because we need
 355	 * to make sure the bio list is empty
 356	 */
 357	spin_lock(&rbio->bio_list_lock);
 358
 359	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 360		list_del_init(&rbio->stripe_cache);
 361		table->cache_size -= 1;
 362		freeit = 1;
 363
 364		/* if the bio list isn't empty, this rbio is
 365		 * still involved in an IO.  We take it out
 366		 * of the cache list, and drop the ref that
 367		 * was held for the list.
 368		 *
 369		 * If the bio_list was empty, we also remove
 370		 * the rbio from the hash_table, and drop
 371		 * the corresponding ref
 372		 */
 373		if (bio_list_empty(&rbio->bio_list)) {
 374			if (!list_empty(&rbio->hash_list)) {
 375				list_del_init(&rbio->hash_list);
 376				refcount_dec(&rbio->refs);
 377				BUG_ON(!list_empty(&rbio->plug_list));
 378			}
 379		}
 380	}
 381
 382	spin_unlock(&rbio->bio_list_lock);
 383	spin_unlock(&h->lock);
 384
 385	if (freeit)
 386		__free_raid_bio(rbio);
 387}
 388
 389/*
 390 * prune a given rbio from the cache
 391 */
 392static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 393{
 394	struct btrfs_stripe_hash_table *table;
 395	unsigned long flags;
 396
 397	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 398		return;
 399
 400	table = rbio->fs_info->stripe_hash_table;
 401
 402	spin_lock_irqsave(&table->cache_lock, flags);
 403	__remove_rbio_from_cache(rbio);
 404	spin_unlock_irqrestore(&table->cache_lock, flags);
 405}
 406
 407/*
 408 * remove everything in the cache
 409 */
 410static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 411{
 412	struct btrfs_stripe_hash_table *table;
 413	unsigned long flags;
 414	struct btrfs_raid_bio *rbio;
 415
 416	table = info->stripe_hash_table;
 417
 418	spin_lock_irqsave(&table->cache_lock, flags);
 419	while (!list_empty(&table->stripe_cache)) {
 420		rbio = list_entry(table->stripe_cache.next,
 421				  struct btrfs_raid_bio,
 422				  stripe_cache);
 423		__remove_rbio_from_cache(rbio);
 424	}
 425	spin_unlock_irqrestore(&table->cache_lock, flags);
 426}
 427
 428/*
 429 * remove all cached entries and free the hash table
 430 * used by unmount
 431 */
 432void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 433{
 434	if (!info->stripe_hash_table)
 435		return;
 436	btrfs_clear_rbio_cache(info);
 437	kvfree(info->stripe_hash_table);
 438	info->stripe_hash_table = NULL;
 439}
 440
 441/*
 442 * insert an rbio into the stripe cache.  It
 443 * must have already been prepared by calling
 444 * cache_rbio_pages
 445 *
 446 * If this rbio was already cached, it gets
 447 * moved to the front of the lru.
 448 *
 449 * If the size of the rbio cache is too big, we
 450 * prune an item.
 451 */
 452static void cache_rbio(struct btrfs_raid_bio *rbio)
 453{
 454	struct btrfs_stripe_hash_table *table;
 455	unsigned long flags;
 456
 457	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 458		return;
 459
 460	table = rbio->fs_info->stripe_hash_table;
 461
 462	spin_lock_irqsave(&table->cache_lock, flags);
 463	spin_lock(&rbio->bio_list_lock);
 464
 465	/* bump our ref if we were not in the list before */
 466	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 467		refcount_inc(&rbio->refs);
 468
 469	if (!list_empty(&rbio->stripe_cache)){
 470		list_move(&rbio->stripe_cache, &table->stripe_cache);
 471	} else {
 472		list_add(&rbio->stripe_cache, &table->stripe_cache);
 473		table->cache_size += 1;
 474	}
 475
 476	spin_unlock(&rbio->bio_list_lock);
 477
 478	if (table->cache_size > RBIO_CACHE_SIZE) {
 479		struct btrfs_raid_bio *found;
 480
 481		found = list_entry(table->stripe_cache.prev,
 482				  struct btrfs_raid_bio,
 483				  stripe_cache);
 484
 485		if (found != rbio)
 486			__remove_rbio_from_cache(found);
 487	}
 488
 489	spin_unlock_irqrestore(&table->cache_lock, flags);
 490}
 491
 492/*
 493 * helper function to run the xor_blocks api.  It is only
 494 * able to do MAX_XOR_BLOCKS at a time, so we need to
 495 * loop through.
 496 */
 497static void run_xor(void **pages, int src_cnt, ssize_t len)
 498{
 499	int src_off = 0;
 500	int xor_src_cnt = 0;
 501	void *dest = pages[src_cnt];
 502
 503	while(src_cnt > 0) {
 504		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 505		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 506
 507		src_cnt -= xor_src_cnt;
 508		src_off += xor_src_cnt;
 509	}
 510}
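/*
 * Usage sketch: callers place the destination block at pages[src_cnt], e.g.
 * run_xor(pointers + 1, nr_data - 1, PAGE_SIZE) xors data blocks 1..nr_data-1
 * into the parity page that was seeded with a copy of block 0.  With
 * MAX_XOR_BLOCKS == 4 (the value in <linux/raid/xor.h> at the time of
 * writing) and five sources, the loop issues one xor_blocks() call for the
 * first four sources and a second call for the remaining one.
 */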
 511
 512/*
 513 * returns true if the bio list inside this rbio
 514 * covers an entire stripe (no rmw required).
 515 * Must be called with the bio list lock held, or
 516 * at a time when you know it is impossible to add
 517 * new bios into the list
 518 */
 519static int __rbio_is_full(struct btrfs_raid_bio *rbio)
 520{
 
 521	unsigned long size = rbio->bio_list_bytes;
 522	int ret = 1;
 523
 
 524	if (size != rbio->nr_data * rbio->stripe_len)
 525		ret = 0;
 526
 527	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
 528	return ret;
 529}
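/*
 * Example with hypothetical numbers: three data stripes of 64K each mean a
 * full stripe needs bio_list_bytes == 192K of incoming writes; anything less
 * forces the read/modify/write path.
 */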
 530
 531static int rbio_is_full(struct btrfs_raid_bio *rbio)
 532{
 533	unsigned long flags;
 534	int ret;
 535
 536	spin_lock_irqsave(&rbio->bio_list_lock, flags);
 537	ret = __rbio_is_full(rbio);
 538	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 539	return ret;
 540}
 541
 542/*
 543 * returns 1 if it is safe to merge two rbios together.
 544 * The merging is safe if the two rbios correspond to
 545 * the same stripe and if they are both going in the same
 546 * direction (read vs write), and if neither one is
 547 * locked for final IO
 548 *
 549 * The caller is responsible for locking such that
 550 * rmw_locked is safe to test
 551 */
 552static int rbio_can_merge(struct btrfs_raid_bio *last,
 553			  struct btrfs_raid_bio *cur)
 554{
 555	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 556	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 557		return 0;
 558
 559	/*
 560	 * we can't merge with cached rbios, since the
 561	 * idea is that when we merge the destination
 562	 * rbio is going to run our IO for us.  We can
 563	 * steal from cached rbios though, other functions
 564	 * handle that.
 565	 */
 566	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 567	    test_bit(RBIO_CACHE_BIT, &cur->flags))
 568		return 0;
 569
 570	if (last->bbio->raid_map[0] !=
 571	    cur->bbio->raid_map[0])
 572		return 0;
 573
 574	/* we can't merge with different operations */
 575	if (last->operation != cur->operation)
 576		return 0;
 577	/*
  578	 * A parity scrub needs to read the full stripe from the drive,
  579	 * check and repair the parity and write the new results back.
 580	 *
 581	 * We're not allowed to add any new bios to the
 582	 * bio list here, anyone else that wants to
 583	 * change this stripe needs to do their own rmw.
 584	 */
 585	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
 586		return 0;
 587
 588	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
 589		return 0;
 590
 591	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
 592		int fa = last->faila;
 593		int fb = last->failb;
 594		int cur_fa = cur->faila;
 595		int cur_fb = cur->failb;
 596
 597		if (last->faila >= last->failb) {
 598			fa = last->failb;
 599			fb = last->faila;
 600		}
 601
 602		if (cur->faila >= cur->failb) {
 603			cur_fa = cur->failb;
 604			cur_fb = cur->faila;
 605		}
 606
 607		if (fa != cur_fa || fb != cur_fb)
 608			return 0;
 609	}
 610	return 1;
 611}
 612
 613static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
 614				  int index)
 615{
 616	return stripe * rbio->stripe_npages + index;
 617}
 618
 619/*
 620 * these are just the pages from the rbio array, not from anything
 621 * the FS sent down to us
 622 */
 623static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
 624				     int index)
 625{
 626	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
 627}
 628
 629/*
 630 * helper to index into the pstripe
 631 */
 632static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
 633{
 634	return rbio_stripe_page(rbio, rbio->nr_data, index);
 635}
 636
 637/*
 638 * helper to index into the qstripe, returns null
 639 * if there is no qstripe
 640 */
 641static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
 642{
 643	if (rbio->nr_data + 1 == rbio->real_stripes)
 644		return NULL;
 645	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
 646}
 647
 648/*
 649 * The first stripe in the table for a logical address
 650 * has the lock.  rbios are added in one of three ways:
 651 *
 652 * 1) Nobody has the stripe locked yet.  The rbio is given
 653 * the lock and 0 is returned.  The caller must start the IO
 654 * themselves.
 655 *
 656 * 2) Someone has the stripe locked, but we're able to merge
 657 * with the lock owner.  The rbio is freed and the IO will
 658 * start automatically along with the existing rbio.  1 is returned.
 659 *
 660 * 3) Someone has the stripe locked, but we're not able to merge.
 661 * The rbio is added to the lock owner's plug list, or merged into
 662 * an rbio already on the plug list.  When the lock owner unlocks,
 663 * the next rbio on the list is run and the IO is started automatically.
 664 * 1 is returned
 665 *
 666 * If we return 0, the caller still owns the rbio and must continue with
 667 * IO submission.  If we return 1, the caller must assume the rbio has
 668 * already been freed.
 669 */
 670static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 671{
 672	int bucket = rbio_bucket(rbio);
 673	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
 674	struct btrfs_raid_bio *cur;
 675	struct btrfs_raid_bio *pending;
 676	unsigned long flags;
 677	struct btrfs_raid_bio *freeit = NULL;
 678	struct btrfs_raid_bio *cache_drop = NULL;
 679	int ret = 0;
 680
 
 
 681	spin_lock_irqsave(&h->lock, flags);
 682	list_for_each_entry(cur, &h->hash_list, hash_list) {
 683		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
 684			spin_lock(&cur->bio_list_lock);
 
 
 685
 686			/* can we steal this cached rbio's pages? */
 687			if (bio_list_empty(&cur->bio_list) &&
 688			    list_empty(&cur->plug_list) &&
 689			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 690			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 691				list_del_init(&cur->hash_list);
 692				refcount_dec(&cur->refs);
 693
 694				steal_rbio(cur, rbio);
 695				cache_drop = cur;
 696				spin_unlock(&cur->bio_list_lock);
 697
 698				goto lockit;
 699			}
 700
 701			/* can we merge into the lock owner? */
 702			if (rbio_can_merge(cur, rbio)) {
 703				merge_rbio(cur, rbio);
 704				spin_unlock(&cur->bio_list_lock);
 705				freeit = rbio;
 706				ret = 1;
 707				goto out;
 708			}
 
 709
 710
 711			/*
 712			 * we couldn't merge with the running
 713			 * rbio, see if we can merge with the
 714			 * pending ones.  We don't have to
 715			 * check for rmw_locked because there
 716			 * is no way they are inside finish_rmw
 717			 * right now
 718			 */
 719			list_for_each_entry(pending, &cur->plug_list,
 720					    plug_list) {
 721				if (rbio_can_merge(pending, rbio)) {
 722					merge_rbio(pending, rbio);
 723					spin_unlock(&cur->bio_list_lock);
 724					freeit = rbio;
 725					ret = 1;
 726					goto out;
 727				}
 728			}
 729
 730			/* no merging, put us on the tail of the plug list,
  731			 * our rbio will be started when the currently
  732			 * running rbio unlocks
 733			 */
 734			list_add_tail(&rbio->plug_list, &cur->plug_list);
 735			spin_unlock(&cur->bio_list_lock);
 736			ret = 1;
 737			goto out;
 738		}
 739	}
 740lockit:
 741	refcount_inc(&rbio->refs);
 742	list_add(&rbio->hash_list, &h->hash_list);
 743out:
 744	spin_unlock_irqrestore(&h->lock, flags);
 745	if (cache_drop)
 746		remove_rbio_from_cache(cache_drop);
 747	if (freeit)
 748		__free_raid_bio(freeit);
 749	return ret;
 750}
 751
 752/*
 753 * called as rmw or parity rebuild is completed.  If the plug list has more
 754 * rbios waiting for this stripe, the next one on the list will be started
 755 */
 756static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 757{
 758	int bucket;
 759	struct btrfs_stripe_hash *h;
 760	unsigned long flags;
 761	int keep_cache = 0;
 762
 763	bucket = rbio_bucket(rbio);
 764	h = rbio->fs_info->stripe_hash_table->table + bucket;
 765
 766	if (list_empty(&rbio->plug_list))
 767		cache_rbio(rbio);
 768
 769	spin_lock_irqsave(&h->lock, flags);
 770	spin_lock(&rbio->bio_list_lock);
 771
 772	if (!list_empty(&rbio->hash_list)) {
 773		/*
 774		 * if we're still cached and there is no other IO
 775		 * to perform, just leave this rbio here for others
 776		 * to steal from later
 777		 */
 778		if (list_empty(&rbio->plug_list) &&
 779		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 780			keep_cache = 1;
 781			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 782			BUG_ON(!bio_list_empty(&rbio->bio_list));
 783			goto done;
 784		}
 785
 786		list_del_init(&rbio->hash_list);
 787		refcount_dec(&rbio->refs);
 788
 789		/*
 790		 * we use the plug list to hold all the rbios
 791		 * waiting for the chance to lock this stripe.
 792		 * hand the lock over to one of them.
 793		 */
 794		if (!list_empty(&rbio->plug_list)) {
 795			struct btrfs_raid_bio *next;
 796			struct list_head *head = rbio->plug_list.next;
 797
 798			next = list_entry(head, struct btrfs_raid_bio,
 799					  plug_list);
 800
 801			list_del_init(&rbio->plug_list);
 802
 803			list_add(&next->hash_list, &h->hash_list);
 804			refcount_inc(&next->refs);
 805			spin_unlock(&rbio->bio_list_lock);
 806			spin_unlock_irqrestore(&h->lock, flags);
 807
 808			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 809				async_read_rebuild(next);
 810			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
 811				steal_rbio(rbio, next);
 812				async_read_rebuild(next);
 813			} else if (next->operation == BTRFS_RBIO_WRITE) {
 814				steal_rbio(rbio, next);
 815				async_rmw_stripe(next);
 816			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 817				steal_rbio(rbio, next);
 818				async_scrub_parity(next);
 819			}
 820
 821			goto done_nolock;
 822		}
 823	}
 824done:
 825	spin_unlock(&rbio->bio_list_lock);
 826	spin_unlock_irqrestore(&h->lock, flags);
 827
 828done_nolock:
 829	if (!keep_cache)
 830		remove_rbio_from_cache(rbio);
 831}
 832
 833static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 834{
 835	int i;
 836
 837	if (!refcount_dec_and_test(&rbio->refs))
 838		return;
 839
 840	WARN_ON(!list_empty(&rbio->stripe_cache));
 841	WARN_ON(!list_empty(&rbio->hash_list));
 842	WARN_ON(!bio_list_empty(&rbio->bio_list));
 843
 844	for (i = 0; i < rbio->nr_pages; i++) {
 845		if (rbio->stripe_pages[i]) {
 846			__free_page(rbio->stripe_pages[i]);
 847			rbio->stripe_pages[i] = NULL;
 848		}
 849	}
 850
 851	btrfs_put_bbio(rbio->bbio);
 852	kfree(rbio);
 853}
 854
 855static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
 856{
 857	struct bio *next;
 858
 859	while (cur) {
 860		next = cur->bi_next;
 861		cur->bi_next = NULL;
 862		cur->bi_status = err;
 863		bio_endio(cur);
 864		cur = next;
 865	}
 866}
 867
 868/*
 869 * this frees the rbio and runs through all the bios in the
 870 * bio_list and calls end_io on them
 871 */
 872static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 873{
 874	struct bio *cur = bio_list_get(&rbio->bio_list);
 875	struct bio *extra;
 876
 877	if (rbio->generic_bio_cnt)
 878		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
 879
 880	/*
 881	 * At this moment, rbio->bio_list is empty, however since rbio does not
 882	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 883	 * hash list, rbio may be merged with others so that rbio->bio_list
 884	 * becomes non-empty.
 885	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
 886	 * more and we can call bio_endio() on all queued bios.
 887	 */
 888	unlock_stripe(rbio);
 889	extra = bio_list_get(&rbio->bio_list);
 890	__free_raid_bio(rbio);
 891
 892	rbio_endio_bio_list(cur, err);
 893	if (extra)
 894		rbio_endio_bio_list(extra, err);
 895}
 896
 897/*
 898 * end io function used by finish_rmw.  When we finally
 899 * get here, we've written a full stripe
 900 */
 901static void raid_write_end_io(struct bio *bio)
 902{
 903	struct btrfs_raid_bio *rbio = bio->bi_private;
 904	blk_status_t err = bio->bi_status;
 905	int max_errors;
 906
 907	if (err)
 908		fail_bio_stripe(rbio, bio);
 909
 910	bio_put(bio);
 911
 912	if (!atomic_dec_and_test(&rbio->stripes_pending))
 913		return;
 914
 915	err = BLK_STS_OK;
 916
  917	/* OK, we have finished all the stripe writes we needed to. */
 918	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
 919		     0 : rbio->bbio->max_errors;
 920	if (atomic_read(&rbio->error) > max_errors)
 921		err = BLK_STS_IOERR;
 922
 923	rbio_orig_end_io(rbio, err);
 924}
 925
 926/*
 927 * the read/modify/write code wants to use the original bio for
 928 * any pages it included, and then use the rbio for everything
 929 * else.  This function decides if a given index (stripe number)
 930 * and page number in that stripe fall inside the original bio
 931 * or the rbio.
 932 *
 933 * if you set bio_list_only, you'll get a NULL back for any ranges
 934 * that are outside the bio_list
 935 *
 936 * This doesn't take any refs on anything, you get a bare page pointer
 937 * and the caller must bump refs as required.
 938 *
 939 * You must call index_rbio_pages once before you can trust
 940 * the answers from this function.
 941 */
 942static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 943				 int index, int pagenr, int bio_list_only)
 944{
 945	int chunk_page;
 946	struct page *p = NULL;
 947
 948	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
 949
 950	spin_lock_irq(&rbio->bio_list_lock);
 951	p = rbio->bio_pages[chunk_page];
 952	spin_unlock_irq(&rbio->bio_list_lock);
 953
 954	if (p || bio_list_only)
 955		return p;
 956
 957	return rbio->stripe_pages[chunk_page];
 958}
 959
 960/*
 961 * number of pages we need for the entire stripe across all the
 962 * drives
 963 */
 964static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 965{
 966	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
 967}
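/*
 * Example with hypothetical numbers: a stripe_len of 64K and 4K pages give 16
 * pages per stripe, so a raid6 layout with four data stripes plus P and Q
 * (nr_stripes == 6) needs 96 pages to cover the whole full stripe.
 */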
 968
 969/*
  970 * allocation and initial setup for the btrfs_raid_bio.  Note that
  971 * this does not allocate any pages for rbio->stripe_pages.
 972 */
 973static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 974					 struct btrfs_bio *bbio,
 975					 u64 stripe_len)
 976{
 977	struct btrfs_raid_bio *rbio;
 978	int nr_data = 0;
 979	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
 980	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
 981	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
 982	void *p;
 983
 984	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
 985		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
 986		       sizeof(long), GFP_NOFS);
 987	if (!rbio)
 988		return ERR_PTR(-ENOMEM);
 989
 990	bio_list_init(&rbio->bio_list);
 991	INIT_LIST_HEAD(&rbio->plug_list);
 992	spin_lock_init(&rbio->bio_list_lock);
 993	INIT_LIST_HEAD(&rbio->stripe_cache);
 994	INIT_LIST_HEAD(&rbio->hash_list);
 995	rbio->bbio = bbio;
 996	rbio->fs_info = fs_info;
 997	rbio->stripe_len = stripe_len;
 998	rbio->nr_pages = num_pages;
 999	rbio->real_stripes = real_stripes;
1000	rbio->stripe_npages = stripe_npages;
1001	rbio->faila = -1;
1002	rbio->failb = -1;
1003	refcount_set(&rbio->refs, 1);
1004	atomic_set(&rbio->error, 0);
1005	atomic_set(&rbio->stripes_pending, 0);
1006
1007	/*
1008	 * the stripe_pages and bio_pages array point to the extra
1009	 * memory we allocated past the end of the rbio
1010	 */
1011	p = rbio + 1;
1012	rbio->stripe_pages = p;
1013	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1014	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
1015
1016	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1017		nr_data = real_stripes - 1;
1018	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1019		nr_data = real_stripes - 2;
1020	else
1021		BUG();
1022
1023	rbio->nr_data = nr_data;
1024	return rbio;
1025}
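/*
 * Memory layout note: the kzalloc() above makes a single allocation that
 * holds the struct itself, then num_pages stripe_pages pointers, then
 * num_pages bio_pages pointers, and finally the dbitmap words, which is why
 * the three pointers are simply carved out of (rbio + 1) in sequence.
 */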
1026
1027/* allocate pages for all the stripes in the bio, including parity */
1028static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1029{
1030	int i;
1031	struct page *page;
1032
1033	for (i = 0; i < rbio->nr_pages; i++) {
1034		if (rbio->stripe_pages[i])
1035			continue;
1036		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1037		if (!page)
1038			return -ENOMEM;
1039		rbio->stripe_pages[i] = page;
1040	}
1041	return 0;
1042}
1043
1044/* only allocate pages for p/q stripes */
1045static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1046{
1047	int i;
1048	struct page *page;
1049
1050	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1051
1052	for (; i < rbio->nr_pages; i++) {
1053		if (rbio->stripe_pages[i])
1054			continue;
1055		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1056		if (!page)
1057			return -ENOMEM;
1058		rbio->stripe_pages[i] = page;
1059	}
1060	return 0;
1061}
1062
1063/*
 1064 * add a single page from a specific stripe into our list of bios for IO.
 1065 * This will try to merge into existing bios if possible, and returns
1066 * zero if all went well.
1067 */
1068static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1069			    struct bio_list *bio_list,
1070			    struct page *page,
1071			    int stripe_nr,
1072			    unsigned long page_index,
1073			    unsigned long bio_max_len)
1074{
1075	struct bio *last = bio_list->tail;
1076	u64 last_end = 0;
1077	int ret;
1078	struct bio *bio;
1079	struct btrfs_bio_stripe *stripe;
1080	u64 disk_start;
1081
1082	stripe = &rbio->bbio->stripes[stripe_nr];
1083	disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1084
1085	/* if the device is missing, just fail this stripe */
1086	if (!stripe->dev->bdev)
1087		return fail_rbio_index(rbio, stripe_nr);
1088
1089	/* see if we can add this page onto our existing bio */
1090	if (last) {
1091		last_end = (u64)last->bi_iter.bi_sector << 9;
1092		last_end += last->bi_iter.bi_size;
1093
1094		/*
1095		 * we can't merge these if they are from different
1096		 * devices or if they are not contiguous
1097		 */
1098		if (last_end == disk_start && stripe->dev->bdev &&
1099		    !last->bi_status &&
1100		    last->bi_disk == stripe->dev->bdev->bd_disk &&
1101		    last->bi_partno == stripe->dev->bdev->bd_partno) {
1102			ret = bio_add_page(last, page, PAGE_SIZE, 0);
1103			if (ret == PAGE_SIZE)
1104				return 0;
1105		}
1106	}
1107
1108	/* put a new bio on the list */
1109	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
 
1110	bio->bi_iter.bi_size = 0;
1111	bio_set_dev(bio, stripe->dev->bdev);
1112	bio->bi_iter.bi_sector = disk_start >> 9;
1113
1114	bio_add_page(bio, page, PAGE_SIZE, 0);
1115	bio_list_add(bio_list, bio);
1116	return 0;
1117}
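/*
 * In other words: if the page is physically contiguous with the tail bio on
 * the same device, bio_add_page() just extends that bio; otherwise a fresh
 * bio sized for up to bio_max_len worth of pages is started.  A missing
 * device short-circuits the whole stripe via fail_rbio_index() instead.
 */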
1118
1119/*
1120 * while we're doing the read/modify/write cycle, we could
1121 * have errors in reading pages off the disk.  This checks
1122 * for errors and if we're not able to read the page it'll
1123 * trigger parity reconstruction.  The rmw will be finished
1124 * after we've reconstructed the failed stripes
1125 */
1126static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1127{
1128	if (rbio->faila >= 0 || rbio->failb >= 0) {
1129		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1130		__raid56_parity_recover(rbio);
1131	} else {
1132		finish_rmw(rbio);
1133	}
1134}
1135
1136/*
1137 * helper function to walk our bio list and populate the bio_pages array with
1138 * the result.  This seems expensive, but it is faster than constantly
1139 * searching through the bio list as we setup the IO in finish_rmw or stripe
1140 * reconstruction.
1141 *
1142 * This must be called before you trust the answers from page_in_rbio
1143 */
1144static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1145{
1146	struct bio *bio;
1147	u64 start;
1148	unsigned long stripe_offset;
1149	unsigned long page_index;
1150
1151	spin_lock_irq(&rbio->bio_list_lock);
1152	bio_list_for_each(bio, &rbio->bio_list) {
1153		struct bio_vec bvec;
1154		struct bvec_iter iter;
1155		int i = 0;
1156
1157		start = (u64)bio->bi_iter.bi_sector << 9;
1158		stripe_offset = start - rbio->bbio->raid_map[0];
1159		page_index = stripe_offset >> PAGE_SHIFT;
1160
1161		if (bio_flagged(bio, BIO_CLONED))
1162			bio->bi_iter = btrfs_io_bio(bio)->iter;
1163
1164		bio_for_each_segment(bvec, bio, iter) {
1165			rbio->bio_pages[page_index + i] = bvec.bv_page;
1166			i++;
1167		}
1168	}
1169	spin_unlock_irq(&rbio->bio_list_lock);
1170}
1171
1172/*
1173 * this is called from one of two situations.  We either
1174 * have a full stripe from the higher layers, or we've read all
1175 * the missing bits off disk.
1176 *
1177 * This will calculate the parity and then send down any
1178 * changed blocks.
1179 */
1180static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1181{
1182	struct btrfs_bio *bbio = rbio->bbio;
1183	void *pointers[rbio->real_stripes];
1184	int nr_data = rbio->nr_data;
1185	int stripe;
1186	int pagenr;
1187	int p_stripe = -1;
1188	int q_stripe = -1;
1189	struct bio_list bio_list;
1190	struct bio *bio;
1191	int ret;
1192
1193	bio_list_init(&bio_list);
1194
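	/*
	 * the parity stripes are stored after the data stripes: raid5 has a
	 * single P stripe at the end, raid6 has P and then Q as the last two
	 */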
1195	if (rbio->real_stripes - rbio->nr_data == 1) {
1196		p_stripe = rbio->real_stripes - 1;
1197	} else if (rbio->real_stripes - rbio->nr_data == 2) {
1198		p_stripe = rbio->real_stripes - 2;
1199		q_stripe = rbio->real_stripes - 1;
1200	} else {
1201		BUG();
1202	}
1203
1204	/* at this point we either have a full stripe,
1205	 * or we've read the full stripe from the drive.
1206	 * recalculate the parity and write the new results.
1207	 *
1208	 * We're not allowed to add any new bios to the
1209	 * bio list here; anyone else that wants to
1210	 * change this stripe needs to do their own rmw.
1211	 */
1212	spin_lock_irq(&rbio->bio_list_lock);
1213	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1214	spin_unlock_irq(&rbio->bio_list_lock);
1215
1216	atomic_set(&rbio->error, 0);
1217
1218	/*
1219	 * now that we've set rmw_locked, run through the
1220	 * bio list one last time and map the page pointers
1221	 *
1222	 * We don't cache full rbios because we're assuming
1223	 * the higher layers are unlikely to use this area of
1224	 * the disk again soon.  If they do use it again,
1225	 * hopefully they will send another full bio.
1226	 */
1227	index_rbio_pages(rbio);
1228	if (!rbio_is_full(rbio))
1229		cache_rbio_pages(rbio);
1230	else
1231		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1232
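	/*
	 * recompute parity one horizontal page slice at a time: for raid5,
	 * P = D0 ^ D1 ^ ... ^ D(nr_data - 1); for raid6 the P/Q syndromes are
	 * generated by raid6_call.gen_syndrome()
	 */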
1233	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1234		struct page *p;
1235		/* first collect one page from each data stripe */
1236		for (stripe = 0; stripe < nr_data; stripe++) {
1237			p = page_in_rbio(rbio, stripe, pagenr, 0);
1238			pointers[stripe] = kmap(p);
1239		}
1240
1241		/* then add the parity stripe */
1242		p = rbio_pstripe_page(rbio, pagenr);
1243		SetPageUptodate(p);
1244		pointers[stripe++] = kmap(p);
1245
1246		if (q_stripe != -1) {
1247
1248			/*
1249			 * raid6, add the qstripe and call the
1250			 * library function to fill in our p/q
1251			 */
1252			p = rbio_qstripe_page(rbio, pagenr);
1253			SetPageUptodate(p);
1254			pointers[stripe++] = kmap(p);
1255
1256			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1257						pointers);
1258		} else {
1259			/* raid5 */
1260			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1261			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1262		}
1263
1264
1265		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1266			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1267	}
1268
1269	/*
1270	 * time to start writing.  Make bios for everything from the
1271	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1272	 * everything else.
1273	 */
1274	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1275		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1276			struct page *page;
1277			if (stripe < rbio->nr_data) {
1278				page = page_in_rbio(rbio, stripe, pagenr, 1);
1279				if (!page)
1280					continue;
1281			} else {
1282				page = rbio_stripe_page(rbio, stripe, pagenr);
1283			}
1284
1285			ret = rbio_add_io_page(rbio, &bio_list,
1286				       page, stripe, pagenr, rbio->stripe_len);
1287			if (ret)
1288				goto cleanup;
1289		}
1290	}
1291
1292	if (likely(!bbio->num_tgtdevs))
1293		goto write_data;
1294
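	/*
	 * for dev-replace, queue the same pages for the replacement target
	 * devices recorded in bbio->tgtdev_map as well
	 */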
1295	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1296		if (!bbio->tgtdev_map[stripe])
1297			continue;
1298
1299		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1300			struct page *page;
1301			if (stripe < rbio->nr_data) {
1302				page = page_in_rbio(rbio, stripe, pagenr, 1);
1303				if (!page)
1304					continue;
1305			} else {
1306				page = rbio_stripe_page(rbio, stripe, pagenr);
1307			}
1308
1309			ret = rbio_add_io_page(rbio, &bio_list, page,
1310					       rbio->bbio->tgtdev_map[stripe],
1311					       pagenr, rbio->stripe_len);
1312			if (ret)
1313				goto cleanup;
1314		}
1315	}
1316
1317write_data:
1318	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1319	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1320
1321	while (1) {
1322		bio = bio_list_pop(&bio_list);
1323		if (!bio)
1324			break;
1325
1326		bio->bi_private = rbio;
1327		bio->bi_end_io = raid_write_end_io;
1328		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1329
1330		submit_bio(bio);
1331	}
1332	return;
1333
1334cleanup:
1335	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1336
1337	while ((bio = bio_list_pop(&bio_list)))
1338		bio_put(bio);
1339}
1340
1341/*
1342 * helper to find the stripe number for a given bio.  Used to figure out which
1343 * stripe has failed.  This expects the bio to correspond to a physical disk,
1344 * so it looks up based on physical sector numbers.
1345 */
1346static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1347			   struct bio *bio)
1348{
1349	u64 physical = bio->bi_iter.bi_sector;
1350	u64 stripe_start;
1351	int i;
1352	struct btrfs_bio_stripe *stripe;
1353
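	/* bi_sector is in 512 byte sectors; convert to bytes before comparing with stripe->physical */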
1354	physical <<= 9;
1355
1356	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1357		stripe = &rbio->bbio->stripes[i];
1358		stripe_start = stripe->physical;
1359		if (physical >= stripe_start &&
1360		    physical < stripe_start + rbio->stripe_len &&
1361		    stripe->dev->bdev &&
1362		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1363		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1364			return i;
1365		}
1366	}
1367	return -1;
1368}
1369
1370/*
1371 * helper to find the stripe number for a given
1372 * bio (before mapping).  Used to figure out which stripe has
1373 * failed.  This looks up based on logical block numbers.
1374 */
1375static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1376				   struct bio *bio)
1377{
1378	u64 logical = bio->bi_iter.bi_sector;
1379	u64 stripe_start;
1380	int i;
1381
1382	logical <<= 9;
1383
1384	for (i = 0; i < rbio->nr_data; i++) {
1385		stripe_start = rbio->bbio->raid_map[i];
1386		if (logical >= stripe_start &&
1387		    logical < stripe_start + rbio->stripe_len) {
1388			return i;
1389		}
1390	}
1391	return -1;
1392}
1393
1394/*
1395 * returns -EIO if we had too many failures
1396 */
1397static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1398{
1399	unsigned long flags;
1400	int ret = 0;
1401
1402	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1403
1404	/* we already know this stripe is bad, move on */
1405	if (rbio->faila == failed || rbio->failb == failed)
1406		goto out;
1407
1408	if (rbio->faila == -1) {
1409		/* first failure on this rbio */
1410		rbio->faila = failed;
1411		atomic_inc(&rbio->error);
1412	} else if (rbio->failb == -1) {
1413		/* second failure on this rbio */
1414		rbio->failb = failed;
1415		atomic_inc(&rbio->error);
1416	} else {
1417		ret = -EIO;
1418	}
1419out:
1420	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1421
1422	return ret;
1423}
1424
1425/*
1426 * helper to fail a stripe based on a physical disk
1427 * bio.
1428 */
1429static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1430			   struct bio *bio)
1431{
1432	int failed = find_bio_stripe(rbio, bio);
1433
1434	if (failed < 0)
1435		return -EIO;
1436
1437	return fail_rbio_index(rbio, failed);
1438}
1439
1440/*
1441 * this sets each page in the bio uptodate.  It should only be used on private
1442 * rbio pages, nothing that comes in from the higher layers
1443 */
1444static void set_bio_pages_uptodate(struct bio *bio)
1445{
1446	struct bio_vec *bvec;
1447	int i;
1448
1449	ASSERT(!bio_flagged(bio, BIO_CLONED));
1450
1451	bio_for_each_segment_all(bvec, bio, i)
1452		SetPageUptodate(bvec->bv_page);
1453}
1454
1455/*
1456 * end io for the read phase of the rmw cycle.  All the bios here are physical
1457 * stripe bios we've read from the disk so we can recalculate the parity of the
1458 * stripe.
1459 *
1460 * This will usually kick off finish_rmw once all the bios are read in, but it
1461 * may trigger parity reconstruction if we had any errors along the way
1462 */
1463static void raid_rmw_end_io(struct bio *bio)
1464{
1465	struct btrfs_raid_bio *rbio = bio->bi_private;
1466
1467	if (bio->bi_status)
1468		fail_bio_stripe(rbio, bio);
1469	else
1470		set_bio_pages_uptodate(bio);
1471
1472	bio_put(bio);
1473
1474	if (!atomic_dec_and_test(&rbio->stripes_pending))
1475		return;
1476
1477	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1478		goto cleanup;
1479
1480	/*
1481	 * this will normally call finish_rmw to start our write
1482	 * but if there are any failed stripes we'll reconstruct
1483	 * from parity first
1484	 */
1485	validate_rbio_for_rmw(rbio);
1486	return;
1487
1488cleanup:
1489
1490	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1491}
1492
1493static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1494{
1495	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1496	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1497}
1498
1499static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1500{
1501	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1502			read_rebuild_work, NULL, NULL);
1503
1504	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
1505}
1506
1507/*
1508 * the stripe must be locked by the caller.  It will
1509 * unlock after all the writes are done
1510 */
1511static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1512{
1513	int bios_to_read = 0;
1514	struct bio_list bio_list;
1515	int ret;
1516	int pagenr;
1517	int stripe;
1518	struct bio *bio;
1519
1520	bio_list_init(&bio_list);
1521
1522	ret = alloc_rbio_pages(rbio);
1523	if (ret)
1524		goto cleanup;
1525
1526	index_rbio_pages(rbio);
1527
1528	atomic_set(&rbio->error, 0);
1529	/*
1530	 * build a list of bios to read all the missing parts of this
1531	 * stripe
1532	 */
1533	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1534		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1535			struct page *page;
1536			/*
1537			 * we want to find all the pages missing from
1538			 * the rbio and read them from the disk.  If
1539			 * page_in_rbio finds a page in the bio list
1540			 * we don't need to read it off the stripe.
1541			 */
1542			page = page_in_rbio(rbio, stripe, pagenr, 1);
1543			if (page)
1544				continue;
1545
1546			page = rbio_stripe_page(rbio, stripe, pagenr);
1547			/*
1548			 * the bio cache may have handed us an uptodate
1549			 * page.  If so, be happy and use it
1550			 */
1551			if (PageUptodate(page))
1552				continue;
1553
1554			ret = rbio_add_io_page(rbio, &bio_list, page,
1555				       stripe, pagenr, rbio->stripe_len);
1556			if (ret)
1557				goto cleanup;
1558		}
1559	}
1560
1561	bios_to_read = bio_list_size(&bio_list);
1562	if (!bios_to_read) {
1563		/*
1564		 * this can happen if others have merged with
1565		 * us; it means there is nothing left to read.
1566		 * But if there are missing devices it may not be
1567		 * safe to do the full stripe write yet.
1568		 */
1569		goto finish;
1570	}
1571
1572	/*
1573	 * the bbio may be freed once we submit the last bio.  Make sure
1574	 * not to touch it after that
1575	 */
1576	atomic_set(&rbio->stripes_pending, bios_to_read);
1577	while (1) {
1578		bio = bio_list_pop(&bio_list);
1579		if (!bio)
1580			break;
1581
1582		bio->bi_private = rbio;
1583		bio->bi_end_io = raid_rmw_end_io;
1584		bio_set_op_attrs(bio, REQ_OP_READ, 0);
1585
1586		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1587
1588		submit_bio(bio);
1589	}
1590	/* the actual write will happen once the reads are done */
1591	return 0;
1592
1593cleanup:
1594	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1595
1596	while ((bio = bio_list_pop(&bio_list)))
1597		bio_put(bio);
1598
1599	return -EIO;
1600
1601finish:
1602	validate_rbio_for_rmw(rbio);
1603	return 0;
1604}
1605
1606/*
1607 * if the upper layers pass in a full stripe, we thank them by only allocating
1608 * enough pages to hold the parity, and sending it all down quickly.
1609 */
1610static int full_stripe_write(struct btrfs_raid_bio *rbio)
1611{
1612	int ret;
1613
1614	ret = alloc_rbio_parity_pages(rbio);
1615	if (ret) {
1616		__free_raid_bio(rbio);
1617		return ret;
1618	}
1619
1620	ret = lock_stripe_add(rbio);
1621	if (ret == 0)
1622		finish_rmw(rbio);
1623	return 0;
1624}
1625
1626/*
1627 * partial stripe writes get handed over to async helpers.
1628 * We're really hoping to merge a few more writes into this
1629 * rbio before calculating new parity
1630 */
1631static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1632{
1633	int ret;
1634
1635	ret = lock_stripe_add(rbio);
1636	if (ret == 0)
1637		async_rmw_stripe(rbio);
1638	return 0;
1639}
1640
1641/*
1642 * sometimes while we were reading from the drive to
1643 * recalculate parity, enough new bios come in to create
1644 * a full stripe.  So we do a check here to see if we can
1645 * go directly to finish_rmw
1646 */
1647static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1648{
1649	/* head off into rmw land if we don't have a full stripe */
1650	if (!rbio_is_full(rbio))
1651		return partial_stripe_write(rbio);
1652	return full_stripe_write(rbio);
1653}
1654
1655/*
1656 * We use plugging callbacks to collect full stripes.
1657 * Any time we get a partial stripe write while plugged
1658 * we collect it into a list.  When the unplug comes down,
1659 * we sort the list by logical block number and merge
1660 * everything we can into the same rbios
1661 */
1662struct btrfs_plug_cb {
1663	struct blk_plug_cb cb;
1664	struct btrfs_fs_info *info;
1665	struct list_head rbio_list;
1666	struct btrfs_work work;
1667};
1668
1669/*
1670 * rbios on the plug list are sorted for easier merging.
1671 */
1672static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1673{
1674	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1675						 plug_list);
1676	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1677						 plug_list);
1678	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1680
1681	if (a_sector < b_sector)
1682		return -1;
1683	if (a_sector > b_sector)
1684		return 1;
1685	return 0;
1686}
1687
1688static void run_plug(struct btrfs_plug_cb *plug)
1689{
1690	struct btrfs_raid_bio *cur;
1691	struct btrfs_raid_bio *last = NULL;
1692
1693	/*
1694	 * sort our plug list then try to merge
1695	 * everything we can in hopes of creating full
1696	 * stripes.
1697	 */
1698	list_sort(NULL, &plug->rbio_list, plug_cmp);
1699	while (!list_empty(&plug->rbio_list)) {
1700		cur = list_entry(plug->rbio_list.next,
1701				 struct btrfs_raid_bio, plug_list);
1702		list_del_init(&cur->plug_list);
1703
1704		if (rbio_is_full(cur)) {
1705			/* we have a full stripe, send it down */
1706			full_stripe_write(cur);
1707			continue;
1708		}
1709		if (last) {
1710			if (rbio_can_merge(last, cur)) {
1711				merge_rbio(last, cur);
1712				__free_raid_bio(cur);
1713				continue;
1714
1715			}
1716			__raid56_parity_write(last);
1717		}
1718		last = cur;
1719	}
1720	if (last) {
1721		__raid56_parity_write(last);
1722	}
1723	kfree(plug);
1724}
1725
1726/*
1727 * if the unplug comes from schedule, we have to push the
1728 * work off to a helper thread
1729 */
1730static void unplug_work(struct btrfs_work *work)
1731{
1732	struct btrfs_plug_cb *plug;
1733	plug = container_of(work, struct btrfs_plug_cb, work);
1734	run_plug(plug);
1735}
1736
1737static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1738{
1739	struct btrfs_plug_cb *plug;
1740	plug = container_of(cb, struct btrfs_plug_cb, cb);
1741
1742	if (from_schedule) {
1743		btrfs_init_work(&plug->work, btrfs_rmw_helper,
1744				unplug_work, NULL, NULL);
1745		btrfs_queue_work(plug->info->rmw_workers,
1746				 &plug->work);
1747		return;
1748	}
1749	run_plug(plug);
1750}
1751
1752/*
1753 * our main entry point for writes from the rest of the FS.
1754 */
1755int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1756			struct btrfs_bio *bbio, u64 stripe_len)
1757{
1758	struct btrfs_raid_bio *rbio;
1759	struct btrfs_plug_cb *plug = NULL;
1760	struct blk_plug_cb *cb;
1761	int ret;
1762
1763	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1764	if (IS_ERR(rbio)) {
1765		btrfs_put_bbio(bbio);
1766		return PTR_ERR(rbio);
1767	}
1768	bio_list_add(&rbio->bio_list, bio);
1769	rbio->bio_list_bytes = bio->bi_iter.bi_size;
1770	rbio->operation = BTRFS_RBIO_WRITE;
1771
1772	btrfs_bio_counter_inc_noblocked(fs_info);
1773	rbio->generic_bio_cnt = 1;
1774
1775	/*
1776	 * don't plug on full rbios, just get them out the door
1777	 * as quickly as we can
1778	 */
1779	if (rbio_is_full(rbio)) {
1780		ret = full_stripe_write(rbio);
1781		if (ret)
1782			btrfs_bio_counter_dec(fs_info);
1783		return ret;
1784	}
1785
1786	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1787	if (cb) {
1788		plug = container_of(cb, struct btrfs_plug_cb, cb);
1789		if (!plug->info) {
1790			plug->info = fs_info;
1791			INIT_LIST_HEAD(&plug->rbio_list);
1792		}
1793		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1794		ret = 0;
1795	} else {
1796		ret = __raid56_parity_write(rbio);
1797		if (ret)
1798			btrfs_bio_counter_dec(fs_info);
1799	}
1800	return ret;
1801}
1802
1803/*
1804 * all parity reconstruction happens here.  We've read in everything
1805 * we can find from the drives and this does the heavy lifting of
1806 * sorting the good from the bad.
1807 */
1808static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1809{
1810	int pagenr, stripe;
1811	void **pointers;
1812	int faila = -1, failb = -1;
1813	struct page *page;
1814	blk_status_t err;
1815	int i;
1816
1817	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1818	if (!pointers) {
1819		err = BLK_STS_RESOURCE;
1820		goto cleanup_io;
1821	}
1822
1823	faila = rbio->faila;
1824	failb = rbio->failb;
1825
1826	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1827	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1828		spin_lock_irq(&rbio->bio_list_lock);
1829		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1830		spin_unlock_irq(&rbio->bio_list_lock);
1831	}
1832
1833	index_rbio_pages(rbio);
1834
1835	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1836		/*
1837		 * When doing a parity scrub, we use the bitmap to mark the
1838		 * horizontal stripes in which we have data.
1839		 */
1840		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1841		    !test_bit(pagenr, rbio->dbitmap))
1842			continue;
1843
1844		/* setup our array of pointers with pages
1845		 * from each stripe
1846		 */
1847		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1848			/*
1849			 * if we're rebuilding a read, we have to use
1850			 * pages from the bio list
1851			 */
1852			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1853			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1854			    (stripe == faila || stripe == failb)) {
1855				page = page_in_rbio(rbio, stripe, pagenr, 0);
1856			} else {
1857				page = rbio_stripe_page(rbio, stripe, pagenr);
1858			}
1859			pointers[stripe] = kmap(page);
1860		}
1861
1862		/* all raid6 handling here */
1863		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1864			/*
1865			 * single failure, rebuild from parity raid5
1866			 * style
1867			 */
1868			if (failb < 0) {
1869				if (faila == rbio->nr_data) {
1870					/*
1871					 * Just the P stripe has failed, without
1872					 * a bad data or Q stripe.
1873					 * TODO, we should redo the xor here.
1874					 */
1875					err = BLK_STS_IOERR;
1876					goto cleanup;
1877				}
1878				/*
1879				 * a single failure in raid6 is rebuilt
1880				 * in the pstripe code below
1881				 */
1882				goto pstripe;
1883			}
1884
1885			/* make sure our ps and qs are in order */
1886			if (faila > failb) {
1887				int tmp = failb;
1888				failb = faila;
1889				faila = tmp;
1890			}
1891
1892			/* if the q stripe has failed, do a pstripe reconstruction
1893			 * from the xors.
1894			 * If both the q stripe and the P stripe have failed, we're
1895			 * here due to a crc mismatch and we can't give them the
1896			 * data they want
1897			 */
1898			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1899				if (rbio->bbio->raid_map[faila] ==
1900				    RAID5_P_STRIPE) {
1901					err = BLK_STS_IOERR;
1902					goto cleanup;
1903				}
1904				/*
1905				 * otherwise we have one bad data stripe and
1906				 * a good P stripe.  raid5!
1907				 */
1908				goto pstripe;
1909			}
1910
1911			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1912				raid6_datap_recov(rbio->real_stripes,
1913						  PAGE_SIZE, faila, pointers);
1914			} else {
1915				raid6_2data_recov(rbio->real_stripes,
1916						  PAGE_SIZE, faila, failb,
1917						  pointers);
1918			}
1919		} else {
1920			void *p;
1921
1922			/* rebuild from P stripe here (raid5 or raid6) */
1923			BUG_ON(failb != -1);
1924pstripe:
1925			/* Copy parity block into failed block to start with */
1926			memcpy(pointers[faila],
1927			       pointers[rbio->nr_data],
1928			       PAGE_SIZE);
1929
1930			/* rearrange the pointers so the failed block sits in run_xor()'s destination slot */
1931			p = pointers[faila];
1932			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1933				pointers[stripe] = pointers[stripe + 1];
1934			pointers[rbio->nr_data - 1] = p;
1935
1936			/* xor in the rest */
1937			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1938		}
1939		/* if we're doing this rebuild as part of an rmw, go through
1940		 * and set all of our private rbio pages in the
1941		 * failed stripes as uptodate.  This way finish_rmw will
1942		 * know they can be trusted.  If this was a read reconstruction,
1943		 * other endio functions will fiddle the uptodate bits
1944		 */
1945		if (rbio->operation == BTRFS_RBIO_WRITE) {
1946			for (i = 0;  i < rbio->stripe_npages; i++) {
1947				if (faila != -1) {
1948					page = rbio_stripe_page(rbio, faila, i);
1949					SetPageUptodate(page);
1950				}
1951				if (failb != -1) {
1952					page = rbio_stripe_page(rbio, failb, i);
1953					SetPageUptodate(page);
1954				}
1955			}
1956		}
1957		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1958			/*
1959			 * if we're rebuilding a read, we have to use
1960			 * pages from the bio list
1961			 */
1962			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1963			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1964			    (stripe == faila || stripe == failb)) {
1965				page = page_in_rbio(rbio, stripe, pagenr, 0);
1966			} else {
1967				page = rbio_stripe_page(rbio, stripe, pagenr);
1968			}
1969			kunmap(page);
1970		}
1971	}
1972
1973	err = BLK_STS_OK;
1974cleanup:
1975	kfree(pointers);
1976
1977cleanup_io:
1978	/*
1979	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1980	 * valid rbio which is consistent with ondisk content, thus such a
1981	 * valid rbio can be cached to avoid further disk reads.
1982	 */
1983	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1984	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1985		/*
1986		 * - In case of two failures, where rbio->failb != -1:
1987		 *
1988		 *   Do not cache this rbio since the above read reconstruction
1989		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
1990		 *   changed some content of stripes which are not identical to
1991		 *   on-disk content any more, otherwise, a later write/recover
1992		 *   may steal stripe_pages from this rbio and end up with
1993		 *   corruptions or rebuild failures.
1994		 *
1995		 * - In case of single failure, where rbio->failb == -1:
1996		 *
1997		 *   Cache this rbio iff the above read reconstruction is
1998		 *   executed without problems.
1999		 */
2000		if (err == BLK_STS_OK && rbio->failb < 0)
2001			cache_rbio_pages(rbio);
2002		else
2003			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2004
2005		rbio_orig_end_io(rbio, err);
2006	} else if (err == BLK_STS_OK) {
2007		rbio->faila = -1;
2008		rbio->failb = -1;
2009
2010		if (rbio->operation == BTRFS_RBIO_WRITE)
2011			finish_rmw(rbio);
2012		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2013			finish_parity_scrub(rbio, 0);
2014		else
2015			BUG();
2016	} else {
2017		rbio_orig_end_io(rbio, err);
2018	}
2019}
2020
2021/*
2022 * This is called only for stripes we've read from disk to
2023 * reconstruct the parity.
2024 */
2025static void raid_recover_end_io(struct bio *bio)
2026{
2027	struct btrfs_raid_bio *rbio = bio->bi_private;
2028
2029	/*
2030	 * we only read stripe pages off the disk, set them
2031	 * up to date if there were no errors
2032	 */
2033	if (bio->bi_status)
2034		fail_bio_stripe(rbio, bio);
2035	else
2036		set_bio_pages_uptodate(bio);
2037	bio_put(bio);
2038
2039	if (!atomic_dec_and_test(&rbio->stripes_pending))
2040		return;
2041
2042	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2043		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2044	else
2045		__raid_recover_end_io(rbio);
2046}
2047
2048/*
2049 * reads everything we need off the disk to reconstruct
2050 * the parity. endio handlers trigger final reconstruction
2051 * when the IO is done.
2052 *
2053 * This is used both for reads from the higher layers and for
2054 * parity construction required to finish a rmw cycle.
2055 */
2056static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2057{
2058	int bios_to_read = 0;
2059	struct bio_list bio_list;
2060	int ret;
2061	int pagenr;
2062	int stripe;
2063	struct bio *bio;
2064
2065	bio_list_init(&bio_list);
2066
2067	ret = alloc_rbio_pages(rbio);
2068	if (ret)
2069		goto cleanup;
2070
2071	atomic_set(&rbio->error, 0);
2072
2073	/*
2074	 * read everything that hasn't failed.  Thanks to the
2075	 * stripe cache, it is possible that some or all of these
2076	 * pages are going to be uptodate.
2077	 */
2078	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
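		/* the failed stripes are what we're rebuilding; count them as errors and don't read them */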
2079		if (rbio->faila == stripe || rbio->failb == stripe) {
2080			atomic_inc(&rbio->error);
2081			continue;
2082		}
2083
2084		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2085			struct page *p;
2086
2087			/*
2088			 * the rmw code may have already read this
2089			 * page in
2090			 */
2091			p = rbio_stripe_page(rbio, stripe, pagenr);
2092			if (PageUptodate(p))
2093				continue;
2094
2095			ret = rbio_add_io_page(rbio, &bio_list,
2096				       rbio_stripe_page(rbio, stripe, pagenr),
2097				       stripe, pagenr, rbio->stripe_len);
2098			if (ret < 0)
2099				goto cleanup;
2100		}
2101	}
2102
2103	bios_to_read = bio_list_size(&bio_list);
2104	if (!bios_to_read) {
2105		/*
2106		 * we might have no bios to read just because the pages
2107		 * were up to date, or we might have no bios to read because
2108		 * the devices were gone.
2109		 */
2110		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2111			__raid_recover_end_io(rbio);
2112			goto out;
2113		} else {
2114			goto cleanup;
2115		}
2116	}
2117
2118	/*
2119	 * the bbio may be freed once we submit the last bio.  Make sure
2120	 * not to touch it after that
2121	 */
2122	atomic_set(&rbio->stripes_pending, bios_to_read);
2123	while (1) {
2124		bio = bio_list_pop(&bio_list);
2125		if (!bio)
2126			break;
2127
2128		bio->bi_private = rbio;
2129		bio->bi_end_io = raid_recover_end_io;
2130		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2131
2132		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2133
2134		submit_bio(bio);
2135	}
2136out:
2137	return 0;
2138
2139cleanup:
2140	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2141	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2142		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2143
2144	while ((bio = bio_list_pop(&bio_list)))
2145		bio_put(bio);
2146
2147	return -EIO;
2148}
2149
2150/*
2151 * the main entry point for reads from the higher layers.  This
2152 * is really only called when the normal read path had a failure,
2153 * so we assume the bio they send down corresponds to a failed part
2154 * of the drive.
2155 */
2156int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2157			  struct btrfs_bio *bbio, u64 stripe_len,
2158			  int mirror_num, int generic_io)
2159{
2160	struct btrfs_raid_bio *rbio;
2161	int ret;
2162
2163	if (generic_io) {
2164		ASSERT(bbio->mirror_num == mirror_num);
2165		btrfs_io_bio(bio)->mirror_num = mirror_num;
2166	}
2167
2168	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2169	if (IS_ERR(rbio)) {
2170		if (generic_io)
2171			btrfs_put_bbio(bbio);
2172		return PTR_ERR(rbio);
2173	}
2174
2175	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2176	bio_list_add(&rbio->bio_list, bio);
2177	rbio->bio_list_bytes = bio->bi_iter.bi_size;
2178
2179	rbio->faila = find_logical_bio_stripe(rbio, bio);
2180	if (rbio->faila == -1) {
2181		btrfs_warn(fs_info,
2182	"%s could not find the bad stripe in raid56, so it cannot be recovered (bio has logical %llu len %llu, bbio has map_type %llu)",
2183			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2184			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2185		if (generic_io)
2186			btrfs_put_bbio(bbio);
2187		kfree(rbio);
2188		return -EIO;
2189	}
2190
2191	if (generic_io) {
2192		btrfs_bio_counter_inc_noblocked(fs_info);
2193		rbio->generic_bio_cnt = 1;
2194	} else {
2195		btrfs_get_bbio(bbio);
2196	}
2197
2198	/*
2199	 * Loop retry:
2200	 * for 'mirror == 2', reconstruct from all other stripes.
2201	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2202	 */
2203	if (mirror_num > 2) {
2204		/*
2205		 * 'mirror == 3' is to fail the p stripe and
2206		 * reconstruct from the q stripe.  'mirror > 3' is to
2207		 * fail a data stripe and reconstruct from p+q stripe.
2208		 */
2209		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2210		ASSERT(rbio->failb > 0);
2211		if (rbio->failb <= rbio->faila)
2212			rbio->failb--;
2213	}
2214
2215	ret = lock_stripe_add(rbio);
2216
2217	/*
2218	 * __raid56_parity_recover will end the bio with
2219	 * any errors it hits.  We don't want to return
2220	 * its error value up the stack because our caller
2221	 * will end up calling bio_endio with any nonzero
2222	 * return
2223	 */
2224	if (ret == 0)
2225		__raid56_parity_recover(rbio);
2226	/*
2227	 * our rbio has been added to the list of
2228	 * rbios that will be handled after the
2229	 * current lock owner is done
2230	 */
2231	return 0;
2232
2233}
2234
2235static void rmw_work(struct btrfs_work *work)
2236{
2237	struct btrfs_raid_bio *rbio;
2238
2239	rbio = container_of(work, struct btrfs_raid_bio, work);
2240	raid56_rmw_stripe(rbio);
2241}
2242
2243static void read_rebuild_work(struct btrfs_work *work)
2244{
2245	struct btrfs_raid_bio *rbio;
2246
2247	rbio = container_of(work, struct btrfs_raid_bio, work);
2248	__raid56_parity_recover(rbio);
2249}
2250
2251/*
2252 * The following code is used to scrub/replace the parity stripe
2253 *
2254 * Caller must have already increased bio_counter for getting @bbio.
2255 *
2256 * Note: We need to make sure all the pages that are added into the scrub/replace
2257 * raid bio are correct and will not be changed during the scrub/replace. That
2258 * is, those pages only hold metadata or file data protected by checksums.
2259 */
2260
2261struct btrfs_raid_bio *
2262raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2263			       struct btrfs_bio *bbio, u64 stripe_len,
2264			       struct btrfs_device *scrub_dev,
2265			       unsigned long *dbitmap, int stripe_nsectors)
2266{
2267	struct btrfs_raid_bio *rbio;
2268	int i;
2269
2270	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2271	if (IS_ERR(rbio))
2272		return NULL;
2273	bio_list_add(&rbio->bio_list, bio);
2274	/*
2275	 * This is a special bio which is used to hold the completion handler
2276	 * and make the scrub rbio similar to the other types
2277	 */
2278	ASSERT(!bio->bi_iter.bi_size);
2279	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2280
2281	/*
2282	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2283	 * to the end position, so this search can start from the first parity
2284	 * stripe.
2285	 */
2286	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2287		if (bbio->stripes[i].dev == scrub_dev) {
2288			rbio->scrubp = i;
2289			break;
2290		}
2291	}
2292	ASSERT(i < rbio->real_stripes);
2293
2294	/* Now we just support the sectorsize equals to page size */
2295	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2296	ASSERT(rbio->stripe_npages == stripe_nsectors);
2297	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2298
2299	/*
2300	 * We have already increased bio_counter when getting bbio, record it
2301	 * so we can free it at rbio_orig_end_io().
2302	 */
2303	rbio->generic_bio_cnt = 1;
2304
2305	return rbio;
2306}
2307
2308/* Used for both parity scrub and missing. */
2309void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2310			    u64 logical)
2311{
2312	int stripe_offset;
2313	int index;
2314
2315	ASSERT(logical >= rbio->bbio->raid_map[0]);
2316	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2317				rbio->stripe_len * rbio->nr_data);
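	/* bio_pages is indexed by the page offset from the start of the full stripe */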
2318	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2319	index = stripe_offset >> PAGE_SHIFT;
2320	rbio->bio_pages[index] = page;
2321}
2322
2323/*
2324 * We only scrub the parity for the horizontal stripes where we have correct
2325 * data, so we needn't allocate all pages for all the stripes.
2326 */
2327static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2328{
2329	int i;
2330	int bit;
2331	int index;
2332	struct page *page;
2333
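	/* allocate pages only for the horizontal slices marked in dbitmap, across every stripe */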
2334	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2335		for (i = 0; i < rbio->real_stripes; i++) {
2336			index = i * rbio->stripe_npages + bit;
2337			if (rbio->stripe_pages[index])
2338				continue;
2339
2340			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2341			if (!page)
2342				return -ENOMEM;
2343			rbio->stripe_pages[index] = page;
2344		}
2345	}
2346	return 0;
2347}
2348
2349static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2350					 int need_check)
2351{
2352	struct btrfs_bio *bbio = rbio->bbio;
2353	void *pointers[rbio->real_stripes];
2354	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2355	int nr_data = rbio->nr_data;
2356	int stripe;
2357	int pagenr;
2358	int p_stripe = -1;
2359	int q_stripe = -1;
2360	struct page *p_page = NULL;
2361	struct page *q_page = NULL;
2362	struct bio_list bio_list;
2363	struct bio *bio;
2364	int is_replace = 0;
2365	int ret;
2366
2367	bio_list_init(&bio_list);
2368
2369	if (rbio->real_stripes - rbio->nr_data == 1) {
2370		p_stripe = rbio->real_stripes - 1;
2371	} else if (rbio->real_stripes - rbio->nr_data == 2) {
2372		p_stripe = rbio->real_stripes - 2;
2373		q_stripe = rbio->real_stripes - 1;
2374	} else {
2375		BUG();
2376	}
2377
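	/*
	 * if this is a dev-replace and the device holding the scrubbed parity
	 * has a replacement target, remember which pages must also be written
	 * to that target
	 */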
2378	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2379		is_replace = 1;
2380		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2381	}
2382
2383	/*
2384	 * The higher layers (scrubber) are unlikely to use
2385	 * this area of the disk again soon, so don't cache
2386	 * it.
2387	 */
2388	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2389
2390	if (!need_check)
2391		goto writeback;
2392
2393	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2394	if (!p_page)
2395		goto cleanup;
2396	SetPageUptodate(p_page);
2397
2398	if (q_stripe != -1) {
2399		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2400		if (!q_page) {
2401			__free_page(p_page);
2402			goto cleanup;
2403		}
2404		SetPageUptodate(q_page);
2405	}
2406
2407	atomic_set(&rbio->error, 0);
2408
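	/*
	 * recompute the parity for every horizontal slice that has data and
	 * compare it with the parity read from disk; slices whose parity
	 * already matches are cleared from dbitmap and skipped at writeback
	 */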
2409	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2410		struct page *p;
2411		void *parity;
2412		/* first collect one page from each data stripe */
2413		for (stripe = 0; stripe < nr_data; stripe++) {
2414			p = page_in_rbio(rbio, stripe, pagenr, 0);
2415			pointers[stripe] = kmap(p);
2416		}
2417
2418		/* then add the parity stripe */
2419		pointers[stripe++] = kmap(p_page);
2420
2421		if (q_stripe != -1) {
2422
2423			/*
2424			 * raid6, add the qstripe and call the
2425			 * library function to fill in our p/q
2426			 */
2427			pointers[stripe++] = kmap(q_page);
2428
2429			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2430						pointers);
2431		} else {
2432			/* raid5 */
2433			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2434			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2435		}
2436
2437		/* Check scrubbing parity and repair it */
2438		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2439		parity = kmap(p);
2440		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2441			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
2442		else
2443			/* Parity is right, needn't writeback */
2444			bitmap_clear(rbio->dbitmap, pagenr, 1);
2445		kunmap(p);
2446
2447		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2448			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2449	}
2450
2451	__free_page(p_page);
2452	if (q_page)
2453		__free_page(q_page);
2454
2455writeback:
2456	/*
2457	 * time to start writing.  Make bios for everything from the
2458	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2459	 * everything else.
2460	 */
2461	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2462		struct page *page;
2463
2464		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2465		ret = rbio_add_io_page(rbio, &bio_list,
2466			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2467		if (ret)
2468			goto cleanup;
2469	}
2470
2471	if (!is_replace)
2472		goto submit_write;
2473
2474	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2475		struct page *page;
2476
2477		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2478		ret = rbio_add_io_page(rbio, &bio_list, page,
2479				       bbio->tgtdev_map[rbio->scrubp],
2480				       pagenr, rbio->stripe_len);
2481		if (ret)
2482			goto cleanup;
2483	}
2484
2485submit_write:
2486	nr_data = bio_list_size(&bio_list);
2487	if (!nr_data) {
2488		/* Every parity is right */
2489		rbio_orig_end_io(rbio, BLK_STS_OK);
2490		return;
2491	}
2492
2493	atomic_set(&rbio->stripes_pending, nr_data);
2494
2495	while (1) {
2496		bio = bio_list_pop(&bio_list);
2497		if (!bio)
2498			break;
2499
2500		bio->bi_private = rbio;
2501		bio->bi_end_io = raid_write_end_io;
2502		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2503
2504		submit_bio(bio);
2505	}
2506	return;
2507
2508cleanup:
2509	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2510
2511	while ((bio = bio_list_pop(&bio_list)))
2512		bio_put(bio);
2513}
2514
2515static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2516{
2517	if (stripe >= 0 && stripe < rbio->nr_data)
2518		return 1;
2519	return 0;
2520}
2521
2522/*
2523 * While we're doing the parity check and repair, we could have errors
2524 * in reading pages off the disk.  This checks for errors and if we're
2525 * not able to read the page it'll trigger parity reconstruction.  The
2526 * parity scrub will be finished after we've reconstructed the failed
2527 * stripes
2528 */
2529static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2530{
2531	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2532		goto cleanup;
2533
2534	if (rbio->faila >= 0 || rbio->failb >= 0) {
2535		int dfail = 0, failp = -1;
2536
2537		if (is_data_stripe(rbio, rbio->faila))
2538			dfail++;
2539		else if (is_parity_stripe(rbio->faila))
2540			failp = rbio->faila;
2541
2542		if (is_data_stripe(rbio, rbio->failb))
2543			dfail++;
2544		else if (is_parity_stripe(rbio->failb))
2545			failp = rbio->failb;
2546
2547		/*
2548		 * We cannot use the parity that is being scrubbed to repair
2549		 * the data, so the repair capability is reduced.
2550		 * (In the case of RAID5, we cannot repair anything.)
2551		 */
2552		if (dfail > rbio->bbio->max_errors - 1)
2553			goto cleanup;
2554
2555		/*
2556		 * If all the data is good and only the parity is bad, just
2557		 * repair the parity.
2558		 */
2559		if (dfail == 0) {
2560			finish_parity_scrub(rbio, 0);
2561			return;
2562		}
2563
2564		/*
2565		 * Here we have one corrupted data stripe and one corrupted
2566		 * parity on RAID6.  If the corrupted parity is the one being
2567		 * scrubbed, we can use the other parity to repair the data;
2568		 * otherwise the data stripe cannot be repaired.
2569		 */
2570		if (failp != rbio->scrubp)
2571			goto cleanup;
2572
2573		__raid_recover_end_io(rbio);
2574	} else {
2575		finish_parity_scrub(rbio, 1);
2576	}
2577	return;
2578
2579cleanup:
2580	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2581}
2582
2583/*
2584 * end io for the read phase of the parity scrub.  All the bios here are physical
2585 * stripe bios we've read from the disk so we can check and repair the parity of
2586 * the stripe.
2587 *
2588 * This will usually kick off finish_parity_scrub once all the bios are read in,
2589 * but it may trigger parity reconstruction if we had any errors along the way
2590 */
2591static void raid56_parity_scrub_end_io(struct bio *bio)
2592{
2593	struct btrfs_raid_bio *rbio = bio->bi_private;
2594
2595	if (bio->bi_status)
2596		fail_bio_stripe(rbio, bio);
2597	else
2598		set_bio_pages_uptodate(bio);
2599
2600	bio_put(bio);
2601
2602	if (!atomic_dec_and_test(&rbio->stripes_pending))
2603		return;
2604
2605	/*
2606	 * this will normally call finish_rmw to start our write
2607	 * but if there are any failed stripes we'll reconstruct
2608	 * from parity first
2609	 */
2610	validate_rbio_for_parity_scrub(rbio);
2611}
2612
2613static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2614{
2615	int bios_to_read = 0;
2616	struct bio_list bio_list;
2617	int ret;
2618	int pagenr;
2619	int stripe;
2620	struct bio *bio;
2621
2622	bio_list_init(&bio_list);
2623
2624	ret = alloc_rbio_essential_pages(rbio);
2625	if (ret)
2626		goto cleanup;
2627
2628	atomic_set(&rbio->error, 0);
2629	/*
2630	 * build a list of bios to read all the missing parts of this
2631	 * stripe
2632	 */
2633	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2634		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2635			struct page *page;
2636			/*
2637			 * we want to find all the pages missing from
2638			 * the rbio and read them from the disk.  If
2639			 * page_in_rbio finds a page in the bio list
2640			 * we don't need to read it off the stripe.
2641			 */
2642			page = page_in_rbio(rbio, stripe, pagenr, 1);
2643			if (page)
2644				continue;
2645
2646			page = rbio_stripe_page(rbio, stripe, pagenr);
2647			/*
2648			 * the bio cache may have handed us an uptodate
2649			 * page.  If so, be happy and use it
2650			 */
2651			if (PageUptodate(page))
2652				continue;
2653
2654			ret = rbio_add_io_page(rbio, &bio_list, page,
2655				       stripe, pagenr, rbio->stripe_len);
2656			if (ret)
2657				goto cleanup;
2658		}
2659	}
2660
2661	bios_to_read = bio_list_size(&bio_list);
2662	if (!bios_to_read) {
2663		/*
2664		 * this can happen if others have merged with
2665		 * us; it means there is nothing left to read.
2666		 * But if there are missing devices it may not be
2667		 * safe to do the full stripe write yet.
2668		 */
2669		goto finish;
2670	}
2671
2672	/*
2673	 * the bbio may be freed once we submit the last bio.  Make sure
2674	 * not to touch it after that
2675	 */
2676	atomic_set(&rbio->stripes_pending, bios_to_read);
2677	while (1) {
2678		bio = bio_list_pop(&bio_list);
2679		if (!bio)
2680			break;
2681
2682		bio->bi_private = rbio;
2683		bio->bi_end_io = raid56_parity_scrub_end_io;
2684		bio_set_op_attrs(bio, REQ_OP_READ, 0);
2685
2686		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2687
2688		submit_bio(bio);
2689	}
2690	/* the actual write will happen once the reads are done */
2691	return;
2692
2693cleanup:
2694	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2695
2696	while ((bio = bio_list_pop(&bio_list)))
2697		bio_put(bio);
2698
2699	return;
2700
2701finish:
2702	validate_rbio_for_parity_scrub(rbio);
2703}
2704
2705static void scrub_parity_work(struct btrfs_work *work)
2706{
2707	struct btrfs_raid_bio *rbio;
2708
2709	rbio = container_of(work, struct btrfs_raid_bio, work);
2710	raid56_parity_scrub_stripe(rbio);
2711}
2712
2713static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2714{
2715	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2716			scrub_parity_work, NULL, NULL);
2717
2718	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2719}
2720
2721void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2722{
2723	if (!lock_stripe_add(rbio))
2724		async_scrub_parity(rbio);
2725}
2726
2727/* The following code is used for dev replace of a missing RAID 5/6 device. */
2728
2729struct btrfs_raid_bio *
2730raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2731			  struct btrfs_bio *bbio, u64 length)
2732{
2733	struct btrfs_raid_bio *rbio;
2734
2735	rbio = alloc_rbio(fs_info, bbio, length);
2736	if (IS_ERR(rbio))
2737		return NULL;
2738
2739	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2740	bio_list_add(&rbio->bio_list, bio);
2741	/*
2742	 * This is a special bio which is used to hold the completion handler
2743	 * and make the scrub rbio similar to the other types
2744	 */
2745	ASSERT(!bio->bi_iter.bi_size);
2746
2747	rbio->faila = find_logical_bio_stripe(rbio, bio);
2748	if (rbio->faila == -1) {
2749		BUG();
2750		kfree(rbio);
2751		return NULL;
2752	}
2753
2754	/*
2755	 * When we get bbio, we have already increased bio_counter, record it
2756	 * so we can free it at rbio_orig_end_io()
2757	 */
2758	rbio->generic_bio_cnt = 1;
2759
2760	return rbio;
2761}
2762
2763void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2764{
2765	if (!lock_stripe_add(rbio))
2766		async_read_rebuild(rbio);
2767}