   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Fusion-io  All rights reserved.
   4 * Copyright (C) 2012 Intel Corp. All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/bio.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/raid/pq.h>
  12#include <linux/hash.h>
  13#include <linux/list_sort.h>
  14#include <linux/raid/xor.h>
  15#include <linux/mm.h>
  16#include "messages.h"
  17#include "misc.h"
  18#include "ctree.h"
  19#include "disk-io.h"
  20#include "volumes.h"
  21#include "raid56.h"
  22#include "async-thread.h"
  23#include "file-item.h"
  24#include "btrfs_inode.h"
  25
  26/* set when additional merges to this rbio are not allowed */
  27#define RBIO_RMW_LOCKED_BIT	1
  28
  29/*
  30 * set when this rbio is sitting in the hash, but it is just a cache
  31 * of past RMW
  32 */
  33#define RBIO_CACHE_BIT		2
  34
  35/*
  36 * set when it is safe to trust the stripe_pages for caching
  37 */
  38#define RBIO_CACHE_READY_BIT	3
  39
  40#define RBIO_CACHE_SIZE 1024
  41
  42#define BTRFS_STRIPE_HASH_TABLE_BITS				11
  43
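/*
 * Example: with BTRFS_STRIPE_HASH_TABLE_BITS == 11 the stripe hash
 * table below holds 1 << 11 == 2048 buckets, each protected by its
 * own spinlock.
 */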
  44/* Used by the raid56 code to lock stripes for read/modify/write */
  45struct btrfs_stripe_hash {
  46	struct list_head hash_list;
  47	spinlock_t lock;
  48};
  49
  50/* Used by the raid56 code to lock stripes for read/modify/write */
  51struct btrfs_stripe_hash_table {
  52	struct list_head stripe_cache;
  53	spinlock_t cache_lock;
  54	int cache_size;
  55	struct btrfs_stripe_hash table[];
  56};
  57
  58/*
  59 * A bvec like structure to present a sector inside a page.
  60 *
  61 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
  62 */
  63struct sector_ptr {
  64	struct page *page;
  65	unsigned int pgoff:24;
  66	unsigned int uptodate:8;
  67};
  68
  69static void rmw_rbio_work(struct work_struct *work);
  70static void rmw_rbio_work_locked(struct work_struct *work);
  71static void index_rbio_pages(struct btrfs_raid_bio *rbio);
  72static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
  73
  74static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check);
  75static void scrub_rbio_work_locked(struct work_struct *work);
  76
  77static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
  78{
  79	bitmap_free(rbio->error_bitmap);
  80	kfree(rbio->stripe_pages);
  81	kfree(rbio->bio_sectors);
  82	kfree(rbio->stripe_sectors);
  83	kfree(rbio->finish_pointers);
  84}
  85
  86static void free_raid_bio(struct btrfs_raid_bio *rbio)
  87{
  88	int i;
  89
  90	if (!refcount_dec_and_test(&rbio->refs))
  91		return;
  92
  93	WARN_ON(!list_empty(&rbio->stripe_cache));
  94	WARN_ON(!list_empty(&rbio->hash_list));
  95	WARN_ON(!bio_list_empty(&rbio->bio_list));
  96
  97	for (i = 0; i < rbio->nr_pages; i++) {
  98		if (rbio->stripe_pages[i]) {
  99			__free_page(rbio->stripe_pages[i]);
 100			rbio->stripe_pages[i] = NULL;
 101		}
 102	}
 103
 104	btrfs_put_bioc(rbio->bioc);
 105	free_raid_bio_pointers(rbio);
 106	kfree(rbio);
 107}
 108
 109static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
 110{
 111	INIT_WORK(&rbio->work, work_func);
 112	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
 113}
 114
 115/*
 116 * the stripe hash table is used for locking, and to collect
 117 * bios in hopes of making a full stripe
 118 */
 119int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 120{
 121	struct btrfs_stripe_hash_table *table;
 122	struct btrfs_stripe_hash_table *x;
 123	struct btrfs_stripe_hash *cur;
 124	struct btrfs_stripe_hash *h;
 125	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 126	int i;
 127
 128	if (info->stripe_hash_table)
 129		return 0;
 130
 131	/*
 132	 * The table is large, starting with order 4 and can go as high as
 133	 * order 7 in case lock debugging is turned on.
 134	 *
 135	 * Try harder to allocate and fallback to vmalloc to lower the chance
 136	 * of a failing mount.
 137	 */
 138	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
 139	if (!table)
 140		return -ENOMEM;
 141
 142	spin_lock_init(&table->cache_lock);
 143	INIT_LIST_HEAD(&table->stripe_cache);
 144
 145	h = table->table;
 146
 147	for (i = 0; i < num_entries; i++) {
 148		cur = h + i;
 149		INIT_LIST_HEAD(&cur->hash_list);
 150		spin_lock_init(&cur->lock);
 151	}
 152
 153	x = cmpxchg(&info->stripe_hash_table, NULL, table);
 154	kvfree(x);
 155	return 0;
 156}
 157
 158/*
  159 * Caching an rbio means copying anything from the
  160 * bio_sectors array into the stripe_pages array.  We
  161 * use the page uptodate bit in the stripe cache array
  162 * to indicate if it has valid data.
  163 *
  164 * Once the caching is done, we set the cache ready
  165 * bit.
 166 */
 167static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 168{
 169	int i;
 170	int ret;
 171
 172	ret = alloc_rbio_pages(rbio);
 173	if (ret)
 174		return;
 175
 176	for (i = 0; i < rbio->nr_sectors; i++) {
 177		/* Some range not covered by bio (partial write), skip it */
 178		if (!rbio->bio_sectors[i].page) {
 179			/*
 180			 * Even if the sector is not covered by bio, if it is
 181			 * a data sector it should still be uptodate as it is
 182			 * read from disk.
 183			 */
 184			if (i < rbio->nr_data * rbio->stripe_nsectors)
 185				ASSERT(rbio->stripe_sectors[i].uptodate);
 186			continue;
 187		}
 188
 189		ASSERT(rbio->stripe_sectors[i].page);
 190		memcpy_page(rbio->stripe_sectors[i].page,
 191			    rbio->stripe_sectors[i].pgoff,
 192			    rbio->bio_sectors[i].page,
 193			    rbio->bio_sectors[i].pgoff,
 194			    rbio->bioc->fs_info->sectorsize);
 195		rbio->stripe_sectors[i].uptodate = 1;
 196	}
 197	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 198}
 199
 200/*
 201 * we hash on the first logical address of the stripe
 202 */
 203static int rbio_bucket(struct btrfs_raid_bio *rbio)
 204{
 205	u64 num = rbio->bioc->raid_map[0];
 206
 207	/*
 208	 * we shift down quite a bit.  We're using byte
 209	 * addressing, and most of the lower bits are zeros.
 210	 * This tends to upset hash_64, and it consistently
 211	 * returns just one or two different values.
 212	 *
 213	 * shifting off the lower bits fixes things.
 214	 */
 215	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 216}
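/*
 * Illustrative example: two full stripes starting at logical addresses
 * 0x10000 and 0x20000 differ only below bit 16, so after the ">> 16"
 * they feed 0x1 and 0x2 into hash_64(), which spreads them across the
 * 2048 buckets.  Feeding the raw byte addresses instead would leave
 * most of the varying input bits zero.
 */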
 217
 218static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
 219				       unsigned int page_nr)
 220{
 221	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
 222	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
 223	int i;
 224
 225	ASSERT(page_nr < rbio->nr_pages);
 226
 227	for (i = sectors_per_page * page_nr;
 228	     i < sectors_per_page * page_nr + sectors_per_page;
 229	     i++) {
 230		if (!rbio->stripe_sectors[i].uptodate)
 231			return false;
 232	}
 233	return true;
 234}
 235
 236/*
 237 * Update the stripe_sectors[] array to use correct page and pgoff
 238 *
  239 * Should be called every time any page pointer in stripe_pages[] is modified.
 240 */
 241static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
 242{
 243	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
 244	u32 offset;
 245	int i;
 246
 247	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
 248		int page_index = offset >> PAGE_SHIFT;
 249
 250		ASSERT(page_index < rbio->nr_pages);
 251		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
 252		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
 253	}
 254}
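/*
 * Worked example (assuming 4K sectorsize): sector i lives at byte
 * offset i * 4K of the stripe pages, so with 4K pages
 * stripe_sectors[5] maps to stripe_pages[5] at pgoff 0, while with
 * 16K pages it maps to stripe_pages[1] at pgoff 0x1000.
 */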
 255
 256static void steal_rbio_page(struct btrfs_raid_bio *src,
 257			    struct btrfs_raid_bio *dest, int page_nr)
 258{
 259	const u32 sectorsize = src->bioc->fs_info->sectorsize;
 260	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
 261	int i;
 262
 263	if (dest->stripe_pages[page_nr])
 264		__free_page(dest->stripe_pages[page_nr]);
 265	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
 266	src->stripe_pages[page_nr] = NULL;
 267
 268	/* Also update the sector->uptodate bits. */
 269	for (i = sectors_per_page * page_nr;
 270	     i < sectors_per_page * page_nr + sectors_per_page; i++)
 271		dest->stripe_sectors[i].uptodate = true;
 272}
 273
 274static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
 275{
 276	const int sector_nr = (page_nr << PAGE_SHIFT) >>
 277			      rbio->bioc->fs_info->sectorsize_bits;
 278
 279	/*
 280	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
 281	 * we won't have a page which is half data half parity.
 282	 *
 283	 * Thus if the first sector of the page belongs to data stripes, then
 284	 * the full page belongs to data stripes.
 285	 */
 286	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
 287}
 288
 289/*
 290 * Stealing an rbio means taking all the uptodate pages from the stripe array
 291 * in the source rbio and putting them into the destination rbio.
 292 *
 293 * This will also update the involved stripe_sectors[] which are referring to
 294 * the old pages.
 295 */
 296static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 297{
 298	int i;
 299
 300	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 301		return;
 302
 303	for (i = 0; i < dest->nr_pages; i++) {
 304		struct page *p = src->stripe_pages[i];
 305
 306		/*
 307		 * We don't need to steal P/Q pages as they will always be
 308		 * regenerated for RMW or full write anyway.
 309		 */
 310		if (!is_data_stripe_page(src, i))
 311			continue;
 312
 313		/*
 314		 * If @src already has RBIO_CACHE_READY_BIT, it should have
 315		 * all data stripe pages present and uptodate.
 316		 */
 317		ASSERT(p);
 318		ASSERT(full_page_sectors_uptodate(src, i));
 319		steal_rbio_page(src, dest, i);
 320	}
 321	index_stripe_sectors(dest);
 322	index_stripe_sectors(src);
 323}
 324
 325/*
 326 * merging means we take the bio_list from the victim and
 327 * splice it into the destination.  The victim should
 328 * be discarded afterwards.
 329 *
  330 * Must be called with dest->bio_list_lock held.
 331 */
 332static void merge_rbio(struct btrfs_raid_bio *dest,
 333		       struct btrfs_raid_bio *victim)
 334{
 335	bio_list_merge(&dest->bio_list, &victim->bio_list);
 336	dest->bio_list_bytes += victim->bio_list_bytes;
 337	/* Also inherit the bitmaps from @victim. */
 338	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
 339		  dest->stripe_nsectors);
 340	bio_list_init(&victim->bio_list);
 341}
 342
 343/*
 344 * used to prune items that are in the cache.  The caller
 345 * must hold the hash table lock.
 346 */
 347static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 348{
 349	int bucket = rbio_bucket(rbio);
 350	struct btrfs_stripe_hash_table *table;
 351	struct btrfs_stripe_hash *h;
 352	int freeit = 0;
 353
 354	/*
 355	 * check the bit again under the hash table lock.
 356	 */
 357	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 358		return;
 359
 360	table = rbio->bioc->fs_info->stripe_hash_table;
 361	h = table->table + bucket;
 362
 363	/* hold the lock for the bucket because we may be
 364	 * removing it from the hash table
 365	 */
 366	spin_lock(&h->lock);
 367
 368	/*
 369	 * hold the lock for the bio list because we need
 370	 * to make sure the bio list is empty
 371	 */
 372	spin_lock(&rbio->bio_list_lock);
 373
 374	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 375		list_del_init(&rbio->stripe_cache);
 376		table->cache_size -= 1;
 377		freeit = 1;
 378
 379		/* if the bio list isn't empty, this rbio is
 380		 * still involved in an IO.  We take it out
 381		 * of the cache list, and drop the ref that
 382		 * was held for the list.
 383		 *
 384		 * If the bio_list was empty, we also remove
 385		 * the rbio from the hash_table, and drop
 386		 * the corresponding ref
 387		 */
 388		if (bio_list_empty(&rbio->bio_list)) {
 389			if (!list_empty(&rbio->hash_list)) {
 390				list_del_init(&rbio->hash_list);
 391				refcount_dec(&rbio->refs);
 392				BUG_ON(!list_empty(&rbio->plug_list));
 393			}
 394		}
 395	}
 396
 397	spin_unlock(&rbio->bio_list_lock);
 398	spin_unlock(&h->lock);
 399
 400	if (freeit)
 401		free_raid_bio(rbio);
 402}
 403
 404/*
 405 * prune a given rbio from the cache
 406 */
 407static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 408{
 409	struct btrfs_stripe_hash_table *table;
 410	unsigned long flags;
 411
 412	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 413		return;
 414
 415	table = rbio->bioc->fs_info->stripe_hash_table;
 416
 417	spin_lock_irqsave(&table->cache_lock, flags);
 418	__remove_rbio_from_cache(rbio);
 419	spin_unlock_irqrestore(&table->cache_lock, flags);
 420}
 421
 422/*
 423 * remove everything in the cache
 424 */
 425static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 426{
 427	struct btrfs_stripe_hash_table *table;
 428	unsigned long flags;
 429	struct btrfs_raid_bio *rbio;
 430
 431	table = info->stripe_hash_table;
 432
 433	spin_lock_irqsave(&table->cache_lock, flags);
 434	while (!list_empty(&table->stripe_cache)) {
 435		rbio = list_entry(table->stripe_cache.next,
 436				  struct btrfs_raid_bio,
 437				  stripe_cache);
 438		__remove_rbio_from_cache(rbio);
 439	}
 440	spin_unlock_irqrestore(&table->cache_lock, flags);
 441}
 442
 443/*
 444 * remove all cached entries and free the hash table
 445 * used by unmount
 446 */
 447void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 448{
 449	if (!info->stripe_hash_table)
 450		return;
 451	btrfs_clear_rbio_cache(info);
 452	kvfree(info->stripe_hash_table);
 453	info->stripe_hash_table = NULL;
 454}
 455
 456/*
 457 * insert an rbio into the stripe cache.  It
 458 * must have already been prepared by calling
 459 * cache_rbio_pages
 460 *
 461 * If this rbio was already cached, it gets
 462 * moved to the front of the lru.
 463 *
 464 * If the size of the rbio cache is too big, we
 465 * prune an item.
 466 */
 467static void cache_rbio(struct btrfs_raid_bio *rbio)
 468{
 469	struct btrfs_stripe_hash_table *table;
 470	unsigned long flags;
 471
 472	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 473		return;
 474
 475	table = rbio->bioc->fs_info->stripe_hash_table;
 476
 477	spin_lock_irqsave(&table->cache_lock, flags);
 478	spin_lock(&rbio->bio_list_lock);
 479
 480	/* bump our ref if we were not in the list before */
 481	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 482		refcount_inc(&rbio->refs);
 483
  484	if (!list_empty(&rbio->stripe_cache)) {
 485		list_move(&rbio->stripe_cache, &table->stripe_cache);
 486	} else {
 487		list_add(&rbio->stripe_cache, &table->stripe_cache);
 488		table->cache_size += 1;
 489	}
 490
 491	spin_unlock(&rbio->bio_list_lock);
 492
 493	if (table->cache_size > RBIO_CACHE_SIZE) {
 494		struct btrfs_raid_bio *found;
 495
 496		found = list_entry(table->stripe_cache.prev,
 497				  struct btrfs_raid_bio,
 498				  stripe_cache);
 499
 500		if (found != rbio)
 501			__remove_rbio_from_cache(found);
 502	}
 503
 504	spin_unlock_irqrestore(&table->cache_lock, flags);
 505}
 506
 507/*
  508 * Helper function to run the xor_blocks() API.  It is only
  509 * able to do MAX_XOR_BLOCKS at a time, so we need to
  510 * loop through.
 511 */
 512static void run_xor(void **pages, int src_cnt, ssize_t len)
 513{
 514	int src_off = 0;
 515	int xor_src_cnt = 0;
 516	void *dest = pages[src_cnt];
 517
  518	while (src_cnt > 0) {
 519		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 520		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 521
 522		src_cnt -= xor_src_cnt;
 523		src_off += xor_src_cnt;
 524	}
 525}
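/*
 * Example: MAX_XOR_BLOCKS is 4, so for src_cnt == 6 the loop runs
 * xor_blocks() twice (sources 0-3, then sources 4-5), with both calls
 * accumulating into dest == pages[6].
 */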
 526
 527/*
 528 * Returns true if the bio list inside this rbio covers an entire stripe (no
 529 * rmw required).
 530 */
 531static int rbio_is_full(struct btrfs_raid_bio *rbio)
 532{
 533	unsigned long flags;
 534	unsigned long size = rbio->bio_list_bytes;
 535	int ret = 1;
 536
 537	spin_lock_irqsave(&rbio->bio_list_lock, flags);
 538	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
 539		ret = 0;
 540	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
 541	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
 542
 543	return ret;
 544}
 545
 546/*
 547 * returns 1 if it is safe to merge two rbios together.
 548 * The merging is safe if the two rbios correspond to
 549 * the same stripe and if they are both going in the same
 550 * direction (read vs write), and if neither one is
 551 * locked for final IO
 552 *
 553 * The caller is responsible for locking such that
 554 * rmw_locked is safe to test
 555 */
 556static int rbio_can_merge(struct btrfs_raid_bio *last,
 557			  struct btrfs_raid_bio *cur)
 558{
 559	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 560	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 561		return 0;
 562
 563	/*
 564	 * we can't merge with cached rbios, since the
 565	 * idea is that when we merge the destination
 566	 * rbio is going to run our IO for us.  We can
 567	 * steal from cached rbios though, other functions
 568	 * handle that.
 569	 */
 570	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 571	    test_bit(RBIO_CACHE_BIT, &cur->flags))
 572		return 0;
 573
 574	if (last->bioc->raid_map[0] != cur->bioc->raid_map[0])
 575		return 0;
 576
 577	/* we can't merge with different operations */
 578	if (last->operation != cur->operation)
 579		return 0;
 580	/*
  581	 * Parity scrub needs to read the full stripe from the drive,
  582	 * check and repair the parity, and write the new results.
  583	 *
  584	 * We're not allowed to add any new bios to the bio list here:
  585	 * anyone else that wants to change this stripe needs to do
  586	 * their own rmw.
 587	 */
 588	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
 589		return 0;
 590
 591	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
 592	    last->operation == BTRFS_RBIO_READ_REBUILD)
 593		return 0;
 594
 595	return 1;
 596}
 597
 598static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
 599					     unsigned int stripe_nr,
 600					     unsigned int sector_nr)
 601{
 602	ASSERT(stripe_nr < rbio->real_stripes);
 603	ASSERT(sector_nr < rbio->stripe_nsectors);
 604
 605	return stripe_nr * rbio->stripe_nsectors + sector_nr;
 606}
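/*
 * Example: with a 64K stripe and 4K sectorsize (stripe_nsectors == 16),
 * sector 3 of stripe 2 lives at flat index 2 * 16 + 3 == 35.
 */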
 607
 608/* Return a sector from rbio->stripe_sectors, not from the bio list */
 609static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
 610					     unsigned int stripe_nr,
 611					     unsigned int sector_nr)
 612{
 613	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
 614							      sector_nr)];
 615}
 616
 617/* Grab a sector inside P stripe */
 618static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
 619					      unsigned int sector_nr)
 620{
 621	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
 622}
 623
 624/* Grab a sector inside Q stripe, return NULL if not RAID6 */
 625static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
 626					      unsigned int sector_nr)
 627{
 628	if (rbio->nr_data + 1 == rbio->real_stripes)
 629		return NULL;
 630	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
 631}
 632
 633/*
 634 * The first stripe in the table for a logical address
 635 * has the lock.  rbios are added in one of three ways:
 636 *
 637 * 1) Nobody has the stripe locked yet.  The rbio is given
 638 * the lock and 0 is returned.  The caller must start the IO
 639 * themselves.
 640 *
 641 * 2) Someone has the stripe locked, but we're able to merge
 642 * with the lock owner.  The rbio is freed and the IO will
 643 * start automatically along with the existing rbio.  1 is returned.
 644 *
 645 * 3) Someone has the stripe locked, but we're not able to merge.
 646 * The rbio is added to the lock owner's plug list, or merged into
 647 * an rbio already on the plug list.  When the lock owner unlocks,
 648 * the next rbio on the list is run and the IO is started automatically.
 649 * 1 is returned
 650 *
 651 * If we return 0, the caller still owns the rbio and must continue with
 652 * IO submission.  If we return 1, the caller must assume the rbio has
 653 * already been freed.
 654 */
 655static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 656{
 657	struct btrfs_stripe_hash *h;
 658	struct btrfs_raid_bio *cur;
 659	struct btrfs_raid_bio *pending;
 660	unsigned long flags;
 661	struct btrfs_raid_bio *freeit = NULL;
 662	struct btrfs_raid_bio *cache_drop = NULL;
 663	int ret = 0;
 664
 665	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
 666
 667	spin_lock_irqsave(&h->lock, flags);
 668	list_for_each_entry(cur, &h->hash_list, hash_list) {
 669		if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0])
 670			continue;
 671
 672		spin_lock(&cur->bio_list_lock);
 673
 674		/* Can we steal this cached rbio's pages? */
 675		if (bio_list_empty(&cur->bio_list) &&
 676		    list_empty(&cur->plug_list) &&
 677		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 678		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 679			list_del_init(&cur->hash_list);
 680			refcount_dec(&cur->refs);
 681
 682			steal_rbio(cur, rbio);
 683			cache_drop = cur;
 684			spin_unlock(&cur->bio_list_lock);
 685
 686			goto lockit;
 687		}
 688
 689		/* Can we merge into the lock owner? */
 690		if (rbio_can_merge(cur, rbio)) {
 691			merge_rbio(cur, rbio);
 692			spin_unlock(&cur->bio_list_lock);
 693			freeit = rbio;
 694			ret = 1;
 695			goto out;
 696		}
 697
 698
 699		/*
 700		 * We couldn't merge with the running rbio, see if we can merge
 701		 * with the pending ones.  We don't have to check for rmw_locked
 702		 * because there is no way they are inside finish_rmw right now
 703		 */
 704		list_for_each_entry(pending, &cur->plug_list, plug_list) {
 705			if (rbio_can_merge(pending, rbio)) {
 706				merge_rbio(pending, rbio);
 707				spin_unlock(&cur->bio_list_lock);
 708				freeit = rbio;
 709				ret = 1;
 710				goto out;
 711			}
 712		}
 713
 714		/*
  715		 * No merging, put us on the tail of the plug list, our rbio
  716		 * will be started when the currently running rbio unlocks
 717		 */
 718		list_add_tail(&rbio->plug_list, &cur->plug_list);
 719		spin_unlock(&cur->bio_list_lock);
 720		ret = 1;
 721		goto out;
 722	}
 723lockit:
 724	refcount_inc(&rbio->refs);
 725	list_add(&rbio->hash_list, &h->hash_list);
 726out:
 727	spin_unlock_irqrestore(&h->lock, flags);
 728	if (cache_drop)
 729		remove_rbio_from_cache(cache_drop);
 730	if (freeit)
 731		free_raid_bio(freeit);
 732	return ret;
 733}
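/*
 * Typical caller pattern, as a minimal sketch (do_the_io() stands in
 * for whatever worker handles this rbio's operation):
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		do_the_io(rbio);
 *
 * On a return of 1 the rbio was merged or plugged and must be treated
 * as already freed.
 */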
 734
 735static void recover_rbio_work_locked(struct work_struct *work);
 736
 737/*
 738 * called as rmw or parity rebuild is completed.  If the plug list has more
 739 * rbios waiting for this stripe, the next one on the list will be started
 740 */
 741static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 742{
 743	int bucket;
 744	struct btrfs_stripe_hash *h;
 745	unsigned long flags;
 746	int keep_cache = 0;
 747
 748	bucket = rbio_bucket(rbio);
 749	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
 750
 751	if (list_empty(&rbio->plug_list))
 752		cache_rbio(rbio);
 753
 754	spin_lock_irqsave(&h->lock, flags);
 755	spin_lock(&rbio->bio_list_lock);
 756
 757	if (!list_empty(&rbio->hash_list)) {
 758		/*
 759		 * if we're still cached and there is no other IO
 760		 * to perform, just leave this rbio here for others
 761		 * to steal from later
 762		 */
 763		if (list_empty(&rbio->plug_list) &&
 764		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 765			keep_cache = 1;
 766			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 767			BUG_ON(!bio_list_empty(&rbio->bio_list));
 768			goto done;
 769		}
 770
 771		list_del_init(&rbio->hash_list);
 772		refcount_dec(&rbio->refs);
 773
 774		/*
 775		 * we use the plug list to hold all the rbios
 776		 * waiting for the chance to lock this stripe.
 777		 * hand the lock over to one of them.
 778		 */
 779		if (!list_empty(&rbio->plug_list)) {
 780			struct btrfs_raid_bio *next;
 781			struct list_head *head = rbio->plug_list.next;
 782
 783			next = list_entry(head, struct btrfs_raid_bio,
 784					  plug_list);
 785
 786			list_del_init(&rbio->plug_list);
 787
 788			list_add(&next->hash_list, &h->hash_list);
 789			refcount_inc(&next->refs);
 790			spin_unlock(&rbio->bio_list_lock);
 791			spin_unlock_irqrestore(&h->lock, flags);
 792
 793			if (next->operation == BTRFS_RBIO_READ_REBUILD)
 794				start_async_work(next, recover_rbio_work_locked);
 795			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
 796				steal_rbio(rbio, next);
 797				start_async_work(next, recover_rbio_work_locked);
 798			} else if (next->operation == BTRFS_RBIO_WRITE) {
 799				steal_rbio(rbio, next);
 800				start_async_work(next, rmw_rbio_work_locked);
 801			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 802				steal_rbio(rbio, next);
 803				start_async_work(next, scrub_rbio_work_locked);
 804			}
 805
 806			goto done_nolock;
 807		}
 808	}
 809done:
 810	spin_unlock(&rbio->bio_list_lock);
 811	spin_unlock_irqrestore(&h->lock, flags);
 812
 813done_nolock:
 814	if (!keep_cache)
 815		remove_rbio_from_cache(rbio);
 816}
 817
 818static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
 819{
 820	struct bio *next;
 821
 822	while (cur) {
 823		next = cur->bi_next;
 824		cur->bi_next = NULL;
 825		cur->bi_status = err;
 826		bio_endio(cur);
 827		cur = next;
 828	}
 829}
 830
 831/*
 832 * this frees the rbio and runs through all the bios in the
 833 * bio_list and calls end_io on them
 834 */
 835static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 836{
 837	struct bio *cur = bio_list_get(&rbio->bio_list);
 838	struct bio *extra;
 839
 840	kfree(rbio->csum_buf);
 841	bitmap_free(rbio->csum_bitmap);
 842	rbio->csum_buf = NULL;
 843	rbio->csum_bitmap = NULL;
 844
 845	/*
 846	 * Clear the data bitmap, as the rbio may be cached for later usage.
  847	 * Do this before unlock_stripe() so there will be no new bio
  848	 * for this rbio.
 849	 */
 850	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
 851
 852	/*
 853	 * At this moment, rbio->bio_list is empty, however since rbio does not
 854	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 855	 * hash list, rbio may be merged with others so that rbio->bio_list
 856	 * becomes non-empty.
 857	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
 858	 * more and we can call bio_endio() on all queued bios.
 859	 */
 860	unlock_stripe(rbio);
 861	extra = bio_list_get(&rbio->bio_list);
 862	free_raid_bio(rbio);
 863
 864	rbio_endio_bio_list(cur, err);
 865	if (extra)
 866		rbio_endio_bio_list(extra, err);
 867}
 868
 869/*
 870 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 871 *
 872 * @rbio:               The raid bio
  873 * @stripe_nr:          Stripe number, valid range [0, real_stripes)
 874 * @sector_nr:		Sector number inside the stripe,
 875 *			valid range [0, stripe_nsectors)
 876 * @bio_list_only:      Whether to use sectors inside the bio list only.
 877 *
 878 * The read/modify/write code wants to reuse the original bio page as much
 879 * as possible, and only use stripe_sectors as fallback.
 880 */
 881static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
 882					 int stripe_nr, int sector_nr,
 883					 bool bio_list_only)
 884{
 885	struct sector_ptr *sector;
 886	int index;
 887
 888	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
 889	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
 890
 891	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
 892	ASSERT(index >= 0 && index < rbio->nr_sectors);
 893
 894	spin_lock_irq(&rbio->bio_list_lock);
 895	sector = &rbio->bio_sectors[index];
 896	if (sector->page || bio_list_only) {
 897		/* Don't return sector without a valid page pointer */
 898		if (!sector->page)
 899			sector = NULL;
 900		spin_unlock_irq(&rbio->bio_list_lock);
 901		return sector;
 902	}
 903	spin_unlock_irq(&rbio->bio_list_lock);
 904
 905	return &rbio->stripe_sectors[index];
 906}
 907
 908/*
  909 * Allocation and initial setup for the btrfs_raid_bio.  Note that
  910 * this does not allocate any pages for rbio->stripe_pages.
 911 */
 912static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 913					 struct btrfs_io_context *bioc)
 914{
 915	const unsigned int real_stripes = bioc->num_stripes - bioc->num_tgtdevs;
 916	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
 917	const unsigned int num_pages = stripe_npages * real_stripes;
 918	const unsigned int stripe_nsectors =
 919		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 920	const unsigned int num_sectors = stripe_nsectors * real_stripes;
 921	struct btrfs_raid_bio *rbio;
 922
 923	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
 924	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
 925	/*
 926	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
 927	 * (at most 16) should be no larger than BITS_PER_LONG.
 928	 */
 929	ASSERT(stripe_nsectors <= BITS_PER_LONG);
 930
 931	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
 932	if (!rbio)
 933		return ERR_PTR(-ENOMEM);
 934	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
 935				     GFP_NOFS);
 936	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
 937				    GFP_NOFS);
 938	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
 939				       GFP_NOFS);
 940	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
 941	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
 942
 943	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
 944	    !rbio->finish_pointers || !rbio->error_bitmap) {
 945		free_raid_bio_pointers(rbio);
 946		kfree(rbio);
 947		return ERR_PTR(-ENOMEM);
 948	}
 949
 950	bio_list_init(&rbio->bio_list);
 951	init_waitqueue_head(&rbio->io_wait);
 952	INIT_LIST_HEAD(&rbio->plug_list);
 953	spin_lock_init(&rbio->bio_list_lock);
 954	INIT_LIST_HEAD(&rbio->stripe_cache);
 955	INIT_LIST_HEAD(&rbio->hash_list);
 956	btrfs_get_bioc(bioc);
 957	rbio->bioc = bioc;
 958	rbio->nr_pages = num_pages;
 959	rbio->nr_sectors = num_sectors;
 960	rbio->real_stripes = real_stripes;
 961	rbio->stripe_npages = stripe_npages;
 962	rbio->stripe_nsectors = stripe_nsectors;
 963	refcount_set(&rbio->refs, 1);
 964	atomic_set(&rbio->stripes_pending, 0);
 965
 966	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
 967	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
 968
 969	return rbio;
 970}
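/*
 * Sizing example (assuming 4K pages, 4K sectorsize and no replace
 * target): a six device RAID6 full stripe has real_stripes == 6 and
 * stripe_npages == stripe_nsectors == 16, so num_pages == num_sectors
 * == 96 and nr_data == 4.
 */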
 971
 972/* allocate pages for all the stripes in the bio, including parity */
 973static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
 974{
 975	int ret;
 976
 977	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
 978	if (ret < 0)
 979		return ret;
 980	/* Mapping all sectors */
 981	index_stripe_sectors(rbio);
 982	return 0;
 983}
 984
 985/* only allocate pages for p/q stripes */
 986static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
 987{
 988	const int data_pages = rbio->nr_data * rbio->stripe_npages;
 989	int ret;
 990
 991	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
 992				     rbio->stripe_pages + data_pages);
 993	if (ret < 0)
 994		return ret;
 995
 996	index_stripe_sectors(rbio);
 997	return 0;
 998}
 999
1000/*
 1001 * Return the total number of errors found in the vertical stripe of @sector_nr.
1002 *
1003 * @faila and @failb will also be updated to the first and second stripe
1004 * number of the errors.
1005 */
1006static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1007				     int *faila, int *failb)
1008{
1009	int stripe_nr;
1010	int found_errors = 0;
1011
1012	if (faila || failb) {
1013		/*
1014		 * Both @faila and @failb should be valid pointers if any of
1015		 * them is specified.
1016		 */
1017		ASSERT(faila && failb);
1018		*faila = -1;
1019		*failb = -1;
1020	}
1021
1022	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1023		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
1024
1025		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
1026			found_errors++;
1027			if (faila) {
1028				/* Update faila and failb. */
1029				if (*faila < 0)
1030					*faila = stripe_nr;
1031				else if (*failb < 0)
1032					*failb = stripe_nr;
1033			}
1034		}
1035	}
1036	return found_errors;
1037}
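/*
 * Example: on RAID6 (bioc->max_errors == 2), two marked sectors in a
 * vertical stripe, say in stripes 1 and 4, make this return 2 with
 * *faila == 1 and *failb == 4; a third error exceeds the tolerance.
 */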
1038
1039/*
1040 * Add a single sector @sector into our list of bios for IO.
1041 *
1042 * Return 0 if everything went well.
1043 * Return <0 for error.
1044 */
1045static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
1046			      struct bio_list *bio_list,
1047			      struct sector_ptr *sector,
1048			      unsigned int stripe_nr,
1049			      unsigned int sector_nr,
1050			      enum req_op op)
1051{
1052	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1053	struct bio *last = bio_list->tail;
1054	int ret;
1055	struct bio *bio;
1056	struct btrfs_io_stripe *stripe;
1057	u64 disk_start;
1058
1059	/*
1060	 * Note: here stripe_nr has taken device replace into consideration,
 1061	 * thus it can be larger than rbio->real_stripes.
1062	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
1063	 */
1064	ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
1065	ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
1066	ASSERT(sector->page);
1067
1068	stripe = &rbio->bioc->stripes[stripe_nr];
1069	disk_start = stripe->physical + sector_nr * sectorsize;
1070
1071	/* if the device is missing, just fail this stripe */
1072	if (!stripe->dev->bdev) {
1073		int found_errors;
1074
1075		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
1076			rbio->error_bitmap);
1077
1078		/* Check if we have reached tolerance early. */
1079		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1080							 NULL, NULL);
1081		if (found_errors > rbio->bioc->max_errors)
1082			return -EIO;
1083		return 0;
1084	}
1085
1086	/* see if we can add this page onto our existing bio */
1087	if (last) {
1088		u64 last_end = last->bi_iter.bi_sector << 9;
1089		last_end += last->bi_iter.bi_size;
1090
1091		/*
1092		 * we can't merge these if they are from different
1093		 * devices or if they are not contiguous
1094		 */
1095		if (last_end == disk_start && !last->bi_status &&
1096		    last->bi_bdev == stripe->dev->bdev) {
1097			ret = bio_add_page(last, sector->page, sectorsize,
1098					   sector->pgoff);
1099			if (ret == sectorsize)
1100				return 0;
1101		}
1102	}
1103
1104	/* put a new bio on the list */
1105	bio = bio_alloc(stripe->dev->bdev,
1106			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
1107			op, GFP_NOFS);
1108	bio->bi_iter.bi_sector = disk_start >> 9;
1109	bio->bi_private = rbio;
1110
1111	bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
1112	bio_list_add(bio_list, bio);
1113	return 0;
1114}
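/*
 * Example: for stripe->physical at 1 GiB, sector_nr == 3 and a 4K
 * sectorsize, disk_start is 1 GiB + 12K, and the bio is submitted at
 * bi_sector == disk_start >> 9.
 */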
1115
1116static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
1117{
1118	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1119	struct bio_vec bvec;
1120	struct bvec_iter iter;
1121	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1122		     rbio->bioc->raid_map[0];
1123
1124	bio_for_each_segment(bvec, bio, iter) {
1125		u32 bvec_offset;
1126
1127		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
1128		     bvec_offset += sectorsize, offset += sectorsize) {
1129			int index = offset / sectorsize;
1130			struct sector_ptr *sector = &rbio->bio_sectors[index];
1131
1132			sector->page = bvec.bv_page;
1133			sector->pgoff = bvec.bv_offset + bvec_offset;
1134			ASSERT(sector->pgoff < PAGE_SIZE);
1135		}
1136	}
1137}
1138
1139/*
1140 * helper function to walk our bio list and populate the bio_pages array with
1141 * the result.  This seems expensive, but it is faster than constantly
 1142 * searching through the bio list as we set up the IO in finish_rmw or stripe
1143 * reconstruction.
1144 *
1145 * This must be called before you trust the answers from page_in_rbio
1146 */
1147static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1148{
1149	struct bio *bio;
1150
1151	spin_lock_irq(&rbio->bio_list_lock);
1152	bio_list_for_each(bio, &rbio->bio_list)
1153		index_one_bio(rbio, bio);
1154
1155	spin_unlock_irq(&rbio->bio_list_lock);
1156}
1157
1158static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1159			       struct raid56_bio_trace_info *trace_info)
1160{
1161	const struct btrfs_io_context *bioc = rbio->bioc;
1162	int i;
1163
1164	ASSERT(bioc);
1165
1166	/* We rely on bio->bi_bdev to find the stripe number. */
1167	if (!bio->bi_bdev)
1168		goto not_found;
1169
1170	for (i = 0; i < bioc->num_stripes; i++) {
1171		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1172			continue;
1173		trace_info->stripe_nr = i;
1174		trace_info->devid = bioc->stripes[i].dev->devid;
1175		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1176				     bioc->stripes[i].physical;
1177		return;
1178	}
1179
1180not_found:
1181	trace_info->devid = -1;
1182	trace_info->offset = -1;
1183	trace_info->stripe_nr = -1;
1184}
1185
 1186/* Generate PQ for one vertical stripe. */
1187static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
1188{
1189	void **pointers = rbio->finish_pointers;
1190	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1191	struct sector_ptr *sector;
1192	int stripe;
1193	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
1194
1195	/* First collect one sector from each data stripe */
1196	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1197		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
1198		pointers[stripe] = kmap_local_page(sector->page) +
1199				   sector->pgoff;
1200	}
1201
1202	/* Then add the parity stripe */
1203	sector = rbio_pstripe_sector(rbio, sectornr);
1204	sector->uptodate = 1;
1205	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
1206
1207	if (has_qstripe) {
1208		/*
1209		 * RAID6, add the qstripe and call the library function
1210		 * to fill in our p/q
1211		 */
1212		sector = rbio_qstripe_sector(rbio, sectornr);
1213		sector->uptodate = 1;
1214		pointers[stripe++] = kmap_local_page(sector->page) +
1215				     sector->pgoff;
1216
1217		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
1218					pointers);
1219	} else {
1220		/* raid5 */
1221		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
1222		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
1223	}
1224	for (stripe = stripe - 1; stripe >= 0; stripe--)
1225		kunmap_local(pointers[stripe]);
1226}
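/*
 * Example for the RAID5 path above: with three data stripes the parity
 * sector becomes P = D0 ^ D1 ^ D2, since memcpy() seeds P with D0 and
 * run_xor() folds in the remaining nr_data - 1 sectors.  The RAID6
 * path instead has the raid6 library compute both P and the Galois
 * field Q syndrome in one call.
 */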
1227
1228static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
1229				   struct bio_list *bio_list)
1230{
1231	struct bio *bio;
1232	/* The total sector number inside the full stripe. */
1233	int total_sector_nr;
1234	int sectornr;
1235	int stripe;
1236	int ret;
1237
1238	ASSERT(bio_list_size(bio_list) == 0);
1239
1240	/* We should have at least one data sector. */
1241	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
1242
1243	/*
 1244	 * Reset errors, as we may have errors inherited from a degraded
1245	 * write.
1246	 */
1247	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
1248
1249	/*
1250	 * Start assembly.  Make bios for everything from the higher layers (the
1251	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
1252	 */
1253	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1254	     total_sector_nr++) {
1255		struct sector_ptr *sector;
1256
1257		stripe = total_sector_nr / rbio->stripe_nsectors;
1258		sectornr = total_sector_nr % rbio->stripe_nsectors;
1259
1260		/* This vertical stripe has no data, skip it. */
1261		if (!test_bit(sectornr, &rbio->dbitmap))
1262			continue;
1263
1264		if (stripe < rbio->nr_data) {
1265			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1266			if (!sector)
1267				continue;
1268		} else {
1269			sector = rbio_stripe_sector(rbio, stripe, sectornr);
1270		}
1271
1272		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1273					 sectornr, REQ_OP_WRITE);
1274		if (ret)
1275			goto error;
1276	}
1277
1278	if (likely(!rbio->bioc->num_tgtdevs))
1279		return 0;
1280
1281	/* Make a copy for the replace target device. */
1282	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1283	     total_sector_nr++) {
1284		struct sector_ptr *sector;
1285
1286		stripe = total_sector_nr / rbio->stripe_nsectors;
1287		sectornr = total_sector_nr % rbio->stripe_nsectors;
1288
1289		if (!rbio->bioc->tgtdev_map[stripe]) {
1290			/*
1291			 * We can skip the whole stripe completely, note
1292			 * total_sector_nr will be increased by one anyway.
1293			 */
1294			ASSERT(sectornr == 0);
1295			total_sector_nr += rbio->stripe_nsectors - 1;
1296			continue;
1297		}
1298
1299		/* This vertical stripe has no data, skip it. */
1300		if (!test_bit(sectornr, &rbio->dbitmap))
1301			continue;
1302
1303		if (stripe < rbio->nr_data) {
1304			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1305			if (!sector)
1306				continue;
1307		} else {
1308			sector = rbio_stripe_sector(rbio, stripe, sectornr);
1309		}
1310
1311		ret = rbio_add_io_sector(rbio, bio_list, sector,
1312					 rbio->bioc->tgtdev_map[stripe],
1313					 sectornr, REQ_OP_WRITE);
1314		if (ret)
1315			goto error;
1316	}
1317
1318	return 0;
1319error:
1320	while ((bio = bio_list_pop(bio_list)))
1321		bio_put(bio);
1322	return -EIO;
1323}
1324
1325static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
1326{
1327	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1328	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1329		     rbio->bioc->raid_map[0];
1330	int total_nr_sector = offset >> fs_info->sectorsize_bits;
1331
1332	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
1333
1334	bitmap_set(rbio->error_bitmap, total_nr_sector,
1335		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
1336
1337	/*
1338	 * Special handling for raid56_alloc_missing_rbio() used by
 1339	 * scrub/replace.  Unlike the call path in raid56_parity_recover(), they
1340	 * pass an empty bio here.  Thus we have to find out the missing device
1341	 * and mark the stripe error instead.
1342	 */
1343	if (bio->bi_iter.bi_size == 0) {
1344		bool found_missing = false;
1345		int stripe_nr;
1346
1347		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1348			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
1349				found_missing = true;
1350				bitmap_set(rbio->error_bitmap,
1351					   stripe_nr * rbio->stripe_nsectors,
1352					   rbio->stripe_nsectors);
1353			}
1354		}
1355		ASSERT(found_missing);
1356	}
1357}
1358
1359/*
1360 * For subpage case, we can no longer set page Uptodate directly for
1361 * stripe_pages[], thus we need to locate the sector.
1362 */
1363static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
1364					     struct page *page,
1365					     unsigned int pgoff)
1366{
1367	int i;
1368
1369	for (i = 0; i < rbio->nr_sectors; i++) {
1370		struct sector_ptr *sector = &rbio->stripe_sectors[i];
1371
1372		if (sector->page == page && sector->pgoff == pgoff)
1373			return sector;
1374	}
1375	return NULL;
1376}
1377
1378/*
1379 * this sets each page in the bio uptodate.  It should only be used on private
1380 * rbio pages, nothing that comes in from the higher layers
1381 */
1382static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
1383{
1384	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1385	struct bio_vec *bvec;
1386	struct bvec_iter_all iter_all;
1387
1388	ASSERT(!bio_flagged(bio, BIO_CLONED));
1389
1390	bio_for_each_segment_all(bvec, bio, iter_all) {
1391		struct sector_ptr *sector;
1392		int pgoff;
1393
1394		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
1395		     pgoff += sectorsize) {
1396			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
1397			ASSERT(sector);
1398			if (sector)
1399				sector->uptodate = 1;
1400		}
1401	}
1402}
1403
1404static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
1405{
1406	struct bio_vec *bv = bio_first_bvec_all(bio);
1407	int i;
1408
1409	for (i = 0; i < rbio->nr_sectors; i++) {
1410		struct sector_ptr *sector;
1411
1412		sector = &rbio->stripe_sectors[i];
1413		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1414			break;
1415		sector = &rbio->bio_sectors[i];
1416		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1417			break;
1418	}
1419	ASSERT(i < rbio->nr_sectors);
1420	return i;
1421}
1422
1423static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
1424{
1425	int total_sector_nr = get_bio_sector_nr(rbio, bio);
1426	u32 bio_size = 0;
1427	struct bio_vec *bvec;
1428	struct bvec_iter_all iter_all;
1429	int i;
1430
1431	bio_for_each_segment_all(bvec, bio, iter_all)
1432		bio_size += bvec->bv_len;
1433
1434	/*
1435	 * Since we can have multiple bios touching the error_bitmap, we cannot
1436	 * call bitmap_set() without protection.
1437	 *
1438	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
1439	 */
1440	for (i = total_sector_nr; i < total_sector_nr +
1441	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
1442		set_bit(i, rbio->error_bitmap);
1443}
1444
1445/* Verify the data sectors at read time. */
1446static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
1447				    struct bio *bio)
1448{
1449	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1450	int total_sector_nr = get_bio_sector_nr(rbio, bio);
1451	struct bio_vec *bvec;
1452	struct bvec_iter_all iter_all;
1453
1454	/* No data csum for the whole stripe, no need to verify. */
1455	if (!rbio->csum_bitmap || !rbio->csum_buf)
1456		return;
1457
1458	/* P/Q stripes, they have no data csum to verify against. */
1459	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
1460		return;
1461
1462	bio_for_each_segment_all(bvec, bio, iter_all) {
1463		int bv_offset;
1464
1465		for (bv_offset = bvec->bv_offset;
1466		     bv_offset < bvec->bv_offset + bvec->bv_len;
1467		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
1468			u8 csum_buf[BTRFS_CSUM_SIZE];
1469			u8 *expected_csum = rbio->csum_buf +
1470					    total_sector_nr * fs_info->csum_size;
1471			int ret;
1472
1473			/* No csum for this sector, skip to the next sector. */
1474			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
1475				continue;
1476
1477			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
1478				bv_offset, csum_buf, expected_csum);
1479			if (ret < 0)
1480				set_bit(total_sector_nr, rbio->error_bitmap);
1481		}
1482	}
1483}
1484
1485static void raid_wait_read_end_io(struct bio *bio)
1486{
1487	struct btrfs_raid_bio *rbio = bio->bi_private;
1488
1489	if (bio->bi_status) {
1490		rbio_update_error_bitmap(rbio, bio);
1491	} else {
1492		set_bio_pages_uptodate(rbio, bio);
1493		verify_bio_data_sectors(rbio, bio);
1494	}
1495
1496	bio_put(bio);
1497	if (atomic_dec_and_test(&rbio->stripes_pending))
1498		wake_up(&rbio->io_wait);
1499}
1500
1501static void submit_read_bios(struct btrfs_raid_bio *rbio,
1502			     struct bio_list *bio_list)
1503{
1504	struct bio *bio;
1505
1506	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1507	while ((bio = bio_list_pop(bio_list))) {
1508		bio->bi_end_io = raid_wait_read_end_io;
1509
1510		if (trace_raid56_scrub_read_recover_enabled()) {
1511			struct raid56_bio_trace_info trace_info = { 0 };
1512
1513			bio_get_trace_info(rbio, bio, &trace_info);
1514			trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
1515		}
1516		submit_bio(bio);
1517	}
1518}
1519
1520static int rmw_assemble_read_bios(struct btrfs_raid_bio *rbio,
1521				  struct bio_list *bio_list)
1522{
1523	struct bio *bio;
1524	int total_sector_nr;
1525	int ret = 0;
1526
1527	ASSERT(bio_list_size(bio_list) == 0);
1528
1529	/*
1530	 * Build a list of bios to read all sectors (including data and P/Q).
1531	 *
 1532	 * This behavior is to compensate for the later csum verification
 1533	 * and recovery.
1534	 */
1535	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1536	     total_sector_nr++) {
1537		struct sector_ptr *sector;
1538		int stripe = total_sector_nr / rbio->stripe_nsectors;
1539		int sectornr = total_sector_nr % rbio->stripe_nsectors;
1540
1541		sector = rbio_stripe_sector(rbio, stripe, sectornr);
1542		ret = rbio_add_io_sector(rbio, bio_list, sector,
1543			       stripe, sectornr, REQ_OP_READ);
1544		if (ret)
1545			goto cleanup;
1546	}
1547	return 0;
1548
1549cleanup:
1550	while ((bio = bio_list_pop(bio_list)))
1551		bio_put(bio);
1552	return ret;
1553}
1554
1555static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
1556{
1557	const int data_pages = rbio->nr_data * rbio->stripe_npages;
1558	int ret;
1559
1560	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
1561	if (ret < 0)
1562		return ret;
1563
1564	index_stripe_sectors(rbio);
1565	return 0;
1566}
1567
1568/*
 1569 * We use plugging callbacks to collect full stripes.
 1570 * Any time we get a partial stripe write while plugged
 1571 * we collect it into a list.  When the unplug comes down,
 1572 * we sort the list by logical block number and merge
 1573 * everything we can into the same rbios.
1574 */
1575struct btrfs_plug_cb {
1576	struct blk_plug_cb cb;
1577	struct btrfs_fs_info *info;
1578	struct list_head rbio_list;
1579	struct work_struct work;
1580};
1581
1582/*
1583 * rbios on the plug list are sorted for easier merging.
1584 */
1585static int plug_cmp(void *priv, const struct list_head *a,
1586		    const struct list_head *b)
1587{
1588	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1589						       plug_list);
1590	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1591						       plug_list);
1592	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1593	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1594
1595	if (a_sector < b_sector)
1596		return -1;
1597	if (a_sector > b_sector)
1598		return 1;
1599	return 0;
1600}
1601
1602static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1603{
1604	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
1605	struct btrfs_raid_bio *cur;
1606	struct btrfs_raid_bio *last = NULL;
1607
1608	list_sort(NULL, &plug->rbio_list, plug_cmp);
1609
1610	while (!list_empty(&plug->rbio_list)) {
1611		cur = list_entry(plug->rbio_list.next,
1612				 struct btrfs_raid_bio, plug_list);
1613		list_del_init(&cur->plug_list);
1614
1615		if (rbio_is_full(cur)) {
1616			/* We have a full stripe, queue it down. */
1617			start_async_work(cur, rmw_rbio_work);
1618			continue;
1619		}
1620		if (last) {
1621			if (rbio_can_merge(last, cur)) {
1622				merge_rbio(last, cur);
1623				free_raid_bio(cur);
1624				continue;
1625			}
1626			start_async_work(last, rmw_rbio_work);
1627		}
1628		last = cur;
1629	}
1630	if (last)
1631		start_async_work(last, rmw_rbio_work);
1632	kfree(plug);
1633}
1634
1635/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1636static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1637{
1638	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1639	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1640	const u64 full_stripe_start = rbio->bioc->raid_map[0];
1641	const u32 orig_len = orig_bio->bi_iter.bi_size;
1642	const u32 sectorsize = fs_info->sectorsize;
1643	u64 cur_logical;
1644
1645	ASSERT(orig_logical >= full_stripe_start &&
1646	       orig_logical + orig_len <= full_stripe_start +
1647	       rbio->nr_data * BTRFS_STRIPE_LEN);
1648
1649	bio_list_add(&rbio->bio_list, orig_bio);
1650	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1651
1652	/* Update the dbitmap. */
1653	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1654	     cur_logical += sectorsize) {
1655		int bit = ((u32)(cur_logical - full_stripe_start) >>
1656			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1657
1658		set_bit(bit, &rbio->dbitmap);
1659	}
1660}
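/*
 * Example (assuming 4K sectorsize, so stripe_nsectors == 16): a bio
 * covering bytes 72K to 80K of the full stripe sets dbitmap bits 2
 * and 3, i.e. (72K >> 12) % 16 and (76K >> 12) % 16, as the bitmap
 * tracks offsets within a single 64K data stripe.
 */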
1661
1662/*
 1663 * Our main entry point for writes from the rest of the FS.
1664 */
1665void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
1666{
1667	struct btrfs_fs_info *fs_info = bioc->fs_info;
1668	struct btrfs_raid_bio *rbio;
1669	struct btrfs_plug_cb *plug = NULL;
1670	struct blk_plug_cb *cb;
1671	int ret = 0;
1672
1673	rbio = alloc_rbio(fs_info, bioc);
1674	if (IS_ERR(rbio)) {
1675		ret = PTR_ERR(rbio);
1676		goto fail;
1677	}
1678	rbio->operation = BTRFS_RBIO_WRITE;
1679	rbio_add_bio(rbio, bio);
1680
1681	/*
1682	 * Don't plug on full rbios, just get them out the door
1683	 * as quickly as we can
1684	 */
1685	if (rbio_is_full(rbio))
1686		goto queue_rbio;
1687
1688	cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
1689	if (cb) {
1690		plug = container_of(cb, struct btrfs_plug_cb, cb);
1691		if (!plug->info) {
1692			plug->info = fs_info;
1693			INIT_LIST_HEAD(&plug->rbio_list);
1694		}
1695		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1696		return;
1697	}
1698queue_rbio:
1699	/*
 1700	 * Either we don't have any existing plug, or we're doing a full stripe,
 1701	 * so we can queue the rmw work now.
1702	 */
1703	start_async_work(rbio, rmw_rbio_work);
1704
1705	return;
1706
1707fail:
1708	bio->bi_status = errno_to_blk_status(ret);
1709	bio_endio(bio);
1710}
1711
1712static int verify_one_sector(struct btrfs_raid_bio *rbio,
1713			     int stripe_nr, int sector_nr)
1714{
1715	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1716	struct sector_ptr *sector;
1717	u8 csum_buf[BTRFS_CSUM_SIZE];
1718	u8 *csum_expected;
1719	int ret;
1720
1721	if (!rbio->csum_bitmap || !rbio->csum_buf)
1722		return 0;
1723
1724	/* No way to verify P/Q as they are not covered by data csum. */
1725	if (stripe_nr >= rbio->nr_data)
1726		return 0;
1727	/*
1728	 * If we're rebuilding a read, we have to use pages from the
1729	 * bio list if possible.
1730	 */
1731	if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1732	     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1733		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1734	} else {
1735		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1736	}
1737
1738	ASSERT(sector->page);
1739
1740	csum_expected = rbio->csum_buf +
1741			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
1742			fs_info->csum_size;
1743	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
1744				      csum_buf, csum_expected);
1745	return ret;
1746}
1747
1748/*
1749 * Recover a vertical stripe specified by @sector_nr.
1750 * @*pointers are the pre-allocated pointers by the caller, so we don't
1751 * need to allocate/free the pointers again and again.
1752 */
1753static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1754			    void **pointers, void **unmap_array)
1755{
1756	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1757	struct sector_ptr *sector;
1758	const u32 sectorsize = fs_info->sectorsize;
1759	int found_errors;
1760	int faila;
1761	int failb;
1762	int stripe_nr;
1763	int ret = 0;
1764
1765	/*
 1766	 * Now we just use the bitmap to mark the horizontal stripes in
 1767	 * which we have data when doing parity scrub.
1768	 */
1769	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1770	    !test_bit(sector_nr, &rbio->dbitmap))
1771		return 0;
1772
1773	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1774						 &failb);
1775	/*
 1776	 * No errors in the vertical stripe, skip it.  Can happen for
 1777	 * recovery where only part of a stripe failed the csum check.
1778	 */
1779	if (!found_errors)
1780		return 0;
1781
1782	if (found_errors > rbio->bioc->max_errors)
1783		return -EIO;
1784
1785	/*
1786	 * Setup our array of pointers with sectors from each stripe
1787	 *
1788	 * NOTE: store a duplicate array of pointers to preserve the
1789	 * pointer order.
1790	 */
1791	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1792		/*
1793		 * If we're rebuilding a read, we have to use pages from the
1794		 * bio list if possible.
1795		 */
1796		if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1797		     rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
1798			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1799		} else {
1800			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1801		}
1802		ASSERT(sector->page);
1803		pointers[stripe_nr] = kmap_local_page(sector->page) +
1804				   sector->pgoff;
1805		unmap_array[stripe_nr] = pointers[stripe_nr];
1806	}
1807
1808	/* All raid6 handling here */
1809	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1810		/* Single failure, rebuild from parity raid5 style */
1811		if (failb < 0) {
1812			if (faila == rbio->nr_data)
1813				/*
1814				 * Just the P stripe has failed, without
1815				 * a bad data or Q stripe.
1816				 * We have nothing to do, just skip the
1817				 * recovery for this stripe.
1818				 */
1819				goto cleanup;
1820			/*
1821			 * a single failure in raid6 is rebuilt
1822			 * in the pstripe code below
1823			 */
1824			goto pstripe;
1825		}
1826
1827		/*
1828		 * If the Q stripe has failed, do a P stripe reconstruction
1829		 * from the xors.
1830		 * If both the Q stripe and the P stripe have failed, we're
1831		 * here due to a crc mismatch and we can't give them the
1832		 * data they want.
1833		 */
1834		if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) {
1835			if (rbio->bioc->raid_map[faila] ==
1836			    RAID5_P_STRIPE)
1837				/*
1838				 * Only P and Q are corrupted.
1839				 * We only care about data stripe recovery,
1840				 * so we can skip this vertical stripe.
1841				 */
1842				goto cleanup;
1843			/*
1844			 * Otherwise we have one bad data stripe and
1845			 * a good P stripe.  raid5!
1846			 */
1847			goto pstripe;
1848		}
1849
1850		if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) {
1851			raid6_datap_recov(rbio->real_stripes, sectorsize,
1852					  faila, pointers);
1853		} else {
1854			raid6_2data_recov(rbio->real_stripes, sectorsize,
1855					  faila, failb, pointers);
1856		}
1857	} else {
1858		void *p;
1859
1860		/* Rebuild from P stripe here (raid5 or raid6). */
1861		ASSERT(failb == -1);
1862pstripe:
1863		/* Copy parity block into failed block to start with */
1864		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1865
1866		/* Rearrange the pointer array */
1867		p = pointers[faila];
1868		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1869		     stripe_nr++)
1870			pointers[stripe_nr] = pointers[stripe_nr + 1];
1871		pointers[rbio->nr_data - 1] = p;
1872
1873		/* Xor in the rest */
1874		run_xor(pointers, rbio->nr_data - 1, sectorsize);
1875
1876	}
1877
1878	/*
1879	 * Whether this is an RMW or a recovery, we should have all
1880	 * failed sectors in the vertical stripe repaired, thus they are now
1881	 * uptodate.
1882	 * Especially if we decide to cache the rbio, we need to
1883	 * have at least all data sectors uptodate.
1884	 *
1885	 * If possible, also check if the repaired sector matches its data
1886	 * checksum.
1887	 */
1888	if (faila >= 0) {
1889		ret = verify_one_sector(rbio, faila, sector_nr);
1890		if (ret < 0)
1891			goto cleanup;
1892
1893		sector = rbio_stripe_sector(rbio, faila, sector_nr);
1894		sector->uptodate = 1;
1895	}
1896	if (failb >= 0) {
1897		ret = verify_one_sector(rbio, failb, sector_nr);
1898		if (ret < 0)
1899			goto cleanup;
1900
1901		sector = rbio_stripe_sector(rbio, failb, sector_nr);
1902		sector->uptodate = 1;
1903	}
1904
1905cleanup:
1906	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1907		kunmap_local(unmap_array[stripe_nr]);
1908	return ret;
1909}
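
/*
 * A standalone sketch (not kernel code) of the "pstripe" recovery path
 * above: with a single failure, the missing block is the XOR of the
 * parity block and all surviving data blocks, which is what the
 * memcpy() + pointer rotation + run_xor() sequence computes.
 * The layout mirrors @pointers: blocks[0..nr_data-1] hold data,
 * blocks[nr_data] holds P.  The helper name is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void raid5_recover_one(uint8_t **blocks, int nr_data, int faila,
			      size_t len)
{
	/* Start from the parity block... */
	memcpy(blocks[faila], blocks[nr_data], len);
	/* ...then XOR in every surviving data block. */
	for (int i = 0; i < nr_data; i++) {
		if (i == faila)
			continue;
		for (size_t b = 0; b < len; b++)
			blocks[faila][b] ^= blocks[i][b];
	}
}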
1910
1911static int recover_sectors(struct btrfs_raid_bio *rbio)
1912{
1913	void **pointers = NULL;
1914	void **unmap_array = NULL;
1915	int sectornr;
1916	int ret = 0;
1917
1918	/*
1919	 * The @pointers array stores the pointer for each sector.
1920	 *
1921	 * @unmap_array stores a copy of the pointers that does not get
1922	 * reordered during reconstruction so that kunmap_local() works.
1923	 */
1924	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1925	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1926	if (!pointers || !unmap_array) {
1927		ret = -ENOMEM;
1928		goto out;
1929	}
1930
1931	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1932	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1933		spin_lock_irq(&rbio->bio_list_lock);
1934		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1935		spin_unlock_irq(&rbio->bio_list_lock);
1936	}
1937
1938	index_rbio_pages(rbio);
1939
1940	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
1941		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
1942		if (ret < 0)
1943			break;
1944	}
1945
1946out:
1947	kfree(pointers);
1948	kfree(unmap_array);
1949	return ret;
1950}
1951
1952static int recover_assemble_read_bios(struct btrfs_raid_bio *rbio,
1953				      struct bio_list *bio_list)
1954{
1955	struct bio *bio;
1956	int total_sector_nr;
1957	int ret = 0;
1958
1959	ASSERT(bio_list_size(bio_list) == 0);
1960	/*
1961	 * Read everything that hasn't failed. However this time we will
1962	 * not trust any cached sector.
1963	 * A cached sector may hold stale data in a part that the higher
1964	 * layer is not reading, so it cannot be trusted for the rebuild.
1965	 *
1966	 * So here we always re-read everything in the recovery path.
1967	 */
1968	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1969	     total_sector_nr++) {
1970		int stripe = total_sector_nr / rbio->stripe_nsectors;
1971		int sectornr = total_sector_nr % rbio->stripe_nsectors;
1972		struct sector_ptr *sector;
1973
1974		/*
1975		 * Skip any range which has an error.  It can be a range which
1976		 * is marked as an error (for a csum mismatch), or it can be on
1977		 * a missing device.
1978		 */
1979		if (!rbio->bioc->stripes[stripe].dev->bdev ||
1980		    test_bit(total_sector_nr, rbio->error_bitmap)) {
1981			/*
1982			 * Also set the error bit for missing device, which
1983			 * may not yet have its error bit set.
1984			 */
1985			set_bit(total_sector_nr, rbio->error_bitmap);
1986			continue;
1987		}
1988
1989		sector = rbio_stripe_sector(rbio, stripe, sectornr);
1990		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1991					 sectornr, REQ_OP_READ);
1992		if (ret < 0)
1993			goto error;
1994	}
1995	return 0;
1996error:
1997	while ((bio = bio_list_pop(bio_list)))
1998		bio_put(bio);
1999
2000	return -EIO;
2001}
2002
2003static int recover_rbio(struct btrfs_raid_bio *rbio)
2004{
2005	struct bio_list bio_list;
2006	struct bio *bio;
2007	int ret;
2008
2009	/*
2010	 * Whether we're doing recovery for a read failure or a degraded
2011	 * write, the caller should have set the error bitmap correctly.
2012	 */
2013	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
2014	bio_list_init(&bio_list);
2015
2016	/* For recovery, we need to read all sectors including P/Q. */
2017	ret = alloc_rbio_pages(rbio);
2018	if (ret < 0)
2019		goto out;
2020
2021	index_rbio_pages(rbio);
2022
2023	ret = recover_assemble_read_bios(rbio, &bio_list);
2024	if (ret < 0)
2025		goto out;
2026
2027	submit_read_bios(rbio, &bio_list);
2028	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2029
2030	ret = recover_sectors(rbio);
2031
2032out:
2033	while ((bio = bio_list_pop(&bio_list)))
2034		bio_put(bio);
2035
2036	return ret;
2037}
2038
2039static void recover_rbio_work(struct work_struct *work)
2040{
2041	struct btrfs_raid_bio *rbio;
2042	int ret;
2043
2044	rbio = container_of(work, struct btrfs_raid_bio, work);
2045
2046	ret = lock_stripe_add(rbio);
2047	if (ret == 0) {
2048		ret = recover_rbio(rbio);
2049		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2050	}
2051}
2052
2053static void recover_rbio_work_locked(struct work_struct *work)
2054{
2055	struct btrfs_raid_bio *rbio;
2056	int ret;
2057
2058	rbio = container_of(work, struct btrfs_raid_bio, work);
2059
2060	ret = recover_rbio(rbio);
2061	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2062}
2063
2064static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
2065{
2066	bool found = false;
2067	int sector_nr;
2068
2069	/*
2070	 * This is for RAID6 extra recovery tries, thus the mirror number
2071	 * must be larger than 2.
2072	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
2073	 * RAID5 methods.
2074	 */
2075	ASSERT(mirror_num > 2);
2076	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2077		int found_errors;
2078		int faila;
2079		int failb;
2080
2081		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2082							 &faila, &failb);
2083		/* This vertical stripe doesn't have errors. */
2084		if (!found_errors)
2085			continue;
2086
2087		/*
2088		 * If we found errors, there should be only one error marked
2089		 * by previous set_rbio_range_error().
2090		 */
2091		ASSERT(found_errors == 1);
2092		found = true;
2093
2094		/* Now select another stripe to mark as error. */
2095		failb = rbio->real_stripes - (mirror_num - 1);
2096		if (failb <= faila)
2097			failb--;
2098
2099		/* Set the extra bit in error bitmap. */
2100		if (failb >= 0)
2101			set_bit(failb * rbio->stripe_nsectors + sector_nr,
2102				rbio->error_bitmap);
2103	}
2104
2105	/* We should have found at least one vertical stripe with an error. */
2106	ASSERT(found);
2107}
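
/*
 * A standalone sketch (not kernel code) of the mirror_num mapping above:
 * each extra retry (mirror_num > 2) marks one more stripe as failed,
 * walking backwards from the last stripe and stepping over the stripe
 * already marked by set_rbio_range_error() (faila).  The helper name is
 * hypothetical.
 */
static int extra_failed_stripe(int real_stripes, int mirror_num, int faila)
{
	int failb = real_stripes - (mirror_num - 1);

	/* Skip over the stripe that is already marked as failed. */
	if (failb <= faila)
		failb--;
	return failb;	/* < 0 means no extra stripe is left to try */
}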
2108
2109/*
2110 * the main entry point for reads from the higher layers.  This
2111 * is really only called when the normal read path had a failure,
2112 * so we assume the bio they send down corresponds to a failed part
2113 * of the drive.
2114 */
2115void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2116			   int mirror_num)
2117{
2118	struct btrfs_fs_info *fs_info = bioc->fs_info;
2119	struct btrfs_raid_bio *rbio;
2120
2121	rbio = alloc_rbio(fs_info, bioc);
2122	if (IS_ERR(rbio)) {
2123		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2124		bio_endio(bio);
2125		return;
2126	}
2127
2128	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2129	rbio_add_bio(rbio, bio);
2130
2131	set_rbio_range_error(rbio, bio);
2132
2133	/*
2134	 * Loop retry:
2135	 * for 'mirror_num == 2', reconstruct from all other stripes.
2136	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2137	 */
2138	if (mirror_num > 2)
2139		set_rbio_raid6_extra_error(rbio, mirror_num);
2140
2141	start_async_work(rbio, recover_rbio_work);
2142}
2143
2144static void fill_data_csums(struct btrfs_raid_bio *rbio)
2145{
2146	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2147	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
2148						       rbio->bioc->raid_map[0]);
2149	const u64 start = rbio->bioc->raid_map[0];
2150	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2151			fs_info->sectorsize_bits;
2152	int ret;
2153
2154	/* The rbio should not have its csum buffer initialized. */
2155	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2156
2157	/*
2158	 * Skip the csum search if:
2159	 *
2160	 * - The rbio doesn't belong to data block groups
2161	 *   Then we are doing IO for tree blocks, no need to search csums.
2162	 *
2163	 * - The rbio belongs to mixed block groups
2164	 *   This is to avoid a deadlock: we are already holding the full
2165	 *   stripe lock, and if we trigger a metadata read that itself
2166	 *   needs raid56 recovery, we will deadlock.
2167	 */
2168	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2169	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2170		return;
2171
2172	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2173				 fs_info->csum_size, GFP_NOFS);
2174	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2175					  GFP_NOFS);
2176	if (!rbio->csum_buf || !rbio->csum_bitmap) {
2177		ret = -ENOMEM;
2178		goto error;
2179	}
2180
2181	ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
2182					rbio->csum_buf, rbio->csum_bitmap);
2183	if (ret < 0)
2184		goto error;
2185	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2186		goto no_csum;
2187	return;
2188
2189error:
2190	/*
2191	 * We failed to allocate memory or grab the csum, but it's not fatal;
2192	 * we can still continue.  But it's better to warn users that RMW is
2193	 * no longer safe for this particular sub-stripe write.
2194	 */
2195	btrfs_warn_rl(fs_info,
2196"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
2197			rbio->bioc->raid_map[0], ret);
2198no_csum:
2199	kfree(rbio->csum_buf);
2200	bitmap_free(rbio->csum_bitmap);
2201	rbio->csum_buf = NULL;
2202	rbio->csum_bitmap = NULL;
2203}
2204
2205static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
2206{
2207	struct bio_list bio_list;
2208	struct bio *bio;
2209	int ret;
2210
2211	bio_list_init(&bio_list);
2212
2213	/*
2214	 * Fill the data csums we need for data verification.  We need to fill
2215	 * the csum_bitmap/csum_buf first, as our endio function will try to
2216	 * verify the data sectors.
2217	 */
2218	fill_data_csums(rbio);
2219
2220	ret = rmw_assemble_read_bios(rbio, &bio_list);
2221	if (ret < 0)
2222		goto out;
2223
2224	submit_read_bios(rbio, &bio_list);
2225	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2226
2227	/*
2228	 * We may or may not have any corrupted sectors (including missing dev
2229	 * and csum mismatch), just let recover_sectors() handle them all.
2230	 */
2231	ret = recover_sectors(rbio);
2232	return ret;
2233out:
2234	while ((bio = bio_list_pop(&bio_list)))
2235		bio_put(bio);
2236
2237	return ret;
2238}
2239
2240static void raid_wait_write_end_io(struct bio *bio)
2241{
2242	struct btrfs_raid_bio *rbio = bio->bi_private;
2243	blk_status_t err = bio->bi_status;
2244
2245	if (err)
2246		rbio_update_error_bitmap(rbio, bio);
2247	bio_put(bio);
2248	if (atomic_dec_and_test(&rbio->stripes_pending))
2249		wake_up(&rbio->io_wait);
2250}
2251
2252static void submit_write_bios(struct btrfs_raid_bio *rbio,
2253			      struct bio_list *bio_list)
2254{
2255	struct bio *bio;
2256
2257	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2258	while ((bio = bio_list_pop(bio_list))) {
2259		bio->bi_end_io = raid_wait_write_end_io;
2260
2261		if (trace_raid56_write_stripe_enabled()) {
2262			struct raid56_bio_trace_info trace_info = { 0 };
2263
2264			bio_get_trace_info(rbio, bio, &trace_info);
2265			trace_raid56_write_stripe(rbio, bio, &trace_info);
2266		}
2267		submit_bio(bio);
2268	}
2269}
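
/*
 * A standalone sketch (not kernel code) of the completion counting used
 * by submit_write_bios() and raid_wait_write_end_io() above: the
 * submitter sets the pending count to the number of bios, each completion
 * decrements it, and whoever drops it to zero wakes the waiter (the
 * kernel uses atomic_dec_and_test() plus a waitqueue).  Names here are
 * hypothetical, using C11 atomics.
 */
#include <stdatomic.h>

static atomic_int pending;

static void on_bio_done(void)
{
	/* atomic_fetch_sub() returns the old value: 1 means we were last. */
	if (atomic_fetch_sub(&pending, 1) == 1) {
		/* Last completion: wake up the waiter here. */
	}
}

static void submit_all(int nr_bios)
{
	atomic_store(&pending, nr_bios);
	/* ...submit each bio; each calls on_bio_done() when it finishes... */
}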
2270
2271/*
2272	 * Determine if we need to read any sector from the disk.
2273	 * Should only be used in the RMW path, to skip reads for a cached rbio.
2274 */
2275static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
2276{
2277	int i;
2278
2279	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
2280		struct sector_ptr *sector = &rbio->stripe_sectors[i];
2281
2282		/*
2283		 * We have a sector which has no page or is not uptodate,
2284		 * thus this rbio cannot be a cached one, as a cached one must
2285		 * have all its data sectors present and uptodate.
2286		 */
2287		if (!sector->page || !sector->uptodate)
2288			return true;
2289	}
2290	return false;
2291}
2292
2293static int rmw_rbio(struct btrfs_raid_bio *rbio)
2294{
2295	struct bio_list bio_list;
2296	int sectornr;
2297	int ret = 0;
2298
2299	/*
2300	 * Allocate the pages for parity first, as P/Q pages will always be
2301	 * needed for both full-stripe and sub-stripe writes.
2302	 */
2303	ret = alloc_rbio_parity_pages(rbio);
2304	if (ret < 0)
2305		return ret;
2306
2307	/*
2308	 * Either full stripe write, or we have every data sector already
2309	 * cached, can go to write path immediately.
2310	 */
2311	if (rbio_is_full(rbio) || !need_read_stripe_sectors(rbio))
2312		goto write;
2313
2314	/*
2315	 * Now we're doing sub-stripe write, also need all data stripes to do
2316	 * the full RMW.
2317	 */
2318	ret = alloc_rbio_data_pages(rbio);
2319	if (ret < 0)
2320		return ret;
2321
2322	index_rbio_pages(rbio);
2323
2324	ret = rmw_read_wait_recover(rbio);
2325	if (ret < 0)
2326		return ret;
2327
2328write:
2329	/*
2330	 * At this stage we're not allowed to add any new bios to the
2331	 * bio list any more; anyone else that wants to change this stripe
2332	 * needs to do their own rmw.
2333	 */
2334	spin_lock_irq(&rbio->bio_list_lock);
2335	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2336	spin_unlock_irq(&rbio->bio_list_lock);
2337
2338	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2339
2340	index_rbio_pages(rbio);
2341
2342	/*
2343	 * We don't cache full rbios because we're assuming
2344	 * the higher layers are unlikely to use this area of
2345	 * the disk again soon.  If they do use it again,
2346	 * hopefully they will send another full bio.
2347	 */
2348	if (!rbio_is_full(rbio))
2349		cache_rbio_pages(rbio);
2350	else
2351		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2352
2353	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2354		generate_pq_vertical(rbio, sectornr);
2355
2356	bio_list_init(&bio_list);
2357	ret = rmw_assemble_write_bios(rbio, &bio_list);
2358	if (ret < 0)
2359		return ret;
2360
2361	/* We should have at least one bio assembled. */
2362	ASSERT(bio_list_size(&bio_list));
2363	submit_write_bios(rbio, &bio_list);
2364	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2365
2366	/* We may have hit more errors than we can tolerate during the read. */
2367	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2368		int found_errors;
2369
2370		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2371		if (found_errors > rbio->bioc->max_errors) {
2372			ret = -EIO;
2373			break;
2374		}
2375	}
2376	return ret;
2377}
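
/*
 * A standalone sketch (not kernel code) of what generate_pq_vertical()
 * above amounts to in the RAID5 case: the P block is simply the XOR of
 * all data blocks in the vertical stripe.  RAID6 additionally derives
 * the Q block from the Galois-field syndrome (lib/raid6 in the kernel).
 * The layout mirrors @pointers: blocks[0..nr_data-1] hold data,
 * blocks[nr_data] holds P.  The helper name is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void raid5_gen_parity(uint8_t **blocks, int nr_data, size_t len)
{
	memcpy(blocks[nr_data], blocks[0], len);	/* P = d0 ... */
	for (int i = 1; i < nr_data; i++)
		for (size_t b = 0; b < len; b++)
			blocks[nr_data][b] ^= blocks[i][b];	/* ... ^ di */
}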
2378
2379static void rmw_rbio_work(struct work_struct *work)
2380{
2381	struct btrfs_raid_bio *rbio;
2382	int ret;
2383
2384	rbio = container_of(work, struct btrfs_raid_bio, work);
2385
2386	ret = lock_stripe_add(rbio);
2387	if (ret == 0) {
2388		ret = rmw_rbio(rbio);
2389		rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2390	}
2391}
2392
2393static void rmw_rbio_work_locked(struct work_struct *work)
2394{
2395	struct btrfs_raid_bio *rbio;
2396	int ret;
2397
2398	rbio = container_of(work, struct btrfs_raid_bio, work);
2399
2400	ret = rmw_rbio(rbio);
2401	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2402}
2403
2404/*
2405 * The following code is used to scrub/replace the parity stripe
2406 *
2407 * Caller must have already increased bio_counter for getting @bioc.
2408 *
2409 * Note: We need to make sure all the pages added into the scrub/replace
2410 * raid bio are correct and will not be changed during the scrub/replace.
2411 * That is, those pages hold only metadata or file data with checksums.
2412 */
2413
2414struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
2415				struct btrfs_io_context *bioc,
2416				struct btrfs_device *scrub_dev,
2417				unsigned long *dbitmap, int stripe_nsectors)
2418{
2419	struct btrfs_fs_info *fs_info = bioc->fs_info;
2420	struct btrfs_raid_bio *rbio;
2421	int i;
2422
2423	rbio = alloc_rbio(fs_info, bioc);
2424	if (IS_ERR(rbio))
2425		return NULL;
2426	bio_list_add(&rbio->bio_list, bio);
2427	/*
2428	 * This is a special bio which is used to hold the completion handler
2429	 * and make the scrub rbio similar to the other types.
2430	 */
2431	ASSERT(!bio->bi_iter.bi_size);
2432	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2433
2434	/*
2435	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
2436	 * to the end position, so this search can start from the first parity
2437	 * stripe.
2438	 */
2439	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2440		if (bioc->stripes[i].dev == scrub_dev) {
2441			rbio->scrubp = i;
2442			break;
2443		}
2444	}
2445	ASSERT(i < rbio->real_stripes);
2446
2447	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
2448	return rbio;
2449}
2450
2451/* Used for both parity scrub and missing. */
2452void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2453			    unsigned int pgoff, u64 logical)
2454{
2455	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2456	int stripe_offset;
2457	int index;
2458
2459	ASSERT(logical >= rbio->bioc->raid_map[0]);
2460	ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
2461				       BTRFS_STRIPE_LEN * rbio->nr_data);
2462	stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
2463	index = stripe_offset / sectorsize;
2464	rbio->bio_sectors[index].page = page;
2465	rbio->bio_sectors[index].pgoff = pgoff;
2466}
2467
2468/*
2469 * We only scrub the parity for which we have correct data on the same
2470 * horizontal stripe, so we needn't allocate pages for all the stripes.
2471 */
2472static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2473{
2474	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2475	int total_sector_nr;
2476
2477	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2478	     total_sector_nr++) {
2479		struct page *page;
2480		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2481		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
2482
2483		if (!test_bit(sectornr, &rbio->dbitmap))
2484			continue;
2485		if (rbio->stripe_pages[index])
2486			continue;
2487		page = alloc_page(GFP_NOFS);
2488		if (!page)
2489			return -ENOMEM;
2490		rbio->stripe_pages[index] = page;
2491	}
2492	index_stripe_sectors(rbio);
2493	return 0;
2494}
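
/*
 * A standalone sketch (not kernel code) of the sector-to-page mapping in
 * alloc_rbio_essential_pages() above: sectors are packed linearly into
 * stripe_pages[], so a sector's page index is its byte offset divided by
 * the page size (4096 below is an illustrative assumption; the kernel
 * shifts by PAGE_SHIFT).  The helper name is hypothetical.
 */
#include <stdint.h>

static int sector_to_page_index(int total_sector_nr, uint32_t sectorsize)
{
	return (int)(((uint64_t)total_sector_nr * sectorsize) / 4096);
}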
2495
2496static int finish_parity_scrub(struct btrfs_raid_bio *rbio, int need_check)
2497{
2498	struct btrfs_io_context *bioc = rbio->bioc;
2499	const u32 sectorsize = bioc->fs_info->sectorsize;
2500	void **pointers = rbio->finish_pointers;
2501	unsigned long *pbitmap = &rbio->finish_pbitmap;
2502	int nr_data = rbio->nr_data;
2503	int stripe;
2504	int sectornr;
2505	bool has_qstripe;
2506	struct sector_ptr p_sector = { 0 };
2507	struct sector_ptr q_sector = { 0 };
2508	struct bio_list bio_list;
2509	struct bio *bio;
2510	int is_replace = 0;
2511	int ret;
2512
2513	bio_list_init(&bio_list);
2514
2515	if (rbio->real_stripes - rbio->nr_data == 1)
2516		has_qstripe = false;
2517	else if (rbio->real_stripes - rbio->nr_data == 2)
2518		has_qstripe = true;
2519	else
2520		BUG();
2521
2522	if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) {
2523		is_replace = 1;
2524		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
2525	}
2526
2527	/*
2528	 * The higher layers (scrubber) are unlikely to use
2529	 * this area of the disk again soon, so don't cache
2530	 * it.
2531	 */
2532	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2533
2534	if (!need_check)
2535		goto writeback;
2536
2537	p_sector.page = alloc_page(GFP_NOFS);
2538	if (!p_sector.page)
2539		return -ENOMEM;
2540	p_sector.pgoff = 0;
2541	p_sector.uptodate = 1;
2542
2543	if (has_qstripe) {
2544		/* RAID6, allocate and map temp space for the Q stripe */
2545		q_sector.page = alloc_page(GFP_NOFS);
2546		if (!q_sector.page) {
2547			__free_page(p_sector.page);
2548			p_sector.page = NULL;
2549			return -ENOMEM;
2550		}
2551		q_sector.pgoff = 0;
2552		q_sector.uptodate = 1;
2553		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
2554	}
2555
2556	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2557
2558	/* Map the parity stripe just once */
2559	pointers[nr_data] = kmap_local_page(p_sector.page);
2560
2561	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2562		struct sector_ptr *sector;
2563		void *parity;
2564
2565		/* first collect one page from each data stripe */
2566		for (stripe = 0; stripe < nr_data; stripe++) {
2567			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
2568			pointers[stripe] = kmap_local_page(sector->page) +
2569					   sector->pgoff;
2570		}
2571
2572		if (has_qstripe) {
2573			/* RAID6, call the library function to fill in our P/Q */
2574			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
2575						pointers);
2576		} else {
2577			/* raid5 */
2578			memcpy(pointers[nr_data], pointers[0], sectorsize);
2579			run_xor(pointers + 1, nr_data - 1, sectorsize);
2580		}
2581
2582		/* Check scrubbing parity and repair it */
2583		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2584		parity = kmap_local_page(sector->page) + sector->pgoff;
2585		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2586			memcpy(parity, pointers[rbio->scrubp], sectorsize);
2587		else
2588			/* Parity is right, needn't writeback */
2589			bitmap_clear(&rbio->dbitmap, sectornr, 1);
2590		kunmap_local(parity);
2591
2592		for (stripe = nr_data - 1; stripe >= 0; stripe--)
2593			kunmap_local(pointers[stripe]);
2594	}
2595
2596	kunmap_local(pointers[nr_data]);
2597	__free_page(p_sector.page);
2598	p_sector.page = NULL;
2599	if (q_sector.page) {
2600		kunmap_local(pointers[rbio->real_stripes - 1]);
2601		__free_page(q_sector.page);
2602		q_sector.page = NULL;
2603	}
2604
2605writeback:
2606	/*
2607	 * time to start writing.  Make bios for everything from the
2608	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2609	 * everything else.
2610	 */
2611	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2612		struct sector_ptr *sector;
2613
2614		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2615		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2616					 sectornr, REQ_OP_WRITE);
2617		if (ret)
2618			goto cleanup;
2619	}
2620
2621	if (!is_replace)
2622		goto submit_write;
2623
2624	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
2625		struct sector_ptr *sector;
2626
2627		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2628		ret = rbio_add_io_sector(rbio, &bio_list, sector,
2629				       bioc->tgtdev_map[rbio->scrubp],
2630				       sectornr, REQ_OP_WRITE);
2631		if (ret)
2632			goto cleanup;
2633	}
2634
2635submit_write:
2636	submit_write_bios(rbio, &bio_list);
2637	return 0;
2638
2639cleanup:
2640	while ((bio = bio_list_pop(&bio_list)))
2641		bio_put(bio);
2642	return ret;
2643}
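
/*
 * A standalone sketch (not kernel code) of the check-and-repair step in
 * finish_parity_scrub() above: the freshly computed parity is compared
 * with the on-disk copy, and only mismatching sectors keep their
 * writeback bit set (the kernel clears the dbitmap bit when they match).
 * The helper name is hypothetical.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static int scrub_one_parity_sector(uint8_t *on_disk, const uint8_t *computed,
				   size_t len)
{
	if (memcmp(on_disk, computed, len) != 0) {
		memcpy(on_disk, computed, len);
		return 1;	/* corrupted: repaired, needs writeback */
	}
	return 0;		/* parity was right, skip the writeback */
}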
2644
2645static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2646{
2647	if (stripe >= 0 && stripe < rbio->nr_data)
2648		return 1;
2649	return 0;
2650}
2651
2652static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
2653{
2654	void **pointers = NULL;
2655	void **unmap_array = NULL;
2656	int sector_nr;
2657	int ret = 0;
2658
2659	/*
2660	 * The @pointers array stores the pointer for each sector.
2661	 *
2662	 * @unmap_array stores a copy of the pointers that does not get
2663	 * reordered during reconstruction so that kunmap_local() works.
2664	 */
2665	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2666	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2667	if (!pointers || !unmap_array) {
2668		ret = -ENOMEM;
2669		goto out;
2670	}
2671
2672	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2673		int dfail = 0, failp = -1;
2674		int faila;
2675		int failb;
2676		int found_errors;
2677
2678		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2679							 &faila, &failb);
2680		if (found_errors > rbio->bioc->max_errors) {
2681			ret = -EIO;
2682			goto out;
2683		}
2684		if (found_errors == 0)
2685			continue;
2686
2687		/* We should have at least one error here. */
2688		ASSERT(faila >= 0 || failb >= 0);
2689
2690		if (is_data_stripe(rbio, faila))
2691			dfail++;
2692		else if (is_parity_stripe(faila))
2693			failp = faila;
2694
2695		if (is_data_stripe(rbio, failb))
2696			dfail++;
2697		else if (is_parity_stripe(failb))
2698			failp = failb;
2699		/*
2700		 * Because we cannot use the parity under scrub to repair
2701		 * data, the repair capability is reduced.  (In the case
2702		 * of RAID5, we cannot repair anything.)
2703		 */
2704		if (dfail > rbio->bioc->max_errors - 1) {
2705			ret = -EIO;
2706			goto out;
2707		}
2708		/*
2709		 * If all data is good and only the parity is bad, just repair
2710		 * the parity; no need to recover data stripes.
2711		 */
2712		if (dfail == 0)
2713			continue;
2714
2715		/*
2716		 * This means we got one corrupted data stripe and one
2717		 * corrupted parity on RAID6.  If the corrupted parity is the
2718		 * one being scrubbed, we can luckily use the other parity to
2719		 * repair the data; otherwise we cannot repair the data stripe.
2720		 */
2721		if (failp != rbio->scrubp) {
2722			ret = -EIO;
2723			goto out;
2724		}
2725
2726		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2727		if (ret < 0)
2728			goto out;
2729	}
2730out:
2731	kfree(pointers);
2732	kfree(unmap_array);
2733	return ret;
2734}
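
/*
 * A standalone sketch (not kernel code) of the per-vertical-stripe
 * repair decision in recover_scrub_rbio() above: the parity under scrub
 * cannot be used for repair, so the tolerance drops by one, and a data
 * failure is only repairable when the failed parity is the scrub target.
 * The helper name is hypothetical.
 */
static int scrub_can_repair(int dfail, int failp, int scrubp, int max_errors)
{
	if (dfail > max_errors - 1)
		return 0;	/* too many data failures to repair */
	if (dfail == 0)
		return 1;	/* only parity is bad: just rewrite it */
	return failp == scrubp;	/* data repair needs the other parity good */
}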
2735
2736static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio,
2737				    struct bio_list *bio_list)
2738{
2739	struct bio *bio;
2740	int total_sector_nr;
2741	int ret = 0;
2742
2743	ASSERT(bio_list_size(bio_list) == 0);
2744
2745	/* Build a list of bios to read all the missing parts. */
2746	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2747	     total_sector_nr++) {
2748		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2749		int stripe = total_sector_nr / rbio->stripe_nsectors;
2750		struct sector_ptr *sector;
2751
2752		/* No data in the vertical stripe, no need to read. */
2753		if (!test_bit(sectornr, &rbio->dbitmap))
2754			continue;
2755
2756		/*
2757		 * We want to find all the sectors missing from the rbio and
2758		 * read them from the disk. If sector_in_rbio() finds a sector
2759		 * in the bio list we don't need to read it off the stripe.
2760		 */
2761		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
2762		if (sector)
2763			continue;
2764
2765		sector = rbio_stripe_sector(rbio, stripe, sectornr);
2766		/*
2767		 * The bio cache may have handed us an uptodate sector.  If so,
2768		 * use it.
2769		 */
2770		if (sector->uptodate)
2771			continue;
2772
2773		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
2774					 sectornr, REQ_OP_READ);
2775		if (ret)
2776			goto error;
2777	}
2778	return 0;
2779error:
2780	while ((bio = bio_list_pop(bio_list)))
2781		bio_put(bio);
2782	return ret;
2783}
2784
2785static int scrub_rbio(struct btrfs_raid_bio *rbio)
2786{
2787	bool need_check = false;
2788	struct bio_list bio_list;
2789	int sector_nr;
2790	int ret;
2791	struct bio *bio;
2792
2793	bio_list_init(&bio_list);
2794
2795	ret = alloc_rbio_essential_pages(rbio);
2796	if (ret)
2797		goto cleanup;
2798
2799	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2800
2801	ret = scrub_assemble_read_bios(rbio, &bio_list);
2802	if (ret < 0)
2803		goto cleanup;
2804
2805	submit_read_bios(rbio, &bio_list);
2806	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2807
2808	/* We may have some failures, recover the failed sectors first. */
2809	ret = recover_scrub_rbio(rbio);
2810	if (ret < 0)
2811		goto cleanup;
2812
2813	/*
2814	 * We have every sector properly prepared.  Now we can finish the
2815	 * scrub and write back the good content.
2816	 */
2817	ret = finish_parity_scrub(rbio, need_check);
2818	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2819	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2820		int found_errors;
2821
2822		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2823		if (found_errors > rbio->bioc->max_errors) {
2824			ret = -EIO;
2825			break;
2826		}
2827	}
2828	return ret;
2829
2830cleanup:
2831	while ((bio = bio_list_pop(&bio_list)))
2832		bio_put(bio);
2833
2834	return ret;
2835}
2836
2837static void scrub_rbio_work_locked(struct work_struct *work)
2838{
2839	struct btrfs_raid_bio *rbio;
2840	int ret;
2841
2842	rbio = container_of(work, struct btrfs_raid_bio, work);
2843	ret = scrub_rbio(rbio);
2844	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2845}
2846
2847void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2848{
2849	if (!lock_stripe_add(rbio))
2850		start_async_work(rbio, scrub_rbio_work_locked);
2851}
2852
2853/* The following code is used for dev replace of a missing RAID 5/6 device. */
2854
2855struct btrfs_raid_bio *
2856raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc)
2857{
2858	struct btrfs_fs_info *fs_info = bioc->fs_info;
2859	struct btrfs_raid_bio *rbio;
2860
2861	rbio = alloc_rbio(fs_info, bioc);
2862	if (IS_ERR(rbio))
2863		return NULL;
2864
2865	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2866	bio_list_add(&rbio->bio_list, bio);
2867	/*
2868	 * This is a special bio which is used to hold the completion handler
2869	 * and make the scrub rbio similar to the other types.
2870	 */
2871	ASSERT(!bio->bi_iter.bi_size);
2872
2873	set_rbio_range_error(rbio, bio);
2874
2875	return rbio;
2876}
2877
2878void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2879{
2880	start_async_work(rbio, recover_rbio_work);
2881}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Fusion-io  All rights reserved.
   4 * Copyright (C) 2012 Intel Corp. All rights reserved.
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/bio.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/raid/pq.h>
  12#include <linux/hash.h>
  13#include <linux/list_sort.h>
  14#include <linux/raid/xor.h>
  15#include <linux/mm.h>
  16#include "messages.h"
 
  17#include "ctree.h"
  18#include "disk-io.h"
  19#include "volumes.h"
  20#include "raid56.h"
  21#include "async-thread.h"
  22#include "file-item.h"
  23#include "btrfs_inode.h"
  24
  25/* set when additional merges to this rbio are not allowed */
  26#define RBIO_RMW_LOCKED_BIT	1
  27
  28/*
  29 * set when this rbio is sitting in the hash, but it is just a cache
  30 * of past RMW
  31 */
  32#define RBIO_CACHE_BIT		2
  33
  34/*
  35 * set when it is safe to trust the stripe_pages for caching
  36 */
  37#define RBIO_CACHE_READY_BIT	3
  38
  39#define RBIO_CACHE_SIZE 1024
  40
  41#define BTRFS_STRIPE_HASH_TABLE_BITS				11
  42
  43static void dump_bioc(const struct btrfs_fs_info *fs_info, const struct btrfs_io_context *bioc)
  44{
  45	if (unlikely(!bioc)) {
  46		btrfs_crit(fs_info, "bioc=NULL");
  47		return;
  48	}
  49	btrfs_crit(fs_info,
  50"bioc logical=%llu full_stripe=%llu size=%llu map_type=0x%llx mirror=%u replace_nr_stripes=%u replace_stripe_src=%d num_stripes=%u",
  51		bioc->logical, bioc->full_stripe_logical, bioc->size,
  52		bioc->map_type, bioc->mirror_num, bioc->replace_nr_stripes,
  53		bioc->replace_stripe_src, bioc->num_stripes);
  54	for (int i = 0; i < bioc->num_stripes; i++) {
  55		btrfs_crit(fs_info, "    nr=%d devid=%llu physical=%llu",
  56			   i, bioc->stripes[i].dev->devid,
  57			   bioc->stripes[i].physical);
  58	}
  59}
  60
  61static void btrfs_dump_rbio(const struct btrfs_fs_info *fs_info,
  62			    const struct btrfs_raid_bio *rbio)
  63{
  64	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
  65		return;
  66
  67	dump_bioc(fs_info, rbio->bioc);
  68	btrfs_crit(fs_info,
  69"rbio flags=0x%lx nr_sectors=%u nr_data=%u real_stripes=%u stripe_nsectors=%u scrubp=%u dbitmap=0x%lx",
  70		rbio->flags, rbio->nr_sectors, rbio->nr_data,
  71		rbio->real_stripes, rbio->stripe_nsectors,
  72		rbio->scrubp, rbio->dbitmap);
  73}
  74
  75#define ASSERT_RBIO(expr, rbio)						\
  76({									\
  77	if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) {	\
  78		const struct btrfs_fs_info *__fs_info = (rbio)->bioc ?	\
  79					(rbio)->bioc->fs_info : NULL;	\
  80									\
  81		btrfs_dump_rbio(__fs_info, (rbio));			\
  82	}								\
  83	ASSERT((expr));							\
  84})
  85
  86#define ASSERT_RBIO_STRIPE(expr, rbio, stripe_nr)			\
  87({									\
  88	if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) {	\
  89		const struct btrfs_fs_info *__fs_info = (rbio)->bioc ?	\
  90					(rbio)->bioc->fs_info : NULL;	\
  91									\
  92		btrfs_dump_rbio(__fs_info, (rbio));			\
  93		btrfs_crit(__fs_info, "stripe_nr=%d", (stripe_nr));	\
  94	}								\
  95	ASSERT((expr));							\
  96})
  97
  98#define ASSERT_RBIO_SECTOR(expr, rbio, sector_nr)			\
  99({									\
 100	if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) {	\
 101		const struct btrfs_fs_info *__fs_info = (rbio)->bioc ?	\
 102					(rbio)->bioc->fs_info : NULL;	\
 103									\
 104		btrfs_dump_rbio(__fs_info, (rbio));			\
 105		btrfs_crit(__fs_info, "sector_nr=%d", (sector_nr));	\
 106	}								\
 107	ASSERT((expr));							\
 108})
 109
 110#define ASSERT_RBIO_LOGICAL(expr, rbio, logical)			\
 111({									\
 112	if (IS_ENABLED(CONFIG_BTRFS_ASSERT) && unlikely(!(expr))) {	\
 113		const struct btrfs_fs_info *__fs_info = (rbio)->bioc ?	\
 114					(rbio)->bioc->fs_info : NULL;	\
 115									\
 116		btrfs_dump_rbio(__fs_info, (rbio));			\
 117		btrfs_crit(__fs_info, "logical=%llu", (logical));		\
 118	}								\
 119	ASSERT((expr));							\
 120})
 121
 122/* Used by the raid56 code to lock stripes for read/modify/write */
 123struct btrfs_stripe_hash {
 124	struct list_head hash_list;
 125	spinlock_t lock;
 126};
 127
 128/* Used by the raid56 code to lock stripes for read/modify/write */
 129struct btrfs_stripe_hash_table {
 130	struct list_head stripe_cache;
 131	spinlock_t cache_lock;
 132	int cache_size;
 133	struct btrfs_stripe_hash table[];
 134};
 135
 136/*
 137 * A bvec like structure to present a sector inside a page.
 138 *
 139 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 140 */
 141struct sector_ptr {
 142	struct page *page;
 143	unsigned int pgoff:24;
 144	unsigned int uptodate:8;
 145};
 146
 147static void rmw_rbio_work(struct work_struct *work);
 148static void rmw_rbio_work_locked(struct work_struct *work);
 149static void index_rbio_pages(struct btrfs_raid_bio *rbio);
 150static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
 151
 152static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
 153static void scrub_rbio_work_locked(struct work_struct *work);
 154
 155static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
 156{
 157	bitmap_free(rbio->error_bitmap);
 158	kfree(rbio->stripe_pages);
 159	kfree(rbio->bio_sectors);
 160	kfree(rbio->stripe_sectors);
 161	kfree(rbio->finish_pointers);
 162}
 163
 164static void free_raid_bio(struct btrfs_raid_bio *rbio)
 165{
 166	int i;
 167
 168	if (!refcount_dec_and_test(&rbio->refs))
 169		return;
 170
 171	WARN_ON(!list_empty(&rbio->stripe_cache));
 172	WARN_ON(!list_empty(&rbio->hash_list));
 173	WARN_ON(!bio_list_empty(&rbio->bio_list));
 174
 175	for (i = 0; i < rbio->nr_pages; i++) {
 176		if (rbio->stripe_pages[i]) {
 177			__free_page(rbio->stripe_pages[i]);
 178			rbio->stripe_pages[i] = NULL;
 179		}
 180	}
 181
 182	btrfs_put_bioc(rbio->bioc);
 183	free_raid_bio_pointers(rbio);
 184	kfree(rbio);
 185}
 186
 187static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
 188{
 189	INIT_WORK(&rbio->work, work_func);
 190	queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
 191}
 192
 193/*
 194 * the stripe hash table is used for locking, and to collect
 195 * bios in hopes of making a full stripe
 196 */
 197int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
 198{
 199	struct btrfs_stripe_hash_table *table;
 200	struct btrfs_stripe_hash_table *x;
 201	struct btrfs_stripe_hash *cur;
 202	struct btrfs_stripe_hash *h;
 203	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
 204	int i;
 205
 206	if (info->stripe_hash_table)
 207		return 0;
 208
 209	/*
 210	 * The table is large, starting with order 4 and can go as high as
 211	 * order 7 in case lock debugging is turned on.
 212	 *
 213	 * Try harder to allocate and fallback to vmalloc to lower the chance
 214	 * of a failing mount.
 215	 */
 216	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
 217	if (!table)
 218		return -ENOMEM;
 219
 220	spin_lock_init(&table->cache_lock);
 221	INIT_LIST_HEAD(&table->stripe_cache);
 222
 223	h = table->table;
 224
 225	for (i = 0; i < num_entries; i++) {
 226		cur = h + i;
 227		INIT_LIST_HEAD(&cur->hash_list);
 228		spin_lock_init(&cur->lock);
 229	}
 230
 231	x = cmpxchg(&info->stripe_hash_table, NULL, table);
 232	kvfree(x);
 233	return 0;
 234}
 235
 236/*
 237 * caching an rbio means to copy anything from the
 238 * bio_sectors array into the stripe_pages array.  We
 239 * use the page uptodate bit in the stripe cache array
 240 * to indicate if it has valid data
 241 *
 242 * once the caching is done, we set the cache ready
 243 * bit.
 244 */
 245static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
 246{
 247	int i;
 248	int ret;
 249
 250	ret = alloc_rbio_pages(rbio);
 251	if (ret)
 252		return;
 253
 254	for (i = 0; i < rbio->nr_sectors; i++) {
 255		/* Some range not covered by bio (partial write), skip it */
 256		if (!rbio->bio_sectors[i].page) {
 257			/*
 258			 * Even if the sector is not covered by bio, if it is
 259			 * a data sector it should still be uptodate as it is
 260			 * read from disk.
 261			 */
 262			if (i < rbio->nr_data * rbio->stripe_nsectors)
 263				ASSERT(rbio->stripe_sectors[i].uptodate);
 264			continue;
 265		}
 266
 267		ASSERT(rbio->stripe_sectors[i].page);
 268		memcpy_page(rbio->stripe_sectors[i].page,
 269			    rbio->stripe_sectors[i].pgoff,
 270			    rbio->bio_sectors[i].page,
 271			    rbio->bio_sectors[i].pgoff,
 272			    rbio->bioc->fs_info->sectorsize);
 273		rbio->stripe_sectors[i].uptodate = 1;
 274	}
 275	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 276}
 277
 278/*
 279 * we hash on the first logical address of the stripe
 280 */
 281static int rbio_bucket(struct btrfs_raid_bio *rbio)
 282{
 283	u64 num = rbio->bioc->full_stripe_logical;
 284
 285	/*
 286	 * we shift down quite a bit.  We're using byte
 287	 * addressing, and most of the lower bits are zeros.
 288	 * This tends to upset hash_64, and it consistently
 289	 * returns just one or two different values.
 290	 *
 291	 * shifting off the lower bits fixes things.
 292	 */
 293	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
 294}
 295
 296static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
 297				       unsigned int page_nr)
 298{
 299	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
 300	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
 301	int i;
 302
 303	ASSERT(page_nr < rbio->nr_pages);
 304
 305	for (i = sectors_per_page * page_nr;
 306	     i < sectors_per_page * page_nr + sectors_per_page;
 307	     i++) {
 308		if (!rbio->stripe_sectors[i].uptodate)
 309			return false;
 310	}
 311	return true;
 312}
 313
 314/*
 315 * Update the stripe_sectors[] array to use correct page and pgoff
 316 *
 317 * Should be called every time any page pointer in stripes_pages[] got modified.
 318 */
 319static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
 320{
 321	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
 322	u32 offset;
 323	int i;
 324
 325	for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
 326		int page_index = offset >> PAGE_SHIFT;
 327
 328		ASSERT(page_index < rbio->nr_pages);
 329		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
 330		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
 331	}
 332}
 333
 334static void steal_rbio_page(struct btrfs_raid_bio *src,
 335			    struct btrfs_raid_bio *dest, int page_nr)
 336{
 337	const u32 sectorsize = src->bioc->fs_info->sectorsize;
 338	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
 339	int i;
 340
 341	if (dest->stripe_pages[page_nr])
 342		__free_page(dest->stripe_pages[page_nr]);
 343	dest->stripe_pages[page_nr] = src->stripe_pages[page_nr];
 344	src->stripe_pages[page_nr] = NULL;
 345
 346	/* Also update the sector->uptodate bits. */
 347	for (i = sectors_per_page * page_nr;
 348	     i < sectors_per_page * page_nr + sectors_per_page; i++)
 349		dest->stripe_sectors[i].uptodate = true;
 350}
 351
 352static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
 353{
 354	const int sector_nr = (page_nr << PAGE_SHIFT) >>
 355			      rbio->bioc->fs_info->sectorsize_bits;
 356
 357	/*
 358	 * We have ensured PAGE_SIZE is aligned with sectorsize, thus
 359	 * we won't have a page which is half data half parity.
 360	 *
 361	 * Thus if the first sector of the page belongs to data stripes, then
 362	 * the full page belongs to data stripes.
 363	 */
 364	return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
 365}
 366
 367/*
 368 * Stealing an rbio means taking all the uptodate pages from the stripe array
 369 * in the source rbio and putting them into the destination rbio.
 370 *
 371 * This will also update the involved stripe_sectors[] which are referring to
 372 * the old pages.
 373 */
 374static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
 375{
 376	int i;
 377
 378	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
 379		return;
 380
 381	for (i = 0; i < dest->nr_pages; i++) {
 382		struct page *p = src->stripe_pages[i];
 383
 384		/*
 385		 * We don't need to steal P/Q pages as they will always be
 386		 * regenerated for RMW or full write anyway.
 387		 */
 388		if (!is_data_stripe_page(src, i))
 389			continue;
 390
 391		/*
 392		 * If @src already has RBIO_CACHE_READY_BIT, it should have
 393		 * all data stripe pages present and uptodate.
 394		 */
 395		ASSERT(p);
 396		ASSERT(full_page_sectors_uptodate(src, i));
 397		steal_rbio_page(src, dest, i);
 398	}
 399	index_stripe_sectors(dest);
 400	index_stripe_sectors(src);
 401}
 402
 403/*
 404 * merging means we take the bio_list from the victim and
 405 * splice it into the destination.  The victim should
 406 * be discarded afterwards.
 407 *
 408 * must be called with dest->rbio_list_lock held
 409 */
 410static void merge_rbio(struct btrfs_raid_bio *dest,
 411		       struct btrfs_raid_bio *victim)
 412{
 413	bio_list_merge_init(&dest->bio_list, &victim->bio_list);
 414	dest->bio_list_bytes += victim->bio_list_bytes;
 415	/* Also inherit the bitmaps from @victim. */
 416	bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
 417		  dest->stripe_nsectors);
 
 418}
 419
 420/*
 421 * used to prune items that are in the cache.  The caller
 422 * must hold the hash table lock.
 423 */
 424static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 425{
 426	int bucket = rbio_bucket(rbio);
 427	struct btrfs_stripe_hash_table *table;
 428	struct btrfs_stripe_hash *h;
 429	int freeit = 0;
 430
 431	/*
 432	 * check the bit again under the hash table lock.
 433	 */
 434	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 435		return;
 436
 437	table = rbio->bioc->fs_info->stripe_hash_table;
 438	h = table->table + bucket;
 439
 440	/* hold the lock for the bucket because we may be
 441	 * removing it from the hash table
 442	 */
 443	spin_lock(&h->lock);
 444
 445	/*
 446	 * hold the lock for the bio list because we need
 447	 * to make sure the bio list is empty
 448	 */
 449	spin_lock(&rbio->bio_list_lock);
 450
 451	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 452		list_del_init(&rbio->stripe_cache);
 453		table->cache_size -= 1;
 454		freeit = 1;
 455
 456		/* if the bio list isn't empty, this rbio is
 457		 * still involved in an IO.  We take it out
 458		 * of the cache list, and drop the ref that
 459		 * was held for the list.
 460		 *
 461		 * If the bio_list was empty, we also remove
 462		 * the rbio from the hash_table, and drop
 463		 * the corresponding ref
 464		 */
 465		if (bio_list_empty(&rbio->bio_list)) {
 466			if (!list_empty(&rbio->hash_list)) {
 467				list_del_init(&rbio->hash_list);
 468				refcount_dec(&rbio->refs);
 469				BUG_ON(!list_empty(&rbio->plug_list));
 470			}
 471		}
 472	}
 473
 474	spin_unlock(&rbio->bio_list_lock);
 475	spin_unlock(&h->lock);
 476
 477	if (freeit)
 478		free_raid_bio(rbio);
 479}
 480
 481/*
 482 * prune a given rbio from the cache
 483 */
 484static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
 485{
 486	struct btrfs_stripe_hash_table *table;
 
 487
 488	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
 489		return;
 490
 491	table = rbio->bioc->fs_info->stripe_hash_table;
 492
 493	spin_lock(&table->cache_lock);
 494	__remove_rbio_from_cache(rbio);
 495	spin_unlock(&table->cache_lock);
 496}
 497
 498/*
 499 * remove everything in the cache
 500 */
 501static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
 502{
 503	struct btrfs_stripe_hash_table *table;
 
 504	struct btrfs_raid_bio *rbio;
 505
 506	table = info->stripe_hash_table;
 507
 508	spin_lock(&table->cache_lock);
 509	while (!list_empty(&table->stripe_cache)) {
 510		rbio = list_entry(table->stripe_cache.next,
 511				  struct btrfs_raid_bio,
 512				  stripe_cache);
 513		__remove_rbio_from_cache(rbio);
 514	}
 515	spin_unlock(&table->cache_lock);
 516}
 517
 518/*
 519 * remove all cached entries and free the hash table
 520 * used by unmount
 521 */
 522void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
 523{
 524	if (!info->stripe_hash_table)
 525		return;
 526	btrfs_clear_rbio_cache(info);
 527	kvfree(info->stripe_hash_table);
 528	info->stripe_hash_table = NULL;
 529}
 530
 531/*
 532 * insert an rbio into the stripe cache.  It
 533 * must have already been prepared by calling
 534 * cache_rbio_pages
 535 *
 536 * If this rbio was already cached, it gets
 537 * moved to the front of the lru.
 538 *
 539 * If the size of the rbio cache is too big, we
 540 * prune an item.
 541 */
 542static void cache_rbio(struct btrfs_raid_bio *rbio)
 543{
 544	struct btrfs_stripe_hash_table *table;
 
 545
 546	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
 547		return;
 548
 549	table = rbio->bioc->fs_info->stripe_hash_table;
 550
 551	spin_lock(&table->cache_lock);
 552	spin_lock(&rbio->bio_list_lock);
 553
 554	/* bump our ref if we were not in the list before */
 555	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
 556		refcount_inc(&rbio->refs);
 557
 558	if (!list_empty(&rbio->stripe_cache)){
 559		list_move(&rbio->stripe_cache, &table->stripe_cache);
 560	} else {
 561		list_add(&rbio->stripe_cache, &table->stripe_cache);
 562		table->cache_size += 1;
 563	}
 564
 565	spin_unlock(&rbio->bio_list_lock);
 566
 567	if (table->cache_size > RBIO_CACHE_SIZE) {
 568		struct btrfs_raid_bio *found;
 569
 570		found = list_entry(table->stripe_cache.prev,
 571				  struct btrfs_raid_bio,
 572				  stripe_cache);
 573
 574		if (found != rbio)
 575			__remove_rbio_from_cache(found);
 576	}
 577
 578	spin_unlock(&table->cache_lock);
 579}
 580
 581/*
 582 * helper function to run the xor_blocks api.  It is only
 583 * able to do MAX_XOR_BLOCKS at a time, so we need to
 584 * loop through.
 585 */
 586static void run_xor(void **pages, int src_cnt, ssize_t len)
 587{
 588	int src_off = 0;
 589	int xor_src_cnt = 0;
 590	void *dest = pages[src_cnt];
 591
 592	while(src_cnt > 0) {
 593		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
 594		xor_blocks(xor_src_cnt, len, dest, pages + src_off);
 595
 596		src_cnt -= xor_src_cnt;
 597		src_off += xor_src_cnt;
 598	}
 599}
 600
 601/*
 602 * Returns true if the bio list inside this rbio covers an entire stripe (no
 603 * rmw required).
 604 */
 605static int rbio_is_full(struct btrfs_raid_bio *rbio)
 606{
 
 607	unsigned long size = rbio->bio_list_bytes;
 608	int ret = 1;
 609
 610	spin_lock(&rbio->bio_list_lock);
 611	if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
 612		ret = 0;
 613	BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
 614	spin_unlock(&rbio->bio_list_lock);
 615
 616	return ret;
 617}
 618
 619/*
 620 * returns 1 if it is safe to merge two rbios together.
 621 * The merging is safe if the two rbios correspond to
 622 * the same stripe and if they are both going in the same
 623 * direction (read vs write), and if neither one is
 624 * locked for final IO
 625 *
 626 * The caller is responsible for locking such that
 627 * rmw_locked is safe to test
 628 */
 629static int rbio_can_merge(struct btrfs_raid_bio *last,
 630			  struct btrfs_raid_bio *cur)
 631{
 632	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
 633	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
 634		return 0;
 635
 636	/*
 637	 * we can't merge with cached rbios, since the
 638	 * idea is that when we merge the destination
 639	 * rbio is going to run our IO for us.  We can
 640	 * steal from cached rbios though, other functions
 641	 * handle that.
 642	 */
 643	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
 644	    test_bit(RBIO_CACHE_BIT, &cur->flags))
 645		return 0;
 646
 647	if (last->bioc->full_stripe_logical != cur->bioc->full_stripe_logical)
 648		return 0;
 649
 650	/* we can't merge with different operations */
 651	if (last->operation != cur->operation)
 652		return 0;
 653	/*
 654	 * We've need read the full stripe from the drive.
 655	 * check and repair the parity and write the new results.
 656	 *
 657	 * We're not allowed to add any new bios to the
 658	 * bio list here, anyone else that wants to
 659	 * change this stripe needs to do their own rmw.
 660	 */
 661	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
 662		return 0;
 663
 664	if (last->operation == BTRFS_RBIO_READ_REBUILD)
 
 665		return 0;
 666
 667	return 1;
 668}
 669
 670static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
 671					     unsigned int stripe_nr,
 672					     unsigned int sector_nr)
 673{
 674	ASSERT_RBIO_STRIPE(stripe_nr < rbio->real_stripes, rbio, stripe_nr);
 675	ASSERT_RBIO_SECTOR(sector_nr < rbio->stripe_nsectors, rbio, sector_nr);
 676
 677	return stripe_nr * rbio->stripe_nsectors + sector_nr;
 678}
 679
 680/* Return a sector from rbio->stripe_sectors, not from the bio list */
 681static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
 682					     unsigned int stripe_nr,
 683					     unsigned int sector_nr)
 684{
 685	return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
 686							      sector_nr)];
 687}
 688
 689/* Grab a sector inside P stripe */
 690static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
 691					      unsigned int sector_nr)
 692{
 693	return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
 694}
 695
 696/* Grab a sector inside Q stripe, return NULL if not RAID6 */
 697static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
 698					      unsigned int sector_nr)
 699{
 700	if (rbio->nr_data + 1 == rbio->real_stripes)
 701		return NULL;
 702	return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
 703}
 704
 705/*
 706 * The first stripe in the table for a logical address
 707 * has the lock.  rbios are added in one of three ways:
 708 *
 709 * 1) Nobody has the stripe locked yet.  The rbio is given
 710 * the lock and 0 is returned.  The caller must start the IO
 711 * themselves.
 712 *
 713 * 2) Someone has the stripe locked, but we're able to merge
 714 * with the lock owner.  The rbio is freed and the IO will
 715 * start automatically along with the existing rbio.  1 is returned.
 716 *
 717 * 3) Someone has the stripe locked, but we're not able to merge.
 718 * The rbio is added to the lock owner's plug list, or merged into
 719 * an rbio already on the plug list.  When the lock owner unlocks,
 720 * the next rbio on the list is run and the IO is started automatically.
 721 * 1 is returned
 722 *
 723 * If we return 0, the caller still owns the rbio and must continue with
 724 * IO submission.  If we return 1, the caller must assume the rbio has
 725 * already been freed.
 726 */
 727static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 728{
 729	struct btrfs_stripe_hash *h;
 730	struct btrfs_raid_bio *cur;
 731	struct btrfs_raid_bio *pending;
 
 732	struct btrfs_raid_bio *freeit = NULL;
 733	struct btrfs_raid_bio *cache_drop = NULL;
 734	int ret = 0;
 735
 736	h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
 737
 738	spin_lock(&h->lock);
 739	list_for_each_entry(cur, &h->hash_list, hash_list) {
 740		if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
 741			continue;
 742
 743		spin_lock(&cur->bio_list_lock);
 744
 745		/* Can we steal this cached rbio's pages? */
 746		if (bio_list_empty(&cur->bio_list) &&
 747		    list_empty(&cur->plug_list) &&
 748		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
 749		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
 750			list_del_init(&cur->hash_list);
 751			refcount_dec(&cur->refs);
 752
 753			steal_rbio(cur, rbio);
 754			cache_drop = cur;
 755			spin_unlock(&cur->bio_list_lock);
 756
 757			goto lockit;
 758		}
 759
 760		/* Can we merge into the lock owner? */
 761		if (rbio_can_merge(cur, rbio)) {
 762			merge_rbio(cur, rbio);
 763			spin_unlock(&cur->bio_list_lock);
 764			freeit = rbio;
 765			ret = 1;
 766			goto out;
 767		}
 768
 769
 770		/*
 771		 * We couldn't merge with the running rbio, see if we can merge
 772		 * with the pending ones.  We don't have to check for rmw_locked
 773		 * because there is no way they are inside finish_rmw right now
 774		 */
 775		list_for_each_entry(pending, &cur->plug_list, plug_list) {
 776			if (rbio_can_merge(pending, rbio)) {
 777				merge_rbio(pending, rbio);
 778				spin_unlock(&cur->bio_list_lock);
 779				freeit = rbio;
 780				ret = 1;
 781				goto out;
 782			}
 783		}
 784
 785		/*
 786		 * No merging, put us on the tail of the plug list, our rbio
 787		 * will be started with the currently running rbio unlocks
 788		 */
 789		list_add_tail(&rbio->plug_list, &cur->plug_list);
 790		spin_unlock(&cur->bio_list_lock);
 791		ret = 1;
 792		goto out;
 793	}
 794lockit:
 795	refcount_inc(&rbio->refs);
 796	list_add(&rbio->hash_list, &h->hash_list);
 797out:
 798	spin_unlock(&h->lock);
 799	if (cache_drop)
 800		remove_rbio_from_cache(cache_drop);
 801	if (freeit)
 802		free_raid_bio(freeit);
 803	return ret;
 804}
 805
 806static void recover_rbio_work_locked(struct work_struct *work);
 807
 808/*
 809 * called as rmw or parity rebuild is completed.  If the plug list has more
 810 * rbios waiting for this stripe, the next one on the list will be started
 811 */
 812static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 813{
 814	int bucket;
 815	struct btrfs_stripe_hash *h;
 816	int keep_cache = 0;
 817
 818	bucket = rbio_bucket(rbio);
 819	h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
 820
 821	if (list_empty(&rbio->plug_list))
 822		cache_rbio(rbio);
 823
 824	spin_lock(&h->lock);
 825	spin_lock(&rbio->bio_list_lock);
 826
 827	if (!list_empty(&rbio->hash_list)) {
 828		/*
 829		 * if we're still cached and there is no other IO
 830		 * to perform, just leave this rbio here for others
 831		 * to steal from later
 832		 */
 833		if (list_empty(&rbio->plug_list) &&
 834		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
 835			keep_cache = 1;
 836			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
 837			BUG_ON(!bio_list_empty(&rbio->bio_list));
 838			goto done;
 839		}
 840
 841		list_del_init(&rbio->hash_list);
 842		refcount_dec(&rbio->refs);
 843
 844		/*
 845		 * we use the plug list to hold all the rbios
 846		 * waiting for the chance to lock this stripe.
 847		 * hand the lock over to one of them.
 848		 */
 849		if (!list_empty(&rbio->plug_list)) {
 850			struct btrfs_raid_bio *next;
 851			struct list_head *head = rbio->plug_list.next;
 852
 853			next = list_entry(head, struct btrfs_raid_bio,
 854					  plug_list);
 855
 856			list_del_init(&rbio->plug_list);
 857
 858			list_add(&next->hash_list, &h->hash_list);
 859			refcount_inc(&next->refs);
 860			spin_unlock(&rbio->bio_list_lock);
 861			spin_unlock(&h->lock);
 862
 863			if (next->operation == BTRFS_RBIO_READ_REBUILD) {
 864				start_async_work(next, recover_rbio_work_locked);
 865			} else if (next->operation == BTRFS_RBIO_WRITE) {
 866				steal_rbio(rbio, next);
 867				start_async_work(next, rmw_rbio_work_locked);
 868			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
 869				steal_rbio(rbio, next);
 870				start_async_work(next, scrub_rbio_work_locked);
 871			}
 872
 873			goto done_nolock;
 874		}
 875	}
 876done:
 877	spin_unlock(&rbio->bio_list_lock);
 878	spin_unlock(&h->lock);
 879
 880done_nolock:
 881	if (!keep_cache)
 882		remove_rbio_from_cache(rbio);
 883}
 884
 885static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
 886{
 887	struct bio *next;
 888
 889	while (cur) {
 890		next = cur->bi_next;
 891		cur->bi_next = NULL;
 892		cur->bi_status = err;
 893		bio_endio(cur);
 894		cur = next;
 895	}
 896}
 897
 898/*
 899 * this frees the rbio and runs through all the bios in the
 900 * bio_list and calls end_io on them
 901 */
 902static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 903{
 904	struct bio *cur = bio_list_get(&rbio->bio_list);
 905	struct bio *extra;
 906
 907	kfree(rbio->csum_buf);
 908	bitmap_free(rbio->csum_bitmap);
 909	rbio->csum_buf = NULL;
 910	rbio->csum_bitmap = NULL;
 911
 912	/*
 913	 * Clear the data bitmap, as the rbio may be cached for later usage.
 914	 * Do this before unlock_stripe() so there will be no new bio
 915	 * for this rbio.
 916	 */
 917	bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
 918
 919	/*
 920	 * At this moment, rbio->bio_list is empty, however since rbio does not
 921	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
 922	 * hash list, rbio may be merged with others so that rbio->bio_list
 923	 * becomes non-empty.
 924	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
 925	 * more and we can call bio_endio() on all queued bios.
 926	 */
 927	unlock_stripe(rbio);
 928	extra = bio_list_get(&rbio->bio_list);
 929	free_raid_bio(rbio);
 930
 931	rbio_endio_bio_list(cur, err);
 932	if (extra)
 933		rbio_endio_bio_list(extra, err);
 934}
 935
 936/*
 937 * Get a sector pointer specified by its @stripe_nr and @sector_nr.
 938 *
 939 * @rbio:               The raid bio
 940 * @stripe_nr:          Stripe number, valid range [0, real_stripe)
 941 * @sector_nr:		Sector number inside the stripe,
 942 *			valid range [0, stripe_nsectors)
 943 * @bio_list_only:      Whether to use sectors inside the bio list only.
 944 *
 945 * The read/modify/write code wants to reuse the original bio page as much
 946 * as possible, and only use stripe_sectors as fallback.
 947 */
 948static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
 949					 int stripe_nr, int sector_nr,
 950					 bool bio_list_only)
 951{
 952	struct sector_ptr *sector;
 953	int index;
 954
 955	ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->real_stripes,
 956			   rbio, stripe_nr);
 957	ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
 958			   rbio, sector_nr);
 959
 960	index = stripe_nr * rbio->stripe_nsectors + sector_nr;
 961	ASSERT(index >= 0 && index < rbio->nr_sectors);
 962
 963	spin_lock(&rbio->bio_list_lock);
 964	sector = &rbio->bio_sectors[index];
 965	if (sector->page || bio_list_only) {
 966		/* Don't return sector without a valid page pointer */
 967		if (!sector->page)
 968			sector = NULL;
 969		spin_unlock(&rbio->bio_list_lock);
 970		return sector;
 971	}
 972	spin_unlock(&rbio->bio_list_lock);
 973
 974	return &rbio->stripe_sectors[index];
 975}
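/*
 * Worked example (illustrative): with 64K stripes and 4K sectors,
 * stripe_nsectors == 16, so stripe 1 / sector 3 maps to index
 * 1 * 16 + 3 == 19 in both bio_sectors[] and stripe_sectors[].
 */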
 976
 977/*
 978 * Allocation and initial setup for the btrfs_raid_bio.  Note that
 979 * this does not allocate any pages for rbio->stripe_pages.
 980 */
 981static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
 982					 struct btrfs_io_context *bioc)
 983{
 984	const unsigned int real_stripes = bioc->num_stripes - bioc->replace_nr_stripes;
 985	const unsigned int stripe_npages = BTRFS_STRIPE_LEN >> PAGE_SHIFT;
 986	const unsigned int num_pages = stripe_npages * real_stripes;
 987	const unsigned int stripe_nsectors =
 988		BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
 989	const unsigned int num_sectors = stripe_nsectors * real_stripes;
 990	struct btrfs_raid_bio *rbio;
 991
 992	/* PAGE_SIZE must also be aligned to sectorsize for subpage support */
 993	ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize));
 994	/*
 995	 * Our current stripe len should be fixed to 64k thus stripe_nsectors
 996	 * (at most 16) should be no larger than BITS_PER_LONG.
 997	 */
 998	ASSERT(stripe_nsectors <= BITS_PER_LONG);
 999
1000	/*
1001	 * Real stripes must be between 2 (2 disks RAID5, aka RAID1) and 256
1002	 * (limited by u8).
1003	 */
1004	ASSERT(real_stripes >= 2);
1005	ASSERT(real_stripes <= U8_MAX);
1006
1007	rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
1008	if (!rbio)
1009		return ERR_PTR(-ENOMEM);
1010	rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
1011				     GFP_NOFS);
1012	rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
1013				    GFP_NOFS);
1014	rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
1015				       GFP_NOFS);
1016	rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
1017	rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
1018
1019	if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
1020	    !rbio->finish_pointers || !rbio->error_bitmap) {
1021		free_raid_bio_pointers(rbio);
1022		kfree(rbio);
1023		return ERR_PTR(-ENOMEM);
1024	}
1025
1026	bio_list_init(&rbio->bio_list);
1027	init_waitqueue_head(&rbio->io_wait);
1028	INIT_LIST_HEAD(&rbio->plug_list);
1029	spin_lock_init(&rbio->bio_list_lock);
1030	INIT_LIST_HEAD(&rbio->stripe_cache);
1031	INIT_LIST_HEAD(&rbio->hash_list);
1032	btrfs_get_bioc(bioc);
1033	rbio->bioc = bioc;
1034	rbio->nr_pages = num_pages;
1035	rbio->nr_sectors = num_sectors;
1036	rbio->real_stripes = real_stripes;
1037	rbio->stripe_npages = stripe_npages;
1038	rbio->stripe_nsectors = stripe_nsectors;
1039	refcount_set(&rbio->refs, 1);
1040	atomic_set(&rbio->stripes_pending, 0);
1041
1042	ASSERT(btrfs_nr_parity_stripes(bioc->map_type));
1043	rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
1044	ASSERT(rbio->nr_data > 0);
1045
1046	return rbio;
1047}
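/*
 * Sizing example (illustrative, assuming 4K pages and 4K sectors): a
 * 3-disk RAID5 gives real_stripes == 3, stripe_npages == 64K >> 12 == 16,
 * num_pages == 48, stripe_nsectors == 16 and num_sectors == 48, with
 * nr_data == 3 - 1 == 2 after subtracting the single parity stripe.
 */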
1048
1049/* allocate pages for all the stripes in the bio, including parity */
1050static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1051{
1052	int ret;
1053
1054	ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages, false);
1055	if (ret < 0)
1056		return ret;
1057	/* Mapping all sectors */
1058	index_stripe_sectors(rbio);
1059	return 0;
1060}
1061
1062/* only allocate pages for p/q stripes */
1063static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1064{
1065	const int data_pages = rbio->nr_data * rbio->stripe_npages;
1066	int ret;
1067
1068	ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
1069				     rbio->stripe_pages + data_pages, false);
1070	if (ret < 0)
1071		return ret;
1072
1073	index_stripe_sectors(rbio);
1074	return 0;
1075}
1076
1077/*
1078 * Return the total number of errors found in the vertical stripe of @sector_nr.
1079 *
1080 * @faila and @failb will also be updated to the first and second stripe
1081 * number of the errors.
1082 */
1083static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1084				     int *faila, int *failb)
1085{
1086	int stripe_nr;
1087	int found_errors = 0;
1088
1089	if (faila || failb) {
1090		/*
1091		 * Both @faila and @failb should be valid pointers if any of
1092		 * them is specified.
1093		 */
1094		ASSERT(faila && failb);
1095		*faila = -1;
1096		*failb = -1;
1097	}
1098
1099	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1100		int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
1101
1102		if (test_bit(total_sector_nr, rbio->error_bitmap)) {
1103			found_errors++;
1104			if (faila) {
1105				/* Update faila and failb. */
1106				if (*faila < 0)
1107					*faila = stripe_nr;
1108				else if (*failb < 0)
1109					*failb = stripe_nr;
1110			}
1111		}
1112	}
1113	return found_errors;
1114}
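/*
 * Illustrative sketch: the error bitmap is laid out stripe-major, so a
 * vertical stripe walk tests bits sector_nr, stripe_nsectors + sector_nr,
 * 2 * stripe_nsectors + sector_nr, ...  E.g. with stripe_nsectors == 16
 * and sector_nr == 3, a 3-disk array tests bits 3, 19 and 35.
 */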
1115
1116/*
1117 * Add a single sector @sector into our list of bios for IO.
1118 *
1119 * Return 0 if everything went well.
1120 * Return <0 for error.
1121 */
1122static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
1123			      struct bio_list *bio_list,
1124			      struct sector_ptr *sector,
1125			      unsigned int stripe_nr,
1126			      unsigned int sector_nr,
1127			      enum req_op op)
1128{
1129	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1130	struct bio *last = bio_list->tail;
1131	int ret;
1132	struct bio *bio;
1133	struct btrfs_io_stripe *stripe;
1134	u64 disk_start;
1135
1136	/*
1137	 * Note: here stripe_nr has taken device replace into consideration,
1138	 * thus it can be larger than rbio->real_stripes.
1139	 * So here we check against bioc->num_stripes, not rbio->real_stripes.
1140	 */
1141	ASSERT_RBIO_STRIPE(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes,
1142			   rbio, stripe_nr);
1143	ASSERT_RBIO_SECTOR(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors,
1144			   rbio, sector_nr);
1145	ASSERT(sector->page);
1146
1147	stripe = &rbio->bioc->stripes[stripe_nr];
1148	disk_start = stripe->physical + sector_nr * sectorsize;
1149
1150	/* if the device is missing, just fail this stripe */
1151	if (!stripe->dev->bdev) {
1152		int found_errors;
1153
1154		set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
1155			rbio->error_bitmap);
1156
1157		/* Check if we have reached tolerance early. */
1158		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1159							 NULL, NULL);
1160		if (found_errors > rbio->bioc->max_errors)
1161			return -EIO;
1162		return 0;
1163	}
1164
1165	/* see if we can add this page onto our existing bio */
1166	if (last) {
1167		u64 last_end = last->bi_iter.bi_sector << SECTOR_SHIFT;
1168		last_end += last->bi_iter.bi_size;
1169
1170		/*
1171		 * we can't merge these if they are from different
1172		 * devices or if they are not contiguous
1173		 */
1174		if (last_end == disk_start && !last->bi_status &&
1175		    last->bi_bdev == stripe->dev->bdev) {
1176			ret = bio_add_page(last, sector->page, sectorsize,
1177					   sector->pgoff);
1178			if (ret == sectorsize)
1179				return 0;
1180		}
1181	}
1182
1183	/* put a new bio on the list */
1184	bio = bio_alloc(stripe->dev->bdev,
1185			max(BTRFS_STRIPE_LEN >> PAGE_SHIFT, 1),
1186			op, GFP_NOFS);
1187	bio->bi_iter.bi_sector = disk_start >> SECTOR_SHIFT;
1188	bio->bi_private = rbio;
1189
1190	__bio_add_page(bio, sector->page, sectorsize, sector->pgoff);
1191	bio_list_add(bio_list, bio);
1192	return 0;
1193}
1194
1195static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
1196{
1197	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1198	struct bio_vec bvec;
1199	struct bvec_iter iter;
1200	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1201		     rbio->bioc->full_stripe_logical;
1202
1203	bio_for_each_segment(bvec, bio, iter) {
1204		u32 bvec_offset;
1205
1206		for (bvec_offset = 0; bvec_offset < bvec.bv_len;
1207		     bvec_offset += sectorsize, offset += sectorsize) {
1208			int index = offset / sectorsize;
1209			struct sector_ptr *sector = &rbio->bio_sectors[index];
1210
1211			sector->page = bvec.bv_page;
1212			sector->pgoff = bvec.bv_offset + bvec_offset;
1213			ASSERT(sector->pgoff < PAGE_SIZE);
1214		}
1215	}
1216}
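/*
 * Example (illustrative): a bio starting 72K into the full stripe on a
 * fs with 4K sectors populates bio_sectors[18], bio_sectors[19], ...,
 * one sector_ptr per sectorsize chunk of each bvec.
 */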
1217
1218/*
1219 * Helper function to walk our bio list and populate the bio_sectors array
1220 * with the result.  This seems expensive, but it is faster than constantly
1221 * searching through the bio list as we set up the IO in the RMW or stripe
1222 * reconstruction paths.
1223 *
1224 * This must be called before you trust the answers from sector_in_rbio().
1225 */
1226static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1227{
1228	struct bio *bio;
1229
1230	spin_lock(&rbio->bio_list_lock);
1231	bio_list_for_each(bio, &rbio->bio_list)
1232		index_one_bio(rbio, bio);
1233
1234	spin_unlock(&rbio->bio_list_lock);
1235}
1236
1237static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1238			       struct raid56_bio_trace_info *trace_info)
1239{
1240	const struct btrfs_io_context *bioc = rbio->bioc;
1241	int i;
1242
1243	ASSERT(bioc);
1244
1245	/* We rely on bio->bi_bdev to find the stripe number. */
1246	if (!bio->bi_bdev)
1247		goto not_found;
1248
1249	for (i = 0; i < bioc->num_stripes; i++) {
1250		if (bio->bi_bdev != bioc->stripes[i].dev->bdev)
1251			continue;
1252		trace_info->stripe_nr = i;
1253		trace_info->devid = bioc->stripes[i].dev->devid;
1254		trace_info->offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1255				     bioc->stripes[i].physical;
1256		return;
1257	}
1258
1259not_found:
1260	trace_info->devid = -1;
1261	trace_info->offset = -1;
1262	trace_info->stripe_nr = -1;
1263}
1264
1265static inline void bio_list_put(struct bio_list *bio_list)
1266{
1267	struct bio *bio;
1268
1269	while ((bio = bio_list_pop(bio_list)))
1270		bio_put(bio);
1271}
1272
1273static void assert_rbio(struct btrfs_raid_bio *rbio)
1274{
1275	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
1276		return;
1277
1278	/*
1279	 * At least two stripes (2 disks RAID5), and since real_stripes is a u8,
1280	 * we won't go beyond 256 disks anyway.
1281	 */
1282	ASSERT_RBIO(rbio->real_stripes >= 2, rbio);
1283	ASSERT_RBIO(rbio->nr_data > 0, rbio);
1284
1285	/*
1286	 * This is another check to make sure the number of data stripes is
1287	 * smaller than the total number of stripes.
1288	 */
1289	ASSERT_RBIO(rbio->nr_data < rbio->real_stripes, rbio);
1290}
1291
1292/* Generate PQ for one vertical stripe. */
1293static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
1294{
1295	void **pointers = rbio->finish_pointers;
1296	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1297	struct sector_ptr *sector;
1298	int stripe;
1299	const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
1300
1301	/* First collect one sector from each data stripe */
1302	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1303		sector = sector_in_rbio(rbio, stripe, sectornr, 0);
1304		pointers[stripe] = kmap_local_page(sector->page) +
1305				   sector->pgoff;
1306	}
1307
1308	/* Then add the parity stripe */
1309	sector = rbio_pstripe_sector(rbio, sectornr);
1310	sector->uptodate = 1;
1311	pointers[stripe++] = kmap_local_page(sector->page) + sector->pgoff;
1312
1313	if (has_qstripe) {
1314		/*
1315		 * RAID6, add the qstripe and call the library function
1316		 * to fill in our p/q
1317		 */
1318		sector = rbio_qstripe_sector(rbio, sectornr);
1319		sector->uptodate = 1;
1320		pointers[stripe++] = kmap_local_page(sector->page) +
1321				     sector->pgoff;
1322
1323		assert_rbio(rbio);
1324		raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
1325					pointers);
1326	} else {
1327		/* raid5 */
1328		memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
1329		run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
1330	}
1331	for (stripe = stripe - 1; stripe >= 0; stripe--)
1332		kunmap_local(pointers[stripe]);
1333}
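/*
 * Minimal sketch of the RAID5 branch above (illustrative only, not the
 * kernel's run_xor() implementation): parity starts as a copy of data
 * stripe 0, then every remaining data stripe is XORed in:
 *
 *	memcpy(parity, data[0], sectorsize);
 *	for (i = 1; i < nr_data; i++)
 *		for (j = 0; j < sectorsize; j++)
 *			((u8 *)parity)[j] ^= ((const u8 *)data[i])[j];
 */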
1334
1335static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
1336				   struct bio_list *bio_list)
1337{
1338	/* The total sector number inside the full stripe. */
1339	int total_sector_nr;
1340	int sectornr;
1341	int stripe;
1342	int ret;
1343
1344	ASSERT(bio_list_size(bio_list) == 0);
1345
1346	/* We should have at least one data sector. */
1347	ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
1348
1349	/*
1350	 * Reset errors, as we may have errors inherited from a degraded
1351	 * write.
1352	 */
1353	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
1354
1355	/*
1356	 * Start assembly.  Make bios for everything from the higher layers (the
1357	 * bio_list in our rbio) and our P/Q.  Ignore everything else.
1358	 */
1359	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1360	     total_sector_nr++) {
1361		struct sector_ptr *sector;
1362
1363		stripe = total_sector_nr / rbio->stripe_nsectors;
1364		sectornr = total_sector_nr % rbio->stripe_nsectors;
1365
1366		/* This vertical stripe has no data, skip it. */
1367		if (!test_bit(sectornr, &rbio->dbitmap))
1368			continue;
1369
1370		if (stripe < rbio->nr_data) {
1371			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1372			if (!sector)
1373				continue;
1374		} else {
1375			sector = rbio_stripe_sector(rbio, stripe, sectornr);
1376		}
1377
1378		ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1379					 sectornr, REQ_OP_WRITE);
1380		if (ret)
1381			goto error;
1382	}
1383
1384	if (likely(!rbio->bioc->replace_nr_stripes))
1385		return 0;
1386
1387	/*
1388	 * Make a copy for the replace target device.
1389	 *
1390	 * Thus the source stripe number (in replace_stripe_src) should be valid.
1391	 */
1392	ASSERT(rbio->bioc->replace_stripe_src >= 0);
1393
1394	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1395	     total_sector_nr++) {
1396		struct sector_ptr *sector;
1397
1398		stripe = total_sector_nr / rbio->stripe_nsectors;
1399		sectornr = total_sector_nr % rbio->stripe_nsectors;
1400
1401		/*
1402		 * For RAID56, there is only one device that can be replaced,
1403		 * and replace_stripe_src indicates the stripe number we
1404		 * need to copy from.
1405		 */
1406		if (stripe != rbio->bioc->replace_stripe_src) {
1407			/*
1408			 * We can skip the whole stripe completely, note
1409			 * total_sector_nr will be increased by one anyway.
1410			 */
1411			ASSERT(sectornr == 0);
1412			total_sector_nr += rbio->stripe_nsectors - 1;
1413			continue;
1414		}
1415
1416		/* This vertical stripe has no data, skip it. */
1417		if (!test_bit(sectornr, &rbio->dbitmap))
1418			continue;
1419
1420		if (stripe < rbio->nr_data) {
1421			sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1422			if (!sector)
1423				continue;
1424		} else {
1425			sector = rbio_stripe_sector(rbio, stripe, sectornr);
1426		}
1427
1428		ret = rbio_add_io_sector(rbio, bio_list, sector,
1429					 rbio->real_stripes,
1430					 sectornr, REQ_OP_WRITE);
1431		if (ret)
1432			goto error;
1433	}
1434
1435	return 0;
1436error:
1437	bio_list_put(bio_list);
1438	return -EIO;
1439}
1440
1441static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
1442{
1443	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1444	u32 offset = (bio->bi_iter.bi_sector << SECTOR_SHIFT) -
1445		     rbio->bioc->full_stripe_logical;
1446	int total_nr_sector = offset >> fs_info->sectorsize_bits;
1447
1448	ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
1449
1450	bitmap_set(rbio->error_bitmap, total_nr_sector,
1451		   bio->bi_iter.bi_size >> fs_info->sectorsize_bits);
1452
1453	/*
1454	 * Special handling for raid56_alloc_missing_rbio() used by
1455	 * scrub/replace.  Unlike the call path in raid56_parity_recover(), they
1456	 * pass an empty bio here.  Thus we have to find out the missing device
1457	 * and mark the stripe error instead.
1458	 */
1459	if (bio->bi_iter.bi_size == 0) {
1460		bool found_missing = false;
1461		int stripe_nr;
1462
1463		for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1464			if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
1465				found_missing = true;
1466				bitmap_set(rbio->error_bitmap,
1467					   stripe_nr * rbio->stripe_nsectors,
1468					   rbio->stripe_nsectors);
1469			}
1470		}
1471		ASSERT(found_missing);
1472	}
1473}
1474
1475/*
1476 * For subpage case, we can no longer set page Up-to-date directly for
1477 * stripe_pages[], thus we need to locate the sector.
1478 */
1479static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
1480					     struct page *page,
1481					     unsigned int pgoff)
1482{
1483	int i;
1484
1485	for (i = 0; i < rbio->nr_sectors; i++) {
1486		struct sector_ptr *sector = &rbio->stripe_sectors[i];
1487
1488		if (sector->page == page && sector->pgoff == pgoff)
1489			return sector;
1490	}
1491	return NULL;
1492}
1493
1494/*
1495 * this sets each page in the bio uptodate.  It should only be used on private
1496 * rbio pages, nothing that comes in from the higher layers
1497 */
1498static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
1499{
1500	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1501	struct bio_vec *bvec;
1502	struct bvec_iter_all iter_all;
1503
1504	ASSERT(!bio_flagged(bio, BIO_CLONED));
1505
1506	bio_for_each_segment_all(bvec, bio, iter_all) {
1507		struct sector_ptr *sector;
1508		int pgoff;
1509
1510		for (pgoff = bvec->bv_offset; pgoff - bvec->bv_offset < bvec->bv_len;
1511		     pgoff += sectorsize) {
1512			sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
1513			ASSERT(sector);
1514			if (sector)
1515				sector->uptodate = 1;
1516		}
1517	}
1518}
1519
1520static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
1521{
1522	struct bio_vec *bv = bio_first_bvec_all(bio);
1523	int i;
1524
1525	for (i = 0; i < rbio->nr_sectors; i++) {
1526		struct sector_ptr *sector;
1527
1528		sector = &rbio->stripe_sectors[i];
1529		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1530			break;
1531		sector = &rbio->bio_sectors[i];
1532		if (sector->page == bv->bv_page && sector->pgoff == bv->bv_offset)
1533			break;
1534	}
1535	ASSERT(i < rbio->nr_sectors);
1536	return i;
1537}
1538
1539static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
1540{
1541	int total_sector_nr = get_bio_sector_nr(rbio, bio);
1542	u32 bio_size = 0;
1543	struct bio_vec *bvec;
1544	int i;
1545
1546	bio_for_each_bvec_all(bvec, bio, i)
1547		bio_size += bvec->bv_len;
1548
1549	/*
1550	 * Since we can have multiple bios touching the error_bitmap, we cannot
1551	 * call bitmap_set() without protection.
1552	 *
1553	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
1554	 */
1555	for (i = total_sector_nr; i < total_sector_nr +
1556	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
1557		set_bit(i, rbio->error_bitmap);
1558}
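/*
 * Example (illustrative): a failed 16K bio whose first bvec maps to
 * total sector 19 on a 4K-sector fs sets bits 19..22, one bit per
 * sector, each with its own atomic set_bit().
 */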
1559
1560/* Verify the data sectors at read time. */
1561static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
1562				    struct bio *bio)
1563{
1564	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1565	int total_sector_nr = get_bio_sector_nr(rbio, bio);
1566	struct bio_vec *bvec;
1567	struct bvec_iter_all iter_all;
1568
1569	/* No data csum for the whole stripe, no need to verify. */
1570	if (!rbio->csum_bitmap || !rbio->csum_buf)
1571		return;
1572
1573	/* P/Q stripes, they have no data csum to verify against. */
1574	if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
1575		return;
1576
1577	bio_for_each_segment_all(bvec, bio, iter_all) {
1578		int bv_offset;
1579
1580		for (bv_offset = bvec->bv_offset;
1581		     bv_offset < bvec->bv_offset + bvec->bv_len;
1582		     bv_offset += fs_info->sectorsize, total_sector_nr++) {
1583			u8 csum_buf[BTRFS_CSUM_SIZE];
1584			u8 *expected_csum = rbio->csum_buf +
1585					    total_sector_nr * fs_info->csum_size;
1586			int ret;
1587
1588			/* No csum for this sector, skip to the next sector. */
1589			if (!test_bit(total_sector_nr, rbio->csum_bitmap))
1590				continue;
1591
1592			ret = btrfs_check_sector_csum(fs_info, bvec->bv_page,
1593				bv_offset, csum_buf, expected_csum);
1594			if (ret < 0)
1595				set_bit(total_sector_nr, rbio->error_bitmap);
1596		}
1597	}
1598}
1599
1600static void raid_wait_read_end_io(struct bio *bio)
1601{
1602	struct btrfs_raid_bio *rbio = bio->bi_private;
1603
1604	if (bio->bi_status) {
1605		rbio_update_error_bitmap(rbio, bio);
1606	} else {
1607		set_bio_pages_uptodate(rbio, bio);
1608		verify_bio_data_sectors(rbio, bio);
1609	}
1610
1611	bio_put(bio);
1612	if (atomic_dec_and_test(&rbio->stripes_pending))
1613		wake_up(&rbio->io_wait);
1614}
1615
1616static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
1617			     struct bio_list *bio_list)
1618{
1619	struct bio *bio;
1620
1621	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1622	while ((bio = bio_list_pop(bio_list))) {
1623		bio->bi_end_io = raid_wait_read_end_io;
1624
1625		if (trace_raid56_read_enabled()) {
1626			struct raid56_bio_trace_info trace_info = { 0 };
1627
1628			bio_get_trace_info(rbio, bio, &trace_info);
1629			trace_raid56_read(rbio, bio, &trace_info);
1630		}
1631		submit_bio(bio);
1632	}
1633
1634	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
1635}
1636
1637static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
1638{
1639	const int data_pages = rbio->nr_data * rbio->stripe_npages;
1640	int ret;
1641
1642	ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages, false);
1643	if (ret < 0)
1644		return ret;
1645
1646	index_stripe_sectors(rbio);
1647	return 0;
1648}
1649
1650/*
1651 * We use plugging callbacks to collect full stripes.
1652 * Any time we get a partial stripe write while plugged
1653 * we collect it into a list.  When the unplug comes down,
1654 * we sort the list by logical block number and merge
1655 * everything we can into the same rbios.
1656 */
1657struct btrfs_plug_cb {
1658	struct blk_plug_cb cb;
1659	struct btrfs_fs_info *info;
1660	struct list_head rbio_list;
1661};
1662
1663/*
1664 * rbios on the plug list are sorted for easier merging.
1665 */
1666static int plug_cmp(void *priv, const struct list_head *a,
1667		    const struct list_head *b)
1668{
1669	const struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1670						       plug_list);
1671	const struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1672						       plug_list);
1673	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1674	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1675
1676	if (a_sector < b_sector)
1677		return -1;
1678	if (a_sector > b_sector)
1679		return 1;
1680	return 0;
1681}
1682
1683static void raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1684{
1685	struct btrfs_plug_cb *plug = container_of(cb, struct btrfs_plug_cb, cb);
1686	struct btrfs_raid_bio *cur;
1687	struct btrfs_raid_bio *last = NULL;
1688
1689	list_sort(NULL, &plug->rbio_list, plug_cmp);
1690
1691	while (!list_empty(&plug->rbio_list)) {
1692		cur = list_entry(plug->rbio_list.next,
1693				 struct btrfs_raid_bio, plug_list);
1694		list_del_init(&cur->plug_list);
1695
1696		if (rbio_is_full(cur)) {
1697			/* We have a full stripe, queue it down. */
1698			start_async_work(cur, rmw_rbio_work);
1699			continue;
1700		}
1701		if (last) {
1702			if (rbio_can_merge(last, cur)) {
1703				merge_rbio(last, cur);
1704				free_raid_bio(cur);
1705				continue;
1706			}
1707			start_async_work(last, rmw_rbio_work);
1708		}
1709		last = cur;
1710	}
1711	if (last)
1712		start_async_work(last, rmw_rbio_work);
1713	kfree(plug);
1714}
1715
1716/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1717static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1718{
1719	const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1720	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1721	const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
1722	const u32 orig_len = orig_bio->bi_iter.bi_size;
1723	const u32 sectorsize = fs_info->sectorsize;
1724	u64 cur_logical;
1725
1726	ASSERT_RBIO_LOGICAL(orig_logical >= full_stripe_start &&
1727			    orig_logical + orig_len <= full_stripe_start +
1728			    rbio->nr_data * BTRFS_STRIPE_LEN,
1729			    rbio, orig_logical);
1730
1731	bio_list_add(&rbio->bio_list, orig_bio);
1732	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1733
1734	/* Update the dbitmap. */
1735	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1736	     cur_logical += sectorsize) {
1737		int bit = ((u32)(cur_logical - full_stripe_start) >>
1738			   fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1739
1740		set_bit(bit, &rbio->dbitmap);
1741	}
1742}
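/*
 * Worked example (illustrative): with 4K sectors and stripe_nsectors ==
 * 16, a bio at full_stripe_start + 72K sets bit (72K >> 12) % 16 == 2,
 * i.e. dbitmap tracks positions inside one vertical stripe, not
 * absolute sector numbers.
 */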
1743
1744/*
1745 * our main entry point for writes from the rest of the FS.
1746 */
1747void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc)
1748{
1749	struct btrfs_fs_info *fs_info = bioc->fs_info;
1750	struct btrfs_raid_bio *rbio;
1751	struct btrfs_plug_cb *plug = NULL;
1752	struct blk_plug_cb *cb;
1753
1754	rbio = alloc_rbio(fs_info, bioc);
1755	if (IS_ERR(rbio)) {
1756		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
1757		bio_endio(bio);
1758		return;
1759	}
1760	rbio->operation = BTRFS_RBIO_WRITE;
1761	rbio_add_bio(rbio, bio);
1762
1763	/*
1764	 * Don't plug on full rbios, just get them out the door
1765	 * as quickly as we can
1766	 */
1767	if (!rbio_is_full(rbio)) {
1768		cb = blk_check_plugged(raid_unplug, fs_info, sizeof(*plug));
1769		if (cb) {
1770			plug = container_of(cb, struct btrfs_plug_cb, cb);
1771			if (!plug->info) {
1772				plug->info = fs_info;
1773				INIT_LIST_HEAD(&plug->rbio_list);
1774			}
1775			list_add_tail(&rbio->plug_list, &plug->rbio_list);
1776			return;
1777		}
1778	}
1779
1780	/*
1781	 * Either we don't have any existing plug, or we're doing a full stripe,
1782	 * queue the rmw work now.
1783	 */
1784	start_async_work(rbio, rmw_rbio_work);
1785}
1786
1787static int verify_one_sector(struct btrfs_raid_bio *rbio,
1788			     int stripe_nr, int sector_nr)
1789{
1790	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1791	struct sector_ptr *sector;
1792	u8 csum_buf[BTRFS_CSUM_SIZE];
1793	u8 *csum_expected;
1794	int ret;
1795
1796	if (!rbio->csum_bitmap || !rbio->csum_buf)
1797		return 0;
1798
1799	/* No way to verify P/Q as they are not covered by data csum. */
1800	if (stripe_nr >= rbio->nr_data)
1801		return 0;
1802	/*
1803	 * If we're rebuilding a read, we have to use pages from the
1804	 * bio list if possible.
1805	 */
1806	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1807		sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1808	} else {
1809		sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1810	}
1811
1812	ASSERT(sector->page);
1813
1814	csum_expected = rbio->csum_buf +
1815			(stripe_nr * rbio->stripe_nsectors + sector_nr) *
1816			fs_info->csum_size;
1817	ret = btrfs_check_sector_csum(fs_info, sector->page, sector->pgoff,
1818				      csum_buf, csum_expected);
1819	return ret;
1820}
1821
1822/*
1823 * Recover a vertical stripe specified by @sector_nr.
1824 * @*pointers are the pre-allocated pointers by the caller, so we don't
1825 * need to allocate/free the pointers again and again.
1826 */
1827static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1828			    void **pointers, void **unmap_array)
1829{
1830	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1831	struct sector_ptr *sector;
1832	const u32 sectorsize = fs_info->sectorsize;
1833	int found_errors;
1834	int faila;
1835	int failb;
1836	int stripe_nr;
1837	int ret = 0;
1838
1839	/*
1840	 * Now we just use bitmap to mark the horizontal stripes in
1841	 * which we have data when doing parity scrub.
1842	 */
1843	if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1844	    !test_bit(sector_nr, &rbio->dbitmap))
1845		return 0;
1846
1847	found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1848						 &failb);
1849	/*
1850	 * No errors in the vertical stripe, skip it.  Can happen for recovery
1851	 * where only part of a stripe failed the csum check.
1852	 */
1853	if (!found_errors)
1854		return 0;
1855
1856	if (found_errors > rbio->bioc->max_errors)
1857		return -EIO;
1858
1859	/*
1860	 * Setup our array of pointers with sectors from each stripe
1861	 *
1862	 * NOTE: store a duplicate array of pointers to preserve the
1863	 * pointer order.
1864	 */
1865	for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1866		/*
1867		 * If we're rebuilding a read, we have to use pages from the
1868		 * bio list if possible.
1869		 */
1870		if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1871			sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1872		} else {
1873			sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1874		}
1875		ASSERT(sector->page);
1876		pointers[stripe_nr] = kmap_local_page(sector->page) +
1877				   sector->pgoff;
1878		unmap_array[stripe_nr] = pointers[stripe_nr];
1879	}
1880
1881	/* All raid6 handling here */
1882	if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1883		/* Single failure, rebuild from parity raid5 style */
1884		if (failb < 0) {
1885			if (faila == rbio->nr_data)
1886				/*
1887				 * Just the P stripe has failed, without
1888				 * a bad data or Q stripe.
1889				 * We have nothing to do, just skip the
1890				 * recovery for this stripe.
1891				 */
1892				goto cleanup;
1893			/*
1894			 * a single failure in raid6 is rebuilt
1895			 * in the pstripe code below
1896			 */
1897			goto pstripe;
1898		}
1899
1900		/*
1901		 * If the Q stripe is failed, do a P stripe reconstruction from
1902		 * the XORs.
1903		 * If both the Q stripe and the P stripe are failed, we're
1904		 * here due to a crc mismatch and we can't give them the
1905		 * data they want.
1906		 */
1907		if (failb == rbio->real_stripes - 1) {
1908			if (faila == rbio->real_stripes - 2)
1909				/*
1910				 * Only P and Q are corrupted.
1911				 * We only care about data stripes recovery,
1912				 * can skip this vertical stripe.
1913				 */
1914				goto cleanup;
1915			/*
1916			 * Otherwise we have one bad data stripe and
1917			 * a good P stripe.  raid5!
1918			 */
1919			goto pstripe;
1920		}
1921
1922		if (failb == rbio->real_stripes - 2) {
1923			raid6_datap_recov(rbio->real_stripes, sectorsize,
1924					  faila, pointers);
1925		} else {
1926			raid6_2data_recov(rbio->real_stripes, sectorsize,
1927					  faila, failb, pointers);
1928		}
1929	} else {
1930		void *p;
1931
1932		/* Rebuild from P stripe here (raid5 or raid6). */
1933		ASSERT(failb == -1);
1934pstripe:
1935		/* Copy parity block into failed block to start with */
1936		memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1937
1938		/* Rearrange the pointer array */
1939		p = pointers[faila];
1940		for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1941		     stripe_nr++)
1942			pointers[stripe_nr] = pointers[stripe_nr + 1];
1943		pointers[rbio->nr_data - 1] = p;
1944
1945		/* Xor in the rest */
1946		run_xor(pointers, rbio->nr_data - 1, sectorsize);
1947
1948	}
1949
1950	/*
1951	 * No matter if this is a RMW or recovery, we should have all
1952	 * failed sectors repaired in the vertical stripe, thus they are now
1953	 * uptodate.
1954	 * Especially if we decide to cache the rbio, we need to
1955	 * have at least all data sectors uptodate.
1956	 *
1957	 * If possible, also check if the repaired sector matches its data
1958	 * checksum.
1959	 */
1960	if (faila >= 0) {
1961		ret = verify_one_sector(rbio, faila, sector_nr);
1962		if (ret < 0)
1963			goto cleanup;
1964
1965		sector = rbio_stripe_sector(rbio, faila, sector_nr);
1966		sector->uptodate = 1;
1967	}
1968	if (failb >= 0) {
1969		ret = verify_one_sector(rbio, failb, sector_nr);
1970		if (ret < 0)
1971			goto cleanup;
1972
1973		sector = rbio_stripe_sector(rbio, failb, sector_nr);
1974		sector->uptodate = 1;
1975	}
1976
1977cleanup:
1978	for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1979		kunmap_local(unmap_array[stripe_nr]);
1980	return ret;
1981}
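/*
 * Decision summary for the RAID6 branch above (illustrative): with
 * nr_data == D, stripes [0, D) are data, D is P and D + 1 is Q, and the
 * scan order guarantees faila < failb.  P-only or P+Q failures are
 * skipped, data+Q falls back to the P (XOR) rebuild, data+P uses
 * raid6_datap_recov() and data+data uses raid6_2data_recov().
 */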
1982
1983static int recover_sectors(struct btrfs_raid_bio *rbio)
1984{
1985	void **pointers = NULL;
1986	void **unmap_array = NULL;
1987	int sectornr;
1988	int ret = 0;
1989
1990	/*
1991	 * @pointers array stores the pointer for each sector.
1992	 *
1993	 * @unmap_array stores a copy of the pointers that does not get reordered
1994	 * during reconstruction so that kunmap_local works.
1995	 */
1996	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1997	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1998	if (!pointers || !unmap_array) {
1999		ret = -ENOMEM;
2000		goto out;
2001	}
2002
2003	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
2004		spin_lock(&rbio->bio_list_lock);
2005		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2006		spin_unlock(&rbio->bio_list_lock);
2007	}
2008
2009	index_rbio_pages(rbio);
2010
2011	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2012		ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
2013		if (ret < 0)
2014			break;
2015	}
2016
2017out:
2018	kfree(pointers);
2019	kfree(unmap_array);
2020	return ret;
2021}
2022
2023static void recover_rbio(struct btrfs_raid_bio *rbio)
2024{
2025	struct bio_list bio_list = BIO_EMPTY_LIST;
2026	int total_sector_nr;
2027	int ret = 0;
2028
2029	/*
2030	 * Either we're doing recovery for a read failure or a degraded write;
2031	 * the caller should have set the error bitmap correctly.
2032	 */
2033	ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
2034
2035	/* For recovery, we need to read all sectors including P/Q. */
2036	ret = alloc_rbio_pages(rbio);
2037	if (ret < 0)
2038		goto out;
2039
2040	index_rbio_pages(rbio);
2041
2042	/*
2043	 * Read everything that hasn't failed. However this time we will
2044	 * not trust any cached sector.
2045	 * A cached sector may hold stale data that the higher layer is not
2046	 * reading back.
2047	 *
2048	 * So in the recovery path we always re-read everything.
2049	 */
2050	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2051	     total_sector_nr++) {
2052		int stripe = total_sector_nr / rbio->stripe_nsectors;
2053		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2054		struct sector_ptr *sector;
2055
2056		/*
2057		 * Skip the range which has an error.  It can be a range which is
2058		 * marked error (for csum mismatch), or it can be a missing
2059		 * device.
2060		 */
2061		if (!rbio->bioc->stripes[stripe].dev->bdev ||
2062		    test_bit(total_sector_nr, rbio->error_bitmap)) {
2063			/*
2064			 * Also set the error bit for a missing device, which
2065			 * may not yet have its error bit set.
2066			 */
2067			set_bit(total_sector_nr, rbio->error_bitmap);
2068			continue;
2069		}
2070
2071		sector = rbio_stripe_sector(rbio, stripe, sectornr);
2072		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
2073					 sectornr, REQ_OP_READ);
2074		if (ret < 0) {
2075			bio_list_put(&bio_list);
2076			goto out;
2077		}
2078	}
2079
2080	submit_read_wait_bio_list(rbio, &bio_list);
2081	ret = recover_sectors(rbio);
2082out:
2083	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2084}
2085
2086static void recover_rbio_work(struct work_struct *work)
2087{
2088	struct btrfs_raid_bio *rbio;
2089
2090	rbio = container_of(work, struct btrfs_raid_bio, work);
2091	if (!lock_stripe_add(rbio))
2092		recover_rbio(rbio);
2093}
2094
2095static void recover_rbio_work_locked(struct work_struct *work)
2096{
2097	recover_rbio(container_of(work, struct btrfs_raid_bio, work));
2098}
2099
2100static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
2101{
2102	bool found = false;
2103	int sector_nr;
2104
2105	/*
2106	 * This is for RAID6 extra recovery tries, thus mirror number should
2107	 * be larger than 2.
2108	 * Mirror 1 means read from data stripes. Mirror 2 means rebuild using
2109	 * RAID5 methods.
2110	 */
2111	ASSERT(mirror_num > 2);
2112	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2113		int found_errors;
2114		int faila;
2115		int failb;
2116
2117		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2118							 &faila, &failb);
2119		/* This vertical stripe doesn't have errors. */
2120		if (!found_errors)
2121			continue;
2122
2123		/*
2124		 * If we found errors, there should be only one error marked
2125		 * by previous set_rbio_range_error().
2126		 */
2127		ASSERT(found_errors == 1);
2128		found = true;
2129
2130		/* Now select another stripe to mark as error. */
2131		failb = rbio->real_stripes - (mirror_num - 1);
2132		if (failb <= faila)
2133			failb--;
2134
2135		/* Set the extra bit in error bitmap. */
2136		if (failb >= 0)
2137			set_bit(failb * rbio->stripe_nsectors + sector_nr,
2138				rbio->error_bitmap);
2139	}
2140
2141	/* We should have found at least one vertical stripe with an error. */
2142	ASSERT(found);
2143}
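/*
 * Example (illustrative): on a 4-disk RAID6 (real_stripes == 4) whose
 * first error sits on stripe 0, mirror_num == 3 marks failb = 4 - 2 == 2
 * and mirror_num == 4 marks failb = 4 - 3 == 1, so each retry fails a
 * different extra stripe combination.
 */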
2144
2145/*
2146 * the main entry point for reads from the higher layers.  This
2147 * is really only called when the normal read path had a failure,
2148 * so we assume the bio they send down corresponds to a failed part
2149 * of the drive.
2150 */
2151void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
2152			   int mirror_num)
2153{
2154	struct btrfs_fs_info *fs_info = bioc->fs_info;
2155	struct btrfs_raid_bio *rbio;
2156
2157	rbio = alloc_rbio(fs_info, bioc);
2158	if (IS_ERR(rbio)) {
2159		bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2160		bio_endio(bio);
2161		return;
2162	}
2163
2164	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2165	rbio_add_bio(rbio, bio);
2166
2167	set_rbio_range_error(rbio, bio);
2168
2169	/*
2170	 * Loop retry:
2171	 * for 'mirror_num == 2', reconstruct from all other stripes.
2172	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2173	 */
2174	if (mirror_num > 2)
2175		set_rbio_raid6_extra_error(rbio, mirror_num);
2176
2177	start_async_work(rbio, recover_rbio_work);
2178}
2179
2180static void fill_data_csums(struct btrfs_raid_bio *rbio)
2181{
2182	struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2183	struct btrfs_root *csum_root = btrfs_csum_root(fs_info,
2184						       rbio->bioc->full_stripe_logical);
2185	const u64 start = rbio->bioc->full_stripe_logical;
2186	const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2187			fs_info->sectorsize_bits;
2188	int ret;
2189
2190	/* The rbio should not have its csum buffer initialized. */
2191	ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2192
2193	/*
2194	 * Skip the csum search if:
2195	 *
2196	 * - The rbio doesn't belong to data block groups
2197	 *   Then we are doing IO for tree blocks, no need to search csums.
2198	 *
2199	 * - The rbio belongs to mixed block groups
2200	 *   This is to avoid deadlock: we're already holding the full
2201	 *   stripe lock, so if we trigger a metadata read that itself
2202	 *   needs raid56 recovery, we will deadlock.
2203	 */
2204	if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2205	    rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2206		return;
2207
2208	rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2209				 fs_info->csum_size, GFP_NOFS);
2210	rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2211					  GFP_NOFS);
2212	if (!rbio->csum_buf || !rbio->csum_bitmap) {
2213		ret = -ENOMEM;
2214		goto error;
2215	}
2216
2217	ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
2218					rbio->csum_buf, rbio->csum_bitmap);
2219	if (ret < 0)
2220		goto error;
2221	if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2222		goto no_csum;
2223	return;
2224
2225error:
2226	/*
2227	 * We failed to allocate memory or grab the csum, but it's not fatal,
2228	 * we can still continue.  But better to warn users that RMW is no
2229	 * longer safe for this particular sub-stripe write.
2230	 */
2231	btrfs_warn_rl(fs_info,
2232"sub-stripe write for full stripe %llu is not safe, failed to get csum: %d",
2233			rbio->bioc->full_stripe_logical, ret);
2234no_csum:
2235	kfree(rbio->csum_buf);
2236	bitmap_free(rbio->csum_bitmap);
2237	rbio->csum_buf = NULL;
2238	rbio->csum_bitmap = NULL;
2239}
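/*
 * Layout sketch (illustrative): csum_buf holds one csum_size record per
 * data sector, indexed as (stripe_nr * stripe_nsectors + sector_nr) *
 * csum_size, while csum_bitmap marks which of those records were
 * actually found in the csum tree.
 */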
2240
2241static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
2242{
2243	struct bio_list bio_list = BIO_EMPTY_LIST;
2244	int total_sector_nr;
2245	int ret = 0;
2246
2247	/*
2248	 * Fill the data csums we need for data verification.  We need to fill
2249	 * the csum_bitmap/csum_buf first, as our endio function will try to
2250	 * verify the data sectors.
2251	 */
2252	fill_data_csums(rbio);
2253
2254	/*
2255	 * Build a list of bios to read all sectors (including data and P/Q).
2256	 *
2257	 * This behavior is to compensate for the later csum verification and recovery.
2258	 */
2259	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2260	     total_sector_nr++) {
2261		struct sector_ptr *sector;
2262		int stripe = total_sector_nr / rbio->stripe_nsectors;
2263		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2264
2265		sector = rbio_stripe_sector(rbio, stripe, sectornr);
2266		ret = rbio_add_io_sector(rbio, &bio_list, sector,
2267			       stripe, sectornr, REQ_OP_READ);
2268		if (ret) {
2269			bio_list_put(&bio_list);
2270			return ret;
2271		}
2272	}
2273
2274	/*
2275	 * We may or may not have any corrupted sectors (including missing dev
2276	 * and csum mismatch), just let recover_sectors() handle them all.
2277	 */
2278	submit_read_wait_bio_list(rbio, &bio_list);
2279	return recover_sectors(rbio);
2280}
2281
2282static void raid_wait_write_end_io(struct bio *bio)
2283{
2284	struct btrfs_raid_bio *rbio = bio->bi_private;
2285	blk_status_t err = bio->bi_status;
2286
2287	if (err)
2288		rbio_update_error_bitmap(rbio, bio);
2289	bio_put(bio);
2290	if (atomic_dec_and_test(&rbio->stripes_pending))
2291		wake_up(&rbio->io_wait);
2292}
2293
2294static void submit_write_bios(struct btrfs_raid_bio *rbio,
2295			      struct bio_list *bio_list)
2296{
2297	struct bio *bio;
2298
2299	atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2300	while ((bio = bio_list_pop(bio_list))) {
2301		bio->bi_end_io = raid_wait_write_end_io;
2302
2303		if (trace_raid56_write_enabled()) {
2304			struct raid56_bio_trace_info trace_info = { 0 };
2305
2306			bio_get_trace_info(rbio, bio, &trace_info);
2307			trace_raid56_write(rbio, bio, &trace_info);
2308		}
2309		submit_bio(bio);
2310	}
2311}
2312
2313/*
2314 * Determine if we need to read any sector from the disk.
2315 * Should only be used in the RMW path, to skip a cached rbio.
2316 */
2317static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
2318{
2319	int i;
2320
2321	for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
2322		struct sector_ptr *sector = &rbio->stripe_sectors[i];
2323
2324		/*
2325		 * We have a sector which doesn't have a page nor is uptodate,
2326		 * thus this rbio cannot be a cached one, as a cached one must
2327		 * have all its data sectors present and uptodate.
2328		 */
2329		if (!sector->page || !sector->uptodate)
2330			return true;
2331	}
2332	return false;
2333}
2334
2335static void rmw_rbio(struct btrfs_raid_bio *rbio)
2336{
2337	struct bio_list bio_list;
2338	int sectornr;
2339	int ret = 0;
2340
2341	/*
2342	 * Allocate the pages for parity first, as P/Q pages will always be
2343	 * needed for both full-stripe and sub-stripe writes.
2344	 */
2345	ret = alloc_rbio_parity_pages(rbio);
2346	if (ret < 0)
2347		goto out;
2348
2349	/*
2350	 * Either this is a full stripe write, or we have every data sector
2351	 * already cached, so we can go to the write path immediately.
2352	 */
2353	if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
2354		/*
2355		 * Now we're doing a sub-stripe write, so we also need all data
2356		 * stripes to do the full RMW.
2357		 */
2358		ret = alloc_rbio_data_pages(rbio);
2359		if (ret < 0)
2360			goto out;
2361
2362		index_rbio_pages(rbio);
2363
2364		ret = rmw_read_wait_recover(rbio);
2365		if (ret < 0)
2366			goto out;
2367	}
2368
2369	/*
2370	 * At this stage we're not allowed to add any new bios to the
2371	 * bio list any more, anyone else that wants to change this stripe
2372	 * needs to do their own rmw.
2373	 */
2374	spin_lock(&rbio->bio_list_lock);
2375	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2376	spin_unlock(&rbio->bio_list_lock);
2377
2378	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2379
2380	index_rbio_pages(rbio);
2381
2382	/*
2383	 * We don't cache full rbios because we're assuming
2384	 * the higher layers are unlikely to use this area of
2385	 * the disk again soon.  If they do use it again,
2386	 * hopefully they will send another full bio.
2387	 */
2388	if (!rbio_is_full(rbio))
2389		cache_rbio_pages(rbio);
2390	else
2391		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2392
2393	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2394		generate_pq_vertical(rbio, sectornr);
2395
2396	bio_list_init(&bio_list);
2397	ret = rmw_assemble_write_bios(rbio, &bio_list);
2398	if (ret < 0)
2399		goto out;
2400
2401	/* We should have at least one bio assembled. */
2402	ASSERT(bio_list_size(&bio_list));
2403	submit_write_bios(rbio, &bio_list);
2404	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2405
2406	/* We may have more errors than our tolerance during the read. */
2407	for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2408		int found_errors;
2409
2410		found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2411		if (found_errors > rbio->bioc->max_errors) {
2412			ret = -EIO;
2413			break;
2414		}
2415	}
2416out:
2417	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2418}
2419
2420static void rmw_rbio_work(struct work_struct *work)
2421{
2422	struct btrfs_raid_bio *rbio;
2423
2424	rbio = container_of(work, struct btrfs_raid_bio, work);
2425	if (lock_stripe_add(rbio) == 0)
2426		rmw_rbio(rbio);
2427}
2428
2429static void rmw_rbio_work_locked(struct work_struct *work)
2430{
2431	rmw_rbio(container_of(work, struct btrfs_raid_bio, work));
2432}
2433
2434/*
2435 * The following code is used to scrub/replace the parity stripe
2436 *
2437 * Caller must have already increased bio_counter for getting @bioc.
2438 *
2439 * Note: We need to make sure all the pages added to the scrub/replace
2440 * raid bio are correct and will not be changed during the scrub/replace.
2441 * That is, those pages hold only metadata or file data with checksums.
2442 */
2443
2444struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
2445				struct btrfs_io_context *bioc,
2446				struct btrfs_device *scrub_dev,
2447				unsigned long *dbitmap, int stripe_nsectors)
2448{
2449	struct btrfs_fs_info *fs_info = bioc->fs_info;
2450	struct btrfs_raid_bio *rbio;
2451	int i;
2452
2453	rbio = alloc_rbio(fs_info, bioc);
2454	if (IS_ERR(rbio))
2455		return NULL;
2456	bio_list_add(&rbio->bio_list, bio);
2457	/*
2458	 * This is a special bio which is used to hold the completion handler
2459	 * and make the scrub rbio similar to the other types.
2460	 */
2461	ASSERT(!bio->bi_iter.bi_size);
2462	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2463
2464	/*
2465	 * After mapping bioc with BTRFS_MAP_WRITE, parities have been sorted
2466	 * to the end position, so this search can start from the first parity
2467	 * stripe.
2468	 */
2469	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2470		if (bioc->stripes[i].dev == scrub_dev) {
2471			rbio->scrubp = i;
2472			break;
2473		}
2474	}
2475	ASSERT_RBIO_STRIPE(i < rbio->real_stripes, rbio, i);
2476
2477	bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
2478	return rbio;
2479}
2480
2481/*
2482 * We only scrub the parity for which we have correct data on the same
2483 * horizontal stripe, so we needn't allocate all pages for all the stripes.
2484 */
2485static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2486{
2487	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2488	int total_sector_nr;
2489
2490	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2491	     total_sector_nr++) {
2492		struct page *page;
2493		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2494		int index = (total_sector_nr * sectorsize) >> PAGE_SHIFT;
2495
2496		if (!test_bit(sectornr, &rbio->dbitmap))
2497			continue;
2498		if (rbio->stripe_pages[index])
2499			continue;
2500		page = alloc_page(GFP_NOFS);
2501		if (!page)
2502			return -ENOMEM;
2503		rbio->stripe_pages[index] = page;
2504	}
2505	index_stripe_sectors(rbio);
2506	return 0;
2507}
2508
2509static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
2510{
2511	struct btrfs_io_context *bioc = rbio->bioc;
2512	const u32 sectorsize = bioc->fs_info->sectorsize;
2513	void **pointers = rbio->finish_pointers;
2514	unsigned long *pbitmap = &rbio->finish_pbitmap;
2515	int nr_data = rbio->nr_data;
2516	int stripe;
2517	int sectornr;
2518	bool has_qstripe;
2519	struct sector_ptr p_sector = { 0 };
2520	struct sector_ptr q_sector = { 0 };
2521	struct bio_list bio_list;
2522	int is_replace = 0;
2523	int ret;
2524
2525	bio_list_init(&bio_list);
2526
2527	if (rbio->real_stripes - rbio->nr_data == 1)
2528		has_qstripe = false;
2529	else if (rbio->real_stripes - rbio->nr_data == 2)
2530		has_qstripe = true;
2531	else
2532		BUG();
2533
2534	/*
2535	 * If replace is running and our P/Q stripe is being replaced, we
2536	 * need to duplicate the final write to the replace target.
2537	 */
2538	if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
2539		is_replace = 1;
2540		bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
2541	}
2542
2543	/*
2544	 * The higher layers (scrubber) are unlikely to
2545	 * use this area of the disk again soon, so don't cache
2546	 * it.
2547	 */
2548	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2549
2550	p_sector.page = alloc_page(GFP_NOFS);
2551	if (!p_sector.page)
2552		return -ENOMEM;
2553	p_sector.pgoff = 0;
2554	p_sector.uptodate = 1;
2555
2556	if (has_qstripe) {
2557		/* RAID6, allocate and map temp space for the Q stripe */
2558		q_sector.page = alloc_page(GFP_NOFS);
2559		if (!q_sector.page) {
2560			__free_page(p_sector.page);
2561			p_sector.page = NULL;
2562			return -ENOMEM;
2563		}
2564		q_sector.pgoff = 0;
2565		q_sector.uptodate = 1;
2566		pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
2567	}
2568
2569	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2570
2571	/* Map the parity stripe just once */
2572	pointers[nr_data] = kmap_local_page(p_sector.page);
2573
2574	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2575		struct sector_ptr *sector;
2576		void *parity;
2577
2578		/* first collect one page from each data stripe */
2579		for (stripe = 0; stripe < nr_data; stripe++) {
2580			sector = sector_in_rbio(rbio, stripe, sectornr, 0);
2581			pointers[stripe] = kmap_local_page(sector->page) +
2582					   sector->pgoff;
2583		}
2584
2585		if (has_qstripe) {
2586			assert_rbio(rbio);
2587			/* RAID6, call the library function to fill in our P/Q */
2588			raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
2589						pointers);
2590		} else {
2591			/* raid5 */
2592			memcpy(pointers[nr_data], pointers[0], sectorsize);
2593			run_xor(pointers + 1, nr_data - 1, sectorsize);
2594		}
2595
2596		/* Check scrubbing parity and repair it */
2597		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2598		parity = kmap_local_page(sector->page) + sector->pgoff;
2599		if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2600			memcpy(parity, pointers[rbio->scrubp], sectorsize);
2601		else
2602			/* The parity is correct, no need to write it back */
2603			bitmap_clear(&rbio->dbitmap, sectornr, 1);
2604		kunmap_local(parity);
2605
2606		for (stripe = nr_data - 1; stripe >= 0; stripe--)
2607			kunmap_local(pointers[stripe]);
2608	}
2609
2610	kunmap_local(pointers[nr_data]);
2611	__free_page(p_sector.page);
2612	p_sector.page = NULL;
2613	if (q_sector.page) {
2614		kunmap_local(pointers[rbio->real_stripes - 1]);
2615		__free_page(q_sector.page);
2616		q_sector.page = NULL;
2617	}
2618
2619	/*
2620	 * Time to start writing.  Make bios for everything from the
2621	 * higher layers (the bio_list in our rbio) and our P/Q.  Ignore
2622	 * everything else.
2623	 */
2624	for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2625		struct sector_ptr *sector;
2626
2627		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2628		ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2629					 sectornr, REQ_OP_WRITE);
2630		if (ret)
2631			goto cleanup;
2632	}
2633
2634	if (!is_replace)
2635		goto submit_write;
2636
2637	/*
2638	 * Replace is running and our parity stripe needs to be duplicated to
2639	 * the target device.  Check we have a valid source stripe number.
2640	 */
2641	ASSERT_RBIO(rbio->bioc->replace_stripe_src >= 0, rbio);
2642	for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
2643		struct sector_ptr *sector;
2644
2645		sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2646		ret = rbio_add_io_sector(rbio, &bio_list, sector,
2647					 rbio->real_stripes,
2648					 sectornr, REQ_OP_WRITE);
2649		if (ret)
2650			goto cleanup;
2651	}
2652
2653submit_write:
2654	submit_write_bios(rbio, &bio_list);
2655	return 0;
2656
2657cleanup:
2658	bio_list_put(&bio_list);
2659	return ret;
2660}
2661
2662static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2663{
2664	if (stripe >= 0 && stripe < rbio->nr_data)
2665		return 1;
2666	return 0;
2667}
2668
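/*
 * Recover the corrupted sectors of a scrub rbio.
 *
 * Unlike a plain recovery, the parity stripe being scrubbed cannot serve as
 * a repair source here, which lowers the number of errors we can tolerate.
 */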
2669static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
2670{
2671	void **pointers = NULL;
2672	void **unmap_array = NULL;
2673	int sector_nr;
2674	int ret = 0;
2675
2676	/*
2677	 * The @pointers array stores the pointer for each sector.
2678	 *
2679	 * The @unmap_array stores a copy of those pointers that does not get
2680	 * reordered during reconstruction, so that kunmap_local() works.
2681	 */
2682	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2683	unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2684	if (!pointers || !unmap_array) {
2685		ret = -ENOMEM;
2686		goto out;
2687	}
2688
2689	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2690		int dfail = 0, failp = -1;
2691		int faila;
2692		int failb;
2693		int found_errors;
2694
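		/*
		 * Count the errors in this vertical stripe; faila and failb
		 * are set to the first two failed stripe numbers (-1 if
		 * fewer stripes failed).
		 */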
2695		found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2696							 &faila, &failb);
2697		if (found_errors > rbio->bioc->max_errors) {
2698			ret = -EIO;
2699			goto out;
2700		}
2701		if (found_errors == 0)
2702			continue;
2703
2704		/* We should have at least one error here. */
2705		ASSERT(faila >= 0 || failb >= 0);
2706
2707		if (is_data_stripe(rbio, faila))
2708			dfail++;
2709		else if (is_parity_stripe(faila))
2710			failp = faila;
2711
2712		if (is_data_stripe(rbio, failb))
2713			dfail++;
2714		else if (is_parity_stripe(failb))
2715			failp = failb;
2716		/*
2717		 * Because we cannot use the parity stripe being scrubbed to
2718		 * repair data, our repair capability is reduced by one.  (In
2719		 * the RAID5 case we cannot repair anything at all.)
2720		 */
2721		if (dfail > rbio->bioc->max_errors - 1) {
2722			ret = -EIO;
2723			goto out;
2724		}
2725		/*
2726		 * If all the data is good and only the parity is corrupted, just
2727		 * repair the parity, no need to recover any data stripes.
2728		 */
2729		if (dfail == 0)
2730			continue;
2731
2732		/*
2733		 * Getting here means we have one corrupted data stripe and one
2734		 * corrupted parity stripe on RAID6.  If the corrupted parity is
2735		 * the one being scrubbed, we can use the other parity to repair
2736		 * the data; otherwise the data stripe cannot be repaired.
2737		 */
2738		if (failp != rbio->scrubp) {
2739			ret = -EIO;
2740			goto out;
2741		}
2742
2743		ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2744		if (ret < 0)
2745			goto out;
2746	}
2747out:
2748	kfree(pointers);
2749	kfree(unmap_array);
2750	return ret;
2751}
2752
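/*
 * Read every sector the scrub needs that is neither in the bio list nor
 * already uptodate in the stripe cache.
 */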
2753static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
2754{
2755	struct bio_list bio_list = BIO_EMPTY_LIST;
2756	int total_sector_nr;
2757	int ret = 0;
2758
2759	/* Build a list of bios to read all the missing parts. */
2760	for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2761	     total_sector_nr++) {
2762		int sectornr = total_sector_nr % rbio->stripe_nsectors;
2763		int stripe = total_sector_nr / rbio->stripe_nsectors;
2764		struct sector_ptr *sector;
2765
2766		/* No data in the vertical stripe, no need to read. */
2767		if (!test_bit(sectornr, &rbio->dbitmap))
2768			continue;
2769
2770		/*
2771		 * We want to find all the sectors missing from the rbio and
2772		 * read them from the disk. If sector_in_rbio() finds a sector
2773		 * in the bio list we don't need to read it off the stripe.
2774		 */
2775		sector = sector_in_rbio(rbio, stripe, sectornr, 1);
2776		if (sector)
2777			continue;
2778
2779		sector = rbio_stripe_sector(rbio, stripe, sectornr);
2780		/*
2781		 * The bio cache may have handed us an uptodate sector.  If so,
2782		 * use it.
2783		 */
2784		if (sector->uptodate)
2785			continue;
2786
2787		ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
2788					 sectornr, REQ_OP_READ);
2789		if (ret) {
2790			bio_list_put(&bio_list);
2791			return ret;
2792		}
2793	}
2794
2795	submit_read_wait_bio_list(rbio, &bio_list);
2796	return 0;
2797}
2798
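/*
 * The main entry for scrubbing an rbio: allocate the pages we need, read in
 * the missing sectors, recover any failed ones, then verify and repair the
 * parity stripe being scrubbed.
 */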
2799static void scrub_rbio(struct btrfs_raid_bio *rbio)
2800{
2801	int sector_nr;
2802	int ret;
2803
2804	ret = alloc_rbio_essential_pages(rbio);
2805	if (ret)
2806		goto out;
2807
2808	bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2809
2810	ret = scrub_assemble_read_bios(rbio);
2811	if (ret < 0)
2812		goto out;
2813
2814	/* We may have some failures, recover the failed sectors first. */
2815	ret = recover_scrub_rbio(rbio);
2816	if (ret < 0)
2817		goto out;
2818
2819	/*
2820	 * Every sector is now properly prepared, so we can finish the scrub
2821	 * and write back the good content.
2822	 */
2823	ret = finish_parity_scrub(rbio);
2824	wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
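	/*
	 * All writes are done; fail the rbio if any vertical stripe still
	 * has more errors than we can tolerate.
	 */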
2825	for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2826		int found_errors;
2827
2828		found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2829		if (found_errors > rbio->bioc->max_errors) {
2830			ret = -EIO;
2831			break;
2832		}
2833	}
2834out:
2835	rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2836}
2837
2838static void scrub_rbio_work_locked(struct work_struct *work)
2839{
2840	scrub_rbio(container_of(work, struct btrfs_raid_bio, work));
2841}
2842
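/*
 * Only start the work directly if we acquired the stripe lock; otherwise the
 * rbio has been queued and will be started once the current lock holder
 * releases the stripe.
 */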
2843void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2844{
2845	if (!lock_stripe_add(rbio))
2846		start_async_work(rbio, scrub_rbio_work_locked);
2847}
2848
2849/*
2850 * This is for scrub call sites where we already have the correct data
2851 * contents, which allows us to avoid reading the data stripes again.
2852 *
2853 * Unfortunately we have to copy the pages here rather than reusing them,
2854 * because the rbio does its own page management for its cache.
2855 */
2856void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
2857				    struct page **data_pages, u64 data_logical)
2858{
2859	const u64 offset_in_full_stripe = data_logical -
2860					  rbio->bioc->full_stripe_logical;
2861	const int page_index = offset_in_full_stripe >> PAGE_SHIFT;
2862	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2863	const u32 sectors_per_page = PAGE_SIZE / sectorsize;
2864	int ret;
2865
2866	/*
2867	 * If we hit ENOMEM here but the allocation succeeds later at
2868	 * raid56_parity_submit_scrub_rbio() time, we just do the extra
2869	 * read, which is not a big deal.
2870	 *
2871	 * If we hit ENOMEM again at raid56_parity_submit_scrub_rbio() time,
2872	 * the bio will get a proper error number set.
2873	 */
2874	ret = alloc_rbio_data_pages(rbio);
2875	if (ret < 0)
2876		return;
2877
2878	/* data_logical must be at stripe boundary and inside the full stripe. */
2879	ASSERT(IS_ALIGNED(offset_in_full_stripe, BTRFS_STRIPE_LEN));
2880	ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));
2881
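	/*
	 * Copy one BTRFS_STRIPE_LEN worth of data pages into the rbio's own
	 * pages and mark the covered sectors uptodate, so they don't need to
	 * be read back from disk.
	 */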
2882	for (int page_nr = 0; page_nr < (BTRFS_STRIPE_LEN >> PAGE_SHIFT); page_nr++) {
2883		struct page *dst = rbio->stripe_pages[page_nr + page_index];
2884		struct page *src = data_pages[page_nr];
2885
2886		memcpy_page(dst, 0, src, 0, PAGE_SIZE);
2887		for (int sector_nr = sectors_per_page * (page_nr + page_index);
2888		     sector_nr < sectors_per_page * (page_nr + page_index + 1);
2889		     sector_nr++)
2890			rbio->stripe_sectors[sector_nr].uptodate = true;
2891	}
2892}