   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Partial Parity Log for closing the RAID5 write hole
   4 * Copyright (c) 2017, Intel Corporation.
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/blkdev.h>
   9#include <linux/slab.h>
  10#include <linux/crc32c.h>
  11#include <linux/async_tx.h>
  12#include <linux/raid/md_p.h>
  13#include "md.h"
  14#include "raid5.h"
  15#include "raid5-log.h"
  16
  17/*
  18 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
  19 * partial parity data. The header contains an array of entries
  20 * (struct ppl_header_entry) which describe the logged write requests.
  21 * Partial parity for the entries comes after the header, written in the same
  22 * sequence as the entries:
  23 *
  24 * Header
  25 *   entry0
  26 *   ...
  27 *   entryN
  28 * PP data
  29 *   PP for entry0
  30 *   ...
  31 *   PP for entryN
  32 *
  33 * An entry describes one or more consecutive stripe_heads, up to a full
  34 * stripe. The modifed raid data chunks form an m-by-n matrix, where m is the
  35 * number of stripe_heads in the entry and n is the number of modified data
  36 * disks. Every stripe_head in the entry must write to the same data disks.
  37 * An example of a valid case described by a single entry (writes to the first
  38 * stripe of a 4 disk array, 16k chunk size):
  39 *
  40 * sh->sector   dd0   dd1   dd2    ppl
  41 *            +-----+-----+-----+
  42 * 0          | --- | --- | --- | +----+
  43 * 8          | -W- | -W- | --- | | pp |   data_sector = 8
  44 * 16         | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
  45 * 24         | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
  46 *            +-----+-----+-----+ +----+
  47 *
  48 * data_sector is the first raid sector of the modified data, data_size is the
  49 * total size of modified data and pp_size is the size of partial parity for
  50 * this entry. Entries for full stripe writes contain no partial parity
  51 * (pp_size = 0), they only mark the stripes for which parity should be
  52 * recalculated after an unclean shutdown. Every entry holds a checksum of its
  53 * partial parity, the header also has a checksum of the header itself.
  54 *
  55 * A write request is always logged to the PPL instance stored on the parity
  56 * disk of the corresponding stripe. For each member disk there is one ppl_log
  57 * used to handle logging for this disk, independently of the others. They are
  58 * grouped in child_logs array in struct ppl_conf, which is assigned to
  59 * r5conf->log_private.
  60 *
  61 * ppl_io_unit represents a full PPL write; header_page contains the ppl_header.
  62 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
  63 * can be appended to the last entry if it meets the conditions for a valid
  64 * entry described above, otherwise a new entry is added. Checksums of entries
  65 * are calculated incrementally as stripes containing partial parity are being
  66 * added. ppl_submit_iounit() calculates the checksum of the header and submits
  67 * a bio containing the header page and partial parity pages (sh->ppl_page) for
  68 * all stripes of the io_unit. When the PPL write completes, the stripes
  69 * associated with the io_unit are released and raid5d starts writing their data
  70 * and parity. When all stripes are written, the io_unit is freed and the next
  71 * can be submitted.
  72 *
  73 * An io_unit is used to gather stripes until it is submitted or becomes full
  74 * (if the maximum number of entries or the PPL size limit is reached). Another io_unit
  75 * can't be submitted until the previous has completed (PPL and stripe
  76 * data+parity is written). The log->io_list tracks all io_units of a log
  77 * (for a single member disk). New io_units are added to the end of the list
  78 * and the first io_unit is submitted, if it is not submitted already.
  79 * The current io_unit accepting new stripes is always at the end of the list.
  80 *
  81 * If write-back cache is enabled for any of the disks in the array, its data
  82 * must be flushed before the next io_unit is submitted.
  83 */
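/*
 * A worked reading of the example above (illustrative only, not used by
 * the code): with m = 3 stripe_heads and n = 2 modified data disks, the
 * entry spans data_size = m * n * 4k = 24k of raid data but logs only
 * pp_size = m * 4k = 12k of partial parity, so pp_size == data_size / n
 * whenever partial parity is recorded, and pp_size == 0 for full stripe
 * writes.
 */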
  84
  85#define PPL_SPACE_SIZE (128 * 1024)
  86
  87struct ppl_conf {
  88	struct mddev *mddev;
  89
  90	/* array of child logs, one for each raid disk */
  91	struct ppl_log *child_logs;
  92	int count;
  93
  94	int block_size;		/* the logical block size used for data_sector
  95				 * in ppl_header_entry */
  96	u32 signature;		/* raid array identifier */
  97	atomic64_t seq;		/* current log write sequence number */
  98
  99	struct kmem_cache *io_kc;
 100	mempool_t io_pool;
 101	struct bio_set bs;
 102	struct bio_set flush_bs;
 103
 104	/* used only for recovery */
 105	int recovered_entries;
 106	int mismatch_count;
 107
 108	/* stripes to retry if io_unit allocation failed */
 109	struct list_head no_mem_stripes;
 110	spinlock_t no_mem_stripes_lock;
 111
 112	unsigned short write_hint;
 113};
 114
 115struct ppl_log {
 116	struct ppl_conf *ppl_conf;	/* shared between all log instances */
 117
 118	struct md_rdev *rdev;		/* array member disk associated with
 119					 * this log instance */
 120	struct mutex io_mutex;
 121	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
 122					 * always at the end of io_list */
 123	spinlock_t io_list_lock;
 124	struct list_head io_list;	/* all io_units of this log */
 125
 126	sector_t next_io_sector;
 127	unsigned int entry_space;
 128	bool use_multippl;
 129	bool wb_cache_on;
 130	unsigned long disk_flush_bitmap;
 131};
 132
 133#define PPL_IO_INLINE_BVECS 32
 134
 135struct ppl_io_unit {
 136	struct ppl_log *log;
 137
 138	struct page *header_page;	/* for ppl_header */
 139
 140	unsigned int entries_count;	/* number of entries in ppl_header */
 141	unsigned int pp_size;		/* current total size of partial parity */
 142
 143	u64 seq;			/* sequence number of this log write */
 144	struct list_head log_sibling;	/* log->io_list */
 145
 146	struct list_head stripe_list;	/* stripes added to the io_unit */
 147	atomic_t pending_stripes;	/* how many stripes not written to raid */
 148	atomic_t pending_flushes;	/* how many disk flushes are in progress */
 149
 150	bool submitted;			/* true if write to log started */
 151
 152	/* inline bio and its biovec for submitting the iounit */
 153	struct bio bio;
 154	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
 155};
 156
 157struct dma_async_tx_descriptor *
 158ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
 159		       struct dma_async_tx_descriptor *tx)
 160{
 161	int disks = sh->disks;
 162	struct page **srcs = percpu->scribble;
 163	int count = 0, pd_idx = sh->pd_idx, i;
 164	struct async_submit_ctl submit;
 165
 166	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
 167
 168	/*
 169	 * Partial parity is the XOR of stripe data chunks that are not changed
 170	 * during the write request. Depending on available data
 171	 * (read-modify-write vs. reconstruct-write case) we calculate it
 172	 * differently.
 173	 */
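	/*
	 * Put differently (illustrative): if parity_old is the XOR of all
	 * data chunks, then
	 *
	 *   pp = XOR(unmodified chunks)
	 *      = parity_old ^ XOR(old data of the chunks being written)
	 *
	 * which is exactly the prexor result reused in the rmw branch below.
	 */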
 174	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
 175		/*
 176		 * rmw: xor old data and parity from updated disks
 177		 * This is calculated earlier by ops_run_prexor5() so just copy
 178		 * the parity dev page.
 179		 */
 180		srcs[count++] = sh->dev[pd_idx].page;
 181	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
 182		/* rcw: xor data from all not updated disks */
 183		for (i = disks; i--;) {
 184			struct r5dev *dev = &sh->dev[i];
 185			if (test_bit(R5_UPTODATE, &dev->flags))
 186				srcs[count++] = dev->page;
 187		}
 188	} else {
 189		return tx;
 190	}
 191
 192	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
 193			  NULL, sh, (void *) (srcs + sh->disks + 2));
 194
 195	if (count == 1)
 196		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
 197				  &submit);
 198	else
 199		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
 200			       &submit);
 201
 202	return tx;
 203}
 204
 205static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
 206{
 207	struct kmem_cache *kc = pool_data;
 208	struct ppl_io_unit *io;
 209
 210	io = kmem_cache_alloc(kc, gfp_mask);
 211	if (!io)
 212		return NULL;
 213
 214	io->header_page = alloc_page(gfp_mask);
 215	if (!io->header_page) {
 216		kmem_cache_free(kc, io);
 217		return NULL;
 218	}
 219
 220	return io;
 221}
 222
 223static void ppl_io_pool_free(void *element, void *pool_data)
 224{
 225	struct kmem_cache *kc = pool_data;
 226	struct ppl_io_unit *io = element;
 227
 228	__free_page(io->header_page);
 229	kmem_cache_free(kc, io);
 230}
 231
 232static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
 233					  struct stripe_head *sh)
 234{
 235	struct ppl_conf *ppl_conf = log->ppl_conf;
 236	struct ppl_io_unit *io;
 237	struct ppl_header *pplhdr;
 238	struct page *header_page;
 239
 240	io = mempool_alloc(&ppl_conf->io_pool, GFP_NOWAIT);
 241	if (!io)
 242		return NULL;
 243
 244	header_page = io->header_page;
 245	memset(io, 0, sizeof(*io));
 246	io->header_page = header_page;
 247
 248	io->log = log;
 249	INIT_LIST_HEAD(&io->log_sibling);
 250	INIT_LIST_HEAD(&io->stripe_list);
 251	atomic_set(&io->pending_stripes, 0);
 252	atomic_set(&io->pending_flushes, 0);
 253	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
 254
 255	pplhdr = page_address(io->header_page);
 256	clear_page(pplhdr);
 257	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
 258	pplhdr->signature = cpu_to_le32(ppl_conf->signature);
 259
 260	io->seq = atomic64_add_return(1, &ppl_conf->seq);
 261	pplhdr->generation = cpu_to_le64(io->seq);
 262
 263	return io;
 264}
 265
 266static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
 267{
 268	struct ppl_io_unit *io = log->current_io;
 269	struct ppl_header_entry *e = NULL;
 270	struct ppl_header *pplhdr;
 271	int i;
 272	sector_t data_sector = 0;
 273	int data_disks = 0;
 274	struct r5conf *conf = sh->raid_conf;
 275
 276	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
 277
 278	/* check if current io_unit is full */
 279	if (io && (io->pp_size == log->entry_space ||
 280		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
 281		pr_debug("%s: add io_unit blocked by seq: %llu\n",
 282			 __func__, io->seq);
 283		io = NULL;
 284	}
 285
 286	/* add a new unit if there is none or the current one is full */
 287	if (!io) {
 288		io = ppl_new_iounit(log, sh);
 289		if (!io)
 290			return -ENOMEM;
 291		spin_lock_irq(&log->io_list_lock);
 292		list_add_tail(&io->log_sibling, &log->io_list);
 293		spin_unlock_irq(&log->io_list_lock);
 294
 295		log->current_io = io;
 296	}
 297
 298	for (i = 0; i < sh->disks; i++) {
 299		struct r5dev *dev = &sh->dev[i];
 300
 301		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
 302			if (!data_disks || dev->sector < data_sector)
 303				data_sector = dev->sector;
 304			data_disks++;
 305		}
 306	}
 307	BUG_ON(!data_disks);
 308
 309	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
 310		 io->seq, (unsigned long long)data_sector, data_disks);
 311
 312	pplhdr = page_address(io->header_page);
 313
 314	if (io->entries_count > 0) {
 315		struct ppl_header_entry *last =
 316				&pplhdr->entries[io->entries_count - 1];
 317		struct stripe_head *sh_last = list_last_entry(
 318				&io->stripe_list, struct stripe_head, log_list);
 319		u64 data_sector_last = le64_to_cpu(last->data_sector);
 320		u32 data_size_last = le32_to_cpu(last->data_size);
 321
 322		/*
 323		 * Check if we can append the stripe to the last entry. It must
 324		 * be just after the last logged stripe and write to the same
 325		 * disks. Use bit shift and logarithm to avoid 64-bit division.
 326		 */
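		/*
		 * Note (illustrative): data_size_last >> 9 is
		 * stripes_in_entry * STRIPE_SECTORS * disks_written, and
		 * data_sector - data_sector_last is
		 * stripes_in_entry * STRIPE_SECTORS, so the last condition
		 * simply checks that the entry so far also wrote to
		 * data_disks disks.
		 */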
 327		if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
 328		    (data_sector >> ilog2(conf->chunk_sectors) ==
 329		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
 330		    ((data_sector - data_sector_last) * data_disks ==
 331		     data_size_last >> 9))
 332			e = last;
 333	}
 334
 335	if (!e) {
 336		e = &pplhdr->entries[io->entries_count++];
 337		e->data_sector = cpu_to_le64(data_sector);
 338		e->parity_disk = cpu_to_le32(sh->pd_idx);
 339		e->checksum = cpu_to_le32(~0);
 340	}
 341
 342	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);
 343
 344	/* don't write any PP for a full stripe write */
 345	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
 346		le32_add_cpu(&e->pp_size, PAGE_SIZE);
 347		io->pp_size += PAGE_SIZE;
 348		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
 349						    page_address(sh->ppl_page),
 350						    PAGE_SIZE));
 351	}
 352
 353	list_add_tail(&sh->log_list, &io->stripe_list);
 354	atomic_inc(&io->pending_stripes);
 355	sh->ppl_io = io;
 356
 357	return 0;
 358}
 359
 360int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
 361{
 362	struct ppl_conf *ppl_conf = conf->log_private;
 363	struct ppl_io_unit *io = sh->ppl_io;
 364	struct ppl_log *log;
 365
 366	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
 367	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
 368	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
 369		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
 370		return -EAGAIN;
 371	}
 372
 373	log = &ppl_conf->child_logs[sh->pd_idx];
 374
 375	mutex_lock(&log->io_mutex);
 376
 377	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
 378		mutex_unlock(&log->io_mutex);
 379		return -EAGAIN;
 380	}
 381
 382	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
 383	clear_bit(STRIPE_DELAYED, &sh->state);
 384	atomic_inc(&sh->count);
 385
 386	if (ppl_log_stripe(log, sh)) {
 387		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
 388		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
 389		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
 390	}
 391
 392	mutex_unlock(&log->io_mutex);
 393
 394	return 0;
 395}
 396
 397static void ppl_log_endio(struct bio *bio)
 398{
 399	struct ppl_io_unit *io = bio->bi_private;
 400	struct ppl_log *log = io->log;
 401	struct ppl_conf *ppl_conf = log->ppl_conf;
 402	struct stripe_head *sh, *next;
 403
 404	pr_debug("%s: seq: %llu\n", __func__, io->seq);
 405
 406	if (bio->bi_status)
 407		md_error(ppl_conf->mddev, log->rdev);
 408
 409	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
 410		list_del_init(&sh->log_list);
 411
 412		set_bit(STRIPE_HANDLE, &sh->state);
 413		raid5_release_stripe(sh);
 414	}
 415}
 416
 417static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
 418{
 419	char b[BDEVNAME_SIZE];
 420
 421	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
 422		 __func__, io->seq, bio->bi_iter.bi_size,
 423		 (unsigned long long)bio->bi_iter.bi_sector,
 424		 bio_devname(bio, b));
 425
 426	submit_bio(bio);
 427}
 428
 429static void ppl_submit_iounit(struct ppl_io_unit *io)
 430{
 431	struct ppl_log *log = io->log;
 432	struct ppl_conf *ppl_conf = log->ppl_conf;
 433	struct ppl_header *pplhdr = page_address(io->header_page);
 434	struct bio *bio = &io->bio;
 435	struct stripe_head *sh;
 436	int i;
 437
 438	bio->bi_private = io;
 439
 440	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
 441		ppl_log_endio(bio);
 442		return;
 443	}
 444
 445	for (i = 0; i < io->entries_count; i++) {
 446		struct ppl_header_entry *e = &pplhdr->entries[i];
 447
 448		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
 449			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
 450			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));
 451
 452		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
 453					     ilog2(ppl_conf->block_size >> 9));
 454		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
 455	}
 456
 457	pplhdr->entries_count = cpu_to_le32(io->entries_count);
 458	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
 459
 460	/* Rewind the buffer if the current PPL is larger than the remaining space */
 461	if (log->use_multippl &&
 462	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
 463	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
 464		log->next_io_sector = log->rdev->ppl.sector;
 465
 466
 467	bio->bi_end_io = ppl_log_endio;
 468	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
 469	bio_set_dev(bio, log->rdev->bdev);
 470	bio->bi_iter.bi_sector = log->next_io_sector;
 471	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
 472	bio->bi_write_hint = ppl_conf->write_hint;
 473
 474	pr_debug("%s: log->next_io_sector: %llu\n", __func__,
 475	    (unsigned long long)log->next_io_sector);
 476
 477	if (log->use_multippl)
 478		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
 479
 480	WARN_ON(log->disk_flush_bitmap != 0);
 481
 482	list_for_each_entry(sh, &io->stripe_list, log_list) {
 483		for (i = 0; i < sh->disks; i++) {
 484			struct r5dev *dev = &sh->dev[i];
 485
 486			if ((ppl_conf->child_logs[i].wb_cache_on) &&
 487			    (test_bit(R5_Wantwrite, &dev->flags))) {
 488				set_bit(i, &log->disk_flush_bitmap);
 489			}
 490		}
 491
 492		/* entries for full stripe writes have no partial parity */
 493		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
 494			continue;
 495
 496		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
 497			struct bio *prev = bio;
 498
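			/*
			 * The inline bio is full: start another one at the
			 * sector right after the previous bio, chain it to
			 * the previous bio so ppl_log_endio() runs only once
			 * the whole chain completes, and submit the full bio.
			 */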
 499			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
 500					       &ppl_conf->bs);
 501			bio->bi_opf = prev->bi_opf;
 502			bio->bi_write_hint = prev->bi_write_hint;
 503			bio_copy_dev(bio, prev);
 504			bio->bi_iter.bi_sector = bio_end_sector(prev);
 505			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
 506
 507			bio_chain(bio, prev);
 508			ppl_submit_iounit_bio(io, prev);
 509		}
 510	}
 511
 512	ppl_submit_iounit_bio(io, bio);
 513}
 514
 515static void ppl_submit_current_io(struct ppl_log *log)
 516{
 517	struct ppl_io_unit *io;
 518
 519	spin_lock_irq(&log->io_list_lock);
 520
 521	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
 522				      log_sibling);
 523	if (io && io->submitted)
 524		io = NULL;
 525
 526	spin_unlock_irq(&log->io_list_lock);
 527
 528	if (io) {
 529		io->submitted = true;
 530
 531		if (io == log->current_io)
 532			log->current_io = NULL;
 533
 534		ppl_submit_iounit(io);
 535	}
 536}
 537
 538void ppl_write_stripe_run(struct r5conf *conf)
 539{
 540	struct ppl_conf *ppl_conf = conf->log_private;
 541	struct ppl_log *log;
 542	int i;
 543
 544	for (i = 0; i < ppl_conf->count; i++) {
 545		log = &ppl_conf->child_logs[i];
 546
 547		mutex_lock(&log->io_mutex);
 548		ppl_submit_current_io(log);
 549		mutex_unlock(&log->io_mutex);
 550	}
 551}
 552
 553static void ppl_io_unit_finished(struct ppl_io_unit *io)
 554{
 555	struct ppl_log *log = io->log;
 556	struct ppl_conf *ppl_conf = log->ppl_conf;
 557	struct r5conf *conf = ppl_conf->mddev->private;
 558	unsigned long flags;
 559
 560	pr_debug("%s: seq: %llu\n", __func__, io->seq);
 561
 562	local_irq_save(flags);
 563
 564	spin_lock(&log->io_list_lock);
 565	list_del(&io->log_sibling);
 566	spin_unlock(&log->io_list_lock);
 567
 568	mempool_free(io, &ppl_conf->io_pool);
 569
 570	spin_lock(&ppl_conf->no_mem_stripes_lock);
 571	if (!list_empty(&ppl_conf->no_mem_stripes)) {
 572		struct stripe_head *sh;
 573
 574		sh = list_first_entry(&ppl_conf->no_mem_stripes,
 575				      struct stripe_head, log_list);
 576		list_del_init(&sh->log_list);
 577		set_bit(STRIPE_HANDLE, &sh->state);
 578		raid5_release_stripe(sh);
 579	}
 580	spin_unlock(&ppl_conf->no_mem_stripes_lock);
 581
 582	local_irq_restore(flags);
 583
 584	wake_up(&conf->wait_for_quiescent);
 585}
 586
 587static void ppl_flush_endio(struct bio *bio)
 588{
 589	struct ppl_io_unit *io = bio->bi_private;
 590	struct ppl_log *log = io->log;
 591	struct ppl_conf *ppl_conf = log->ppl_conf;
 592	struct r5conf *conf = ppl_conf->mddev->private;
 593	char b[BDEVNAME_SIZE];
 594
 595	pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
 596
 597	if (bio->bi_status) {
 598		struct md_rdev *rdev;
 599
 600		rcu_read_lock();
 601		rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
 602		if (rdev)
 603			md_error(rdev->mddev, rdev);
 604		rcu_read_unlock();
 605	}
 606
 607	bio_put(bio);
 608
 609	if (atomic_dec_and_test(&io->pending_flushes)) {
 610		ppl_io_unit_finished(io);
 611		md_wakeup_thread(conf->mddev->thread);
 612	}
 613}
 614
 615static void ppl_do_flush(struct ppl_io_unit *io)
 616{
 617	struct ppl_log *log = io->log;
 618	struct ppl_conf *ppl_conf = log->ppl_conf;
 619	struct r5conf *conf = ppl_conf->mddev->private;
 620	int raid_disks = conf->raid_disks;
 621	int flushed_disks = 0;
 622	int i;
 623
 624	atomic_set(&io->pending_flushes, raid_disks);
 625
 626	for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
 627		struct md_rdev *rdev;
 628		struct block_device *bdev = NULL;
 629
 630		rcu_read_lock();
 631		rdev = rcu_dereference(conf->disks[i].rdev);
 632		if (rdev && !test_bit(Faulty, &rdev->flags))
 633			bdev = rdev->bdev;
 634		rcu_read_unlock();
 635
 636		if (bdev) {
 637			struct bio *bio;
 638			char b[BDEVNAME_SIZE];
 639
 640			bio = bio_alloc_bioset(GFP_NOIO, 0, &ppl_conf->flush_bs);
 641			bio_set_dev(bio, bdev);
 642			bio->bi_private = io;
 643			bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 644			bio->bi_end_io = ppl_flush_endio;
 645
 646			pr_debug("%s: dev: %s\n", __func__,
 647				 bio_devname(bio, b));
 648
 649			submit_bio(bio);
 650			flushed_disks++;
 651		}
 652	}
 653
 654	log->disk_flush_bitmap = 0;
 655
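	/*
	 * pending_flushes was primed with raid_disks above; drop the
	 * references of the disks that did not get a flush bio so the
	 * count can reach zero and finish the io_unit.
	 */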
 656	for (i = flushed_disks ; i < raid_disks; i++) {
 657		if (atomic_dec_and_test(&io->pending_flushes))
 658			ppl_io_unit_finished(io);
 659	}
 660}
 661
 662static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
 663					    struct ppl_log *log)
 664{
 665	struct ppl_io_unit *io;
 666
 667	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
 668				      log_sibling);
 669
 670	return !io || !io->submitted;
 671}
 672
 673void ppl_quiesce(struct r5conf *conf, int quiesce)
 674{
 675	struct ppl_conf *ppl_conf = conf->log_private;
 676	int i;
 677
 678	if (quiesce) {
 679		for (i = 0; i < ppl_conf->count; i++) {
 680			struct ppl_log *log = &ppl_conf->child_logs[i];
 681
 682			spin_lock_irq(&log->io_list_lock);
 683			wait_event_lock_irq(conf->wait_for_quiescent,
 684					    ppl_no_io_unit_submitted(conf, log),
 685					    log->io_list_lock);
 686			spin_unlock_irq(&log->io_list_lock);
 687		}
 688	}
 689}
 690
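/*
 * Note on the behavior below: an empty flush bio is completed right away,
 * while a flush bio that carries data is handed back with REQ_PREFLUSH
 * cleared and -EAGAIN, so the caller processes it as a regular write.
 */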
 691int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
 692{
 693	if (bio->bi_iter.bi_size == 0) {
 694		bio_endio(bio);
 695		return 0;
 696	}
 697	bio->bi_opf &= ~REQ_PREFLUSH;
 698	return -EAGAIN;
 699}
 700
 701void ppl_stripe_write_finished(struct stripe_head *sh)
 702{
 703	struct ppl_io_unit *io;
 704
 705	io = sh->ppl_io;
 706	sh->ppl_io = NULL;
 707
 708	if (io && atomic_dec_and_test(&io->pending_stripes)) {
 709		if (io->log->disk_flush_bitmap)
 710			ppl_do_flush(io);
 711		else
 712			ppl_io_unit_finished(io);
 713	}
 714}
 715
 716static void ppl_xor(int size, struct page *page1, struct page *page2)
 717{
 718	struct async_submit_ctl submit;
 719	struct dma_async_tx_descriptor *tx;
 720	struct page *xor_srcs[] = { page1, page2 };
 721
 722	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
 723			  NULL, NULL, NULL, NULL);
 724	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);
 725
 726	async_tx_quiesce(&tx);
 727}
 728
 729/*
 730 * PPL recovery strategy: xor partial parity and data from all modified data
 731 * disks within a stripe and write the result as the new stripe parity. If all
 732 * stripe data disks are modified (full stripe write), no partial parity is
 733 * available, so just xor the data disks.
 734 *
 735 * Recovery of a PPL entry shall occur only if all modified data disks are
 736 * available and read from all of them succeeds.
 737 *
 738 * A PPL entry applies to a stripe, partial parity size for an entry is at most
 739 * the size of the chunk. Examples of possible cases for a single entry:
 740 *
 741 * case 0: single data disk write:
 742 *   data0    data1    data2     ppl        parity
 743 * +--------+--------+--------+           +--------------------+
 744 * | ------ | ------ | ------ | +----+    | (no change)        |
 745 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 746 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 747 * | ------ | ------ | ------ | +----+    | (no change)        |
 748 * +--------+--------+--------+           +--------------------+
 749 * pp_size = data_size
 750 *
 751 * case 1: more than one data disk write:
 752 *   data0    data1    data2     ppl        parity
 753 * +--------+--------+--------+           +--------------------+
 754 * | ------ | ------ | ------ | +----+    | (no change)        |
 755 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 756 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 757 * | ------ | ------ | ------ | +----+    | (no change)        |
 758 * +--------+--------+--------+           +--------------------+
 759 * pp_size = data_size / modified_data_disks
 760 *
 761 * case 2: write to all data disks (also full stripe write):
 762 *   data0    data1    data2                parity
 763 * +--------+--------+--------+           +--------------------+
 764 * | ------ | ------ | ------ |           | (no change)        |
 765 * | -data- | -data- | -data- | --------> | xor all data       |
 766 * | ------ | ------ | ------ | --------> | (no change)        |
 767 * | ------ | ------ | ------ |           | (no change)        |
 768 * +--------+--------+--------+           +--------------------+
 769 * pp_size = 0
 770 *
 771 * The following cases are possible only in other implementations. The recovery
 772 * code can handle them, but they are not generated at runtime because they can
 773 * be reduced to cases 0, 1 and 2:
 774 *
 775 * case 3:
 776 *   data0    data1    data2     ppl        parity
 777 * +--------+--------+--------+ +----+    +--------------------+
 778 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 779 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 780 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 781 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 782 * +--------+--------+--------+ +----+    +--------------------+
 783 * pp_size = chunk_size
 784 *
 785 * case 4:
 786 *   data0    data1    data2     ppl        parity
 787 * +--------+--------+--------+ +----+    +--------------------+
 788 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 789 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 790 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 791 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 792 * +--------+--------+--------+ +----+    +--------------------+
 793 * pp_size = chunk_size
 794 */
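/*
 * A minimal sketch of the recovery XOR for case 1 (illustrative only; the
 * hypothetical buffers d0, d1 and pp stand in for the pages read by
 * ppl_recover_entry() below):
 *
 *	for (i = 0; i < block_size; i++)
 *		parity[i] = d0[i] ^ d1[i] ^ pp[i];
 */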
 795static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
 796			     sector_t ppl_sector)
 797{
 798	struct ppl_conf *ppl_conf = log->ppl_conf;
 799	struct mddev *mddev = ppl_conf->mddev;
 800	struct r5conf *conf = mddev->private;
 801	int block_size = ppl_conf->block_size;
 802	struct page *page1;
 803	struct page *page2;
 804	sector_t r_sector_first;
 805	sector_t r_sector_last;
 806	int strip_sectors;
 807	int data_disks;
 808	int i;
 809	int ret = 0;
 810	char b[BDEVNAME_SIZE];
 811	unsigned int pp_size = le32_to_cpu(e->pp_size);
 812	unsigned int data_size = le32_to_cpu(e->data_size);
 813
 814	page1 = alloc_page(GFP_KERNEL);
 815	page2 = alloc_page(GFP_KERNEL);
 816
 817	if (!page1 || !page2) {
 818		ret = -ENOMEM;
 819		goto out;
 820	}
 821
 822	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);
 823
 824	if ((pp_size >> 9) < conf->chunk_sectors) {
 825		if (pp_size > 0) {
 826			data_disks = data_size / pp_size;
 827			strip_sectors = pp_size >> 9;
 828		} else {
 829			data_disks = conf->raid_disks - conf->max_degraded;
 830			strip_sectors = (data_size >> 9) / data_disks;
 831		}
 832		r_sector_last = r_sector_first +
 833				(data_disks - 1) * conf->chunk_sectors +
 834				strip_sectors;
 835	} else {
 836		data_disks = conf->raid_disks - conf->max_degraded;
 837		strip_sectors = conf->chunk_sectors;
 838		r_sector_last = r_sector_first + (data_size >> 9);
 839	}
 840
 841	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
 842		 (unsigned long long)r_sector_first,
 843		 (unsigned long long)r_sector_last);
 844
 845	/* if start and end are 4k aligned, use a 4k block */
 846	if (block_size == 512 &&
 847	    (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
 848	    (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
 849		block_size = RAID5_STRIPE_SIZE(conf);
 850
 851	/* iterate through blocks in strip */
 852	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
 853		bool update_parity = false;
 854		sector_t parity_sector;
 855		struct md_rdev *parity_rdev;
 856		struct stripe_head sh;
 857		int disk;
 858		int indent = 0;
 859
 860		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
 861		indent += 2;
 862
 863		memset(page_address(page1), 0, PAGE_SIZE);
 864
 865		/* iterate through data member disks */
 866		for (disk = 0; disk < data_disks; disk++) {
 867			int dd_idx;
 868			struct md_rdev *rdev;
 869			sector_t sector;
 870			sector_t r_sector = r_sector_first + i +
 871					    (disk * conf->chunk_sectors);
 872
 873			pr_debug("%s:%*s data member disk %d start\n",
 874				 __func__, indent, "", disk);
 875			indent += 2;
 876
 877			if (r_sector >= r_sector_last) {
 878				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
 879					 __func__, indent, "",
 880					 (unsigned long long)r_sector);
 881				indent -= 2;
 882				continue;
 883			}
 884
 885			update_parity = true;
 886
 887			/* map raid sector to member disk */
 888			sector = raid5_compute_sector(conf, r_sector, 0,
 889						      &dd_idx, NULL);
 890			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
 891				 __func__, indent, "",
 892				 (unsigned long long)r_sector, dd_idx,
 893				 (unsigned long long)sector);
 894
 895			rdev = conf->disks[dd_idx].rdev;
 896			if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
 897				      sector >= rdev->recovery_offset)) {
 898				pr_debug("%s:%*s data member disk %d missing\n",
 899					 __func__, indent, "", dd_idx);
 900				update_parity = false;
 901				break;
 902			}
 903
 904			pr_debug("%s:%*s reading data member disk %s sector %llu\n",
 905				 __func__, indent, "", bdevname(rdev->bdev, b),
 906				 (unsigned long long)sector);
 907			if (!sync_page_io(rdev, sector, block_size, page2,
 908					REQ_OP_READ, 0, false)) {
 909				md_error(mddev, rdev);
 910				pr_debug("%s:%*s read failed!\n", __func__,
 911					 indent, "");
 912				ret = -EIO;
 913				goto out;
 914			}
 915
 916			ppl_xor(block_size, page1, page2);
 917
 918			indent -= 2;
 919		}
 920
 921		if (!update_parity)
 922			continue;
 923
 924		if (pp_size > 0) {
 925			pr_debug("%s:%*s reading pp disk sector %llu\n",
 926				 __func__, indent, "",
 927				 (unsigned long long)(ppl_sector + i));
 928			if (!sync_page_io(log->rdev,
 929					ppl_sector - log->rdev->data_offset + i,
 930					block_size, page2, REQ_OP_READ, 0,
 931					false)) {
 932				pr_debug("%s:%*s read failed!\n", __func__,
 933					 indent, "");
 934				md_error(mddev, log->rdev);
 935				ret = -EIO;
 936				goto out;
 937			}
 938
 939			ppl_xor(block_size, page1, page2);
 940		}
 941
 942		/* map raid sector to parity disk */
 943		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
 944				0, &disk, &sh);
 945		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
 946		parity_rdev = conf->disks[sh.pd_idx].rdev;
 947
 948		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
 949		pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
 950			 __func__, indent, "",
 951			 (unsigned long long)parity_sector,
 952			 bdevname(parity_rdev->bdev, b));
 953		if (!sync_page_io(parity_rdev, parity_sector, block_size,
 954				page1, REQ_OP_WRITE, 0, false)) {
 955			pr_debug("%s:%*s parity write error!\n", __func__,
 956				 indent, "");
 957			md_error(mddev, parity_rdev);
 958			ret = -EIO;
 959			goto out;
 960		}
 961	}
 962out:
 963	if (page1)
 964		__free_page(page1);
 965	if (page2)
 966		__free_page(page2);
 967	return ret;
 968}
 969
 970static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
 971		       sector_t offset)
 972{
 973	struct ppl_conf *ppl_conf = log->ppl_conf;
 974	struct md_rdev *rdev = log->rdev;
 975	struct mddev *mddev = rdev->mddev;
 976	sector_t ppl_sector = rdev->ppl.sector + offset +
 977			      (PPL_HEADER_SIZE >> 9);
 978	struct page *page;
 979	int i;
 980	int ret = 0;
 981
 982	page = alloc_page(GFP_KERNEL);
 983	if (!page)
 984		return -ENOMEM;
 985
 986	/* iterate through all PPL entries saved */
 987	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
 988		struct ppl_header_entry *e = &pplhdr->entries[i];
 989		u32 pp_size = le32_to_cpu(e->pp_size);
 990		sector_t sector = ppl_sector;
 991		int ppl_entry_sectors = pp_size >> 9;
 992		u32 crc, crc_stored;
 993
 994		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
 995			 __func__, rdev->raid_disk, i,
 996			 (unsigned long long)ppl_sector, pp_size);
 997
 998		crc = ~0;
 999		crc_stored = le32_to_cpu(e->checksum);
1000
1001		/* read partial parity for this entry and calculate its checksum */
1002		while (pp_size) {
1003			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;
1004
1005			if (!sync_page_io(rdev, sector - rdev->data_offset,
1006					s, page, REQ_OP_READ, 0, false)) {
1007				md_error(mddev, rdev);
1008				ret = -EIO;
1009				goto out;
1010			}
1011
1012			crc = crc32c_le(crc, page_address(page), s);
1013
1014			pp_size -= s;
1015			sector += s >> 9;
1016		}
1017
1018		crc = ~crc;
1019
1020		if (crc != crc_stored) {
1021			/*
1022			 * Don't recover this entry if the checksum does not
1023			 * match, but keep going and try to recover other
1024			 * entries.
1025			 */
1026			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
1027				 __func__, crc_stored, crc);
1028			ppl_conf->mismatch_count++;
1029		} else {
1030			ret = ppl_recover_entry(log, e, ppl_sector);
1031			if (ret)
1032				goto out;
1033			ppl_conf->recovered_entries++;
1034		}
1035
1036		ppl_sector += ppl_entry_sectors;
1037	}
1038
1039	/* flush the disk cache after recovery if necessary */
1040	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
1041out:
1042	__free_page(page);
1043	return ret;
1044}
1045
1046static int ppl_write_empty_header(struct ppl_log *log)
1047{
1048	struct page *page;
1049	struct ppl_header *pplhdr;
1050	struct md_rdev *rdev = log->rdev;
1051	int ret = 0;
1052
1053	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
1054		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);
1055
1056	page = alloc_page(GFP_NOIO | __GFP_ZERO);
1057	if (!page)
1058		return -ENOMEM;
1059
1060	pplhdr = page_address(page);
1061	/* zero out PPL space to avoid collision with old PPLs */
1062	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
1063			    log->rdev->ppl.size, GFP_NOIO, 0);
1064	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
1065	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
1066	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
1067
1068	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
1069			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
1070			  REQ_FUA, 0, false)) {
1071		md_error(rdev->mddev, rdev);
1072		ret = -EIO;
1073	}
1074
1075	__free_page(page);
1076	return ret;
1077}
1078
1079static int ppl_load_distributed(struct ppl_log *log)
1080{
1081	struct ppl_conf *ppl_conf = log->ppl_conf;
1082	struct md_rdev *rdev = log->rdev;
1083	struct mddev *mddev = rdev->mddev;
1084	struct page *page, *page2, *tmp;
1085	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
1086	u32 crc, crc_stored;
1087	u32 signature;
1088	int ret = 0, i;
1089	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
1090
1091	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
1092	/* read PPL headers, find the most recent one */
1093	page = alloc_page(GFP_KERNEL);
1094	if (!page)
1095		return -ENOMEM;
1096
1097	page2 = alloc_page(GFP_KERNEL);
1098	if (!page2) {
1099		__free_page(page);
1100		return -ENOMEM;
1101	}
1102
1103	/* search the PPL area for the latest PPL */
1104	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
1105		if (!sync_page_io(rdev,
1106				  rdev->ppl.sector - rdev->data_offset +
1107				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
1108				  0, false)) {
1109			md_error(mddev, rdev);
1110			ret = -EIO;
1111			/* if unable to read, don't recover any PPL */
1112			pplhdr = NULL;
1113			break;
1114		}
1115		pplhdr = page_address(page);
1116
1117		/* check header validity */
1118		crc_stored = le32_to_cpu(pplhdr->checksum);
1119		pplhdr->checksum = 0;
1120		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
1121
1122		if (crc_stored != crc) {
1123			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
1124				 __func__, crc_stored, crc,
1125				 (unsigned long long)pplhdr_offset);
1126			pplhdr = prev_pplhdr;
1127			pplhdr_offset = prev_pplhdr_offset;
1128			break;
1129		}
1130
1131		signature = le32_to_cpu(pplhdr->signature);
1132
1133		if (mddev->external) {
1134			/*
1135			 * For external metadata the header signature is set and
1136			 * validated in userspace.
1137			 */
1138			ppl_conf->signature = signature;
1139		} else if (ppl_conf->signature != signature) {
1140			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
1141				 __func__, signature, ppl_conf->signature,
1142				 (unsigned long long)pplhdr_offset);
1143			pplhdr = prev_pplhdr;
1144			pplhdr_offset = prev_pplhdr_offset;
1145			break;
1146		}
1147
1148		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
1149		    le64_to_cpu(pplhdr->generation)) {
1150			/* previous was newest */
1151			pplhdr = prev_pplhdr;
1152			pplhdr_offset = prev_pplhdr_offset;
1153			break;
1154		}
1155
1156		prev_pplhdr_offset = pplhdr_offset;
1157		prev_pplhdr = pplhdr;
1158
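		/*
		 * Keep the page that holds prev_pplhdr mapped and read the
		 * next candidate header into the other page.
		 */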
1159		tmp = page;
1160		page = page2;
1161		page2 = tmp;
1162
1163		/* calculate next potential ppl offset */
1164		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
1165			pplhdr_offset +=
1166			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
1167		pplhdr_offset += PPL_HEADER_SIZE >> 9;
1168	}
1169
1170	/* no valid ppl found */
1171	if (!pplhdr)
1172		ppl_conf->mismatch_count++;
1173	else
1174		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
1175		    __func__, (unsigned long long)pplhdr_offset,
1176		    le64_to_cpu(pplhdr->generation));
1177
1178	/* attempt to recover from log if we are starting a dirty array */
1179	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
1180		ret = ppl_recover(log, pplhdr, pplhdr_offset);
1181
1182	/* write empty header if we are starting the array */
1183	if (!ret && !mddev->pers)
1184		ret = ppl_write_empty_header(log);
1185
1186	__free_page(page);
1187	__free_page(page2);
1188
1189	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1190		 __func__, ret, ppl_conf->mismatch_count,
1191		 ppl_conf->recovered_entries);
1192	return ret;
1193}
1194
1195static int ppl_load(struct ppl_conf *ppl_conf)
1196{
1197	int ret = 0;
1198	u32 signature = 0;
1199	bool signature_set = false;
1200	int i;
1201
1202	for (i = 0; i < ppl_conf->count; i++) {
1203		struct ppl_log *log = &ppl_conf->child_logs[i];
1204
1205		/* skip missing drive */
1206		if (!log->rdev)
1207			continue;
1208
1209		ret = ppl_load_distributed(log);
1210		if (ret)
1211			break;
1212
1213		/*
1214		 * For external metadata we can't check if the signature is
1215		 * correct on a single drive, but we can check if it is the same
1216		 * on all drives.
1217		 */
1218		if (ppl_conf->mddev->external) {
1219			if (!signature_set) {
1220				signature = ppl_conf->signature;
1221				signature_set = true;
1222			} else if (signature != ppl_conf->signature) {
1223				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
1224					mdname(ppl_conf->mddev));
1225				ret = -EINVAL;
1226				break;
1227			}
1228		}
1229	}
1230
1231	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
1232		 __func__, ret, ppl_conf->mismatch_count,
1233		 ppl_conf->recovered_entries);
1234	return ret;
1235}
1236
1237static void __ppl_exit_log(struct ppl_conf *ppl_conf)
1238{
1239	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1240	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
1241
1242	kfree(ppl_conf->child_logs);
1243
1244	bioset_exit(&ppl_conf->bs);
1245	bioset_exit(&ppl_conf->flush_bs);
1246	mempool_exit(&ppl_conf->io_pool);
1247	kmem_cache_destroy(ppl_conf->io_kc);
1248
1249	kfree(ppl_conf);
1250}
1251
1252void ppl_exit_log(struct r5conf *conf)
1253{
1254	struct ppl_conf *ppl_conf = conf->log_private;
1255
1256	if (ppl_conf) {
1257		__ppl_exit_log(ppl_conf);
1258		conf->log_private = NULL;
1259	}
1260}
1261
1262static int ppl_validate_rdev(struct md_rdev *rdev)
1263{
1264	char b[BDEVNAME_SIZE];
1265	int ppl_data_sectors;
1266	int ppl_size_new;
1267
1268	/*
1269	 * The configured PPL size must be enough to store
1270	 * the header and (at the very least) partial parity
1271	 * for one stripe. Round it down to ensure the data
1272	 * space is cleanly divisible by stripe size.
1273	 */
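	/*
	 * Worked example (assuming the 4k PPL header and a 4k stripe, i.e.
	 * 8 sectors each): a configured ppl.size of 100 sectors leaves 92
	 * data sectors, rounded down to 88, giving a final ppl.size of 96.
	 */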
1274	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
1275
1276	if (ppl_data_sectors > 0)
1277		ppl_data_sectors = rounddown(ppl_data_sectors,
1278				RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
1279
1280	if (ppl_data_sectors <= 0) {
1281		pr_warn("md/raid:%s: PPL space too small on %s\n",
1282			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1283		return -ENOSPC;
1284	}
1285
1286	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);
1287
1288	if ((rdev->ppl.sector < rdev->data_offset &&
1289	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
1290	    (rdev->ppl.sector >= rdev->data_offset &&
1291	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
1292		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
1293			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1294		return -EINVAL;
1295	}
1296
1297	if (!rdev->mddev->external &&
1298	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
1299	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
1300		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
1301			mdname(rdev->mddev), bdevname(rdev->bdev, b));
1302		return -EINVAL;
1303	}
1304
1305	rdev->ppl.size = ppl_size_new;
1306
1307	return 0;
1308}
1309
1310static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
1311{
1312	struct request_queue *q;
1313
1314	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
1315				      PPL_HEADER_SIZE) * 2) {
1316		log->use_multippl = true;
1317		set_bit(MD_HAS_MULTIPLE_PPLS,
1318			&log->ppl_conf->mddev->flags);
1319		log->entry_space = PPL_SPACE_SIZE;
1320	} else {
1321		log->use_multippl = false;
1322		log->entry_space = (log->rdev->ppl.size << 9) -
1323				   PPL_HEADER_SIZE;
1324	}
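	/*
	 * With the 128k PPL_SPACE_SIZE and the 4k header, multi-PPL mode
	 * thus requires at least (128k + 4k) * 2 = 264k of PPL space.
	 */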
1325	log->next_io_sector = rdev->ppl.sector;
1326
1327	q = bdev_get_queue(rdev->bdev);
1328	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
1329		log->wb_cache_on = true;
1330}
1331
1332int ppl_init_log(struct r5conf *conf)
1333{
1334	struct ppl_conf *ppl_conf;
1335	struct mddev *mddev = conf->mddev;
1336	int ret = 0;
1337	int max_disks;
1338	int i;
1339
1340	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
1341		 mdname(conf->mddev));
1342
1343	if (PAGE_SIZE != 4096)
1344		return -EINVAL;
1345
1346	if (mddev->level != 5) {
1347		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
1348			mdname(mddev), mddev->level);
1349		return -EINVAL;
1350	}
1351
1352	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
1353		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
1354			mdname(mddev));
1355		return -EINVAL;
1356	}
1357
1358	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1359		pr_warn("md/raid:%s PPL is not compatible with journal\n",
1360			mdname(mddev));
1361		return -EINVAL;
1362	}
1363
1364	max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) *
1365		BITS_PER_BYTE;
1366	if (conf->raid_disks > max_disks) {
1367		pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
1368			mdname(mddev), max_disks);
1369		return -EINVAL;
1370	}
1371
1372	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
1373	if (!ppl_conf)
1374		return -ENOMEM;
1375
1376	ppl_conf->mddev = mddev;
1377
1378	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
1379	if (!ppl_conf->io_kc) {
1380		ret = -ENOMEM;
1381		goto err;
1382	}
1383
1384	ret = mempool_init(&ppl_conf->io_pool, conf->raid_disks, ppl_io_pool_alloc,
1385			   ppl_io_pool_free, ppl_conf->io_kc);
1386	if (ret)
1387		goto err;
1388
1389	ret = bioset_init(&ppl_conf->bs, conf->raid_disks, 0, BIOSET_NEED_BVECS);
1390	if (ret)
1391		goto err;
1392
1393	ret = bioset_init(&ppl_conf->flush_bs, conf->raid_disks, 0, 0);
1394	if (ret)
1395		goto err;
1396
1397	ppl_conf->count = conf->raid_disks;
1398	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
1399				       GFP_KERNEL);
1400	if (!ppl_conf->child_logs) {
1401		ret = -ENOMEM;
1402		goto err;
1403	}
1404
1405	atomic64_set(&ppl_conf->seq, 0);
1406	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1407	spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1408	ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
1409
1410	if (!mddev->external) {
1411		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
1412		ppl_conf->block_size = 512;
1413	} else {
1414		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
1415	}
1416
1417	for (i = 0; i < ppl_conf->count; i++) {
1418		struct ppl_log *log = &ppl_conf->child_logs[i];
1419		struct md_rdev *rdev = conf->disks[i].rdev;
1420
1421		mutex_init(&log->io_mutex);
1422		spin_lock_init(&log->io_list_lock);
1423		INIT_LIST_HEAD(&log->io_list);
1424
1425		log->ppl_conf = ppl_conf;
1426		log->rdev = rdev;
1427
1428		if (rdev) {
1429			ret = ppl_validate_rdev(rdev);
1430			if (ret)
1431				goto err;
1432
1433			ppl_init_child_log(log, rdev);
1434		}
1435	}
1436
1437	/* load and possibly recover the logs from the member disks */
1438	ret = ppl_load(ppl_conf);
1439
1440	if (ret) {
1441		goto err;
1442	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
1443		   ppl_conf->recovered_entries > 0 &&
1444		   ppl_conf->mismatch_count == 0) {
1445		/*
1446		 * If we are starting a dirty array and the recovery succeeds
1447		 * without any issues, set the array as clean.
1448		 */
1449		mddev->recovery_cp = MaxSector;
1450		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
1451	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
1452		/* no mismatch allowed when enabling PPL for a running array */
1453		ret = -EINVAL;
1454		goto err;
1455	}
1456
1457	conf->log_private = ppl_conf;
1458	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
1459
1460	return 0;
1461err:
1462	__ppl_exit_log(ppl_conf);
1463	return ret;
1464}
1465
1466int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1467{
1468	struct ppl_conf *ppl_conf = conf->log_private;
1469	struct ppl_log *log;
1470	int ret = 0;
1471	char b[BDEVNAME_SIZE];
1472
1473	if (!rdev)
1474		return -EINVAL;
1475
1476	pr_debug("%s: disk: %d operation: %s dev: %s\n",
1477		 __func__, rdev->raid_disk, add ? "add" : "remove",
1478		 bdevname(rdev->bdev, b));
1479
1480	if (rdev->raid_disk < 0)
1481		return 0;
1482
1483	if (rdev->raid_disk >= ppl_conf->count)
1484		return -ENODEV;
1485
1486	log = &ppl_conf->child_logs[rdev->raid_disk];
1487
1488	mutex_lock(&log->io_mutex);
1489	if (add) {
1490		ret = ppl_validate_rdev(rdev);
1491		if (!ret) {
1492			log->rdev = rdev;
1493			ret = ppl_write_empty_header(log);
1494			ppl_init_child_log(log, rdev);
1495		}
1496	} else {
1497		log->rdev = NULL;
1498	}
1499	mutex_unlock(&log->io_mutex);
1500
1501	return ret;
1502}
1503
1504static ssize_t
1505ppl_write_hint_show(struct mddev *mddev, char *buf)
1506{
1507	size_t ret = 0;
1508	struct r5conf *conf;
1509	struct ppl_conf *ppl_conf = NULL;
1510
1511	spin_lock(&mddev->lock);
1512	conf = mddev->private;
1513	if (conf && raid5_has_ppl(conf))
1514		ppl_conf = conf->log_private;
1515	ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
1516	spin_unlock(&mddev->lock);
1517
1518	return ret;
1519}
1520
1521static ssize_t
1522ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
1523{
1524	struct r5conf *conf;
1525	struct ppl_conf *ppl_conf;
1526	int err = 0;
1527	unsigned short new;
1528
1529	if (len >= PAGE_SIZE)
1530		return -EINVAL;
1531	if (kstrtou16(page, 10, &new))
1532		return -EINVAL;
1533
1534	err = mddev_lock(mddev);
1535	if (err)
1536		return err;
1537
1538	conf = mddev->private;
1539	if (!conf) {
1540		err = -ENODEV;
1541	} else if (raid5_has_ppl(conf)) {
1542		ppl_conf = conf->log_private;
1543		if (!ppl_conf)
1544			err = -EINVAL;
1545		else
1546			ppl_conf->write_hint = new;
1547	} else {
1548		err = -EINVAL;
1549	}
1550
1551	mddev_unlock(mddev);
1552
1553	return err ?: len;
1554}
1555
1556struct md_sysfs_entry
1557ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
1558			ppl_write_hint_show,
1559			ppl_write_hint_store);