Linux Audio

Check our new training course

Loading...
v6.2
   1/*
   2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
   3 * Copyright (C) 2016-2017 Milan Broz
   4 * Copyright (C) 2016-2017 Mikulas Patocka
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-bio-record.h"
  10
  11#include <linux/compiler.h>
  12#include <linux/module.h>
  13#include <linux/device-mapper.h>
  14#include <linux/dm-io.h>
  15#include <linux/vmalloc.h>
  16#include <linux/sort.h>
  17#include <linux/rbtree.h>
  18#include <linux/delay.h>
  19#include <linux/random.h>
  20#include <linux/reboot.h>
  21#include <crypto/hash.h>
  22#include <crypto/skcipher.h>
  23#include <linux/async_tx.h>
  24#include <linux/dm-bufio.h>
  25
  26#include "dm-audit.h"
  27
  28#define DM_MSG_PREFIX "integrity"
  29
  30#define DEFAULT_INTERLEAVE_SECTORS	32768
  31#define DEFAULT_JOURNAL_SIZE_FACTOR	7
  32#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
  33#define DEFAULT_BUFFER_SECTORS		128
  34#define DEFAULT_JOURNAL_WATERMARK	50
  35#define DEFAULT_SYNC_MSEC		10000
  36#define DEFAULT_MAX_JOURNAL_SECTORS	131072
  37#define MIN_LOG2_INTERLEAVE_SECTORS	3
  38#define MAX_LOG2_INTERLEAVE_SECTORS	31
  39#define METADATA_WORKQUEUE_MAX_ACTIVE	16
  40#define RECALC_SECTORS			32768
  41#define RECALC_WRITE_SUPER		16
  42#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
  43#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
  44#define DISCARD_FILLER			0xf6
  45#define SALT_SIZE			16
  46
  47/*
  48 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
  49 * so it should not be enabled in the official kernel
  50 */
  51//#define DEBUG_PRINT
  52//#define INTERNAL_VERIFY
  53
  54/*
  55 * On disk structures
  56 */
  57
  58#define SB_MAGIC			"integrt"
  59#define SB_VERSION_1			1
  60#define SB_VERSION_2			2
  61#define SB_VERSION_3			3
  62#define SB_VERSION_4			4
  63#define SB_VERSION_5			5
  64#define SB_SECTORS			8
  65#define MAX_SECTORS_PER_BLOCK		8
  66
  67struct superblock {
  68	__u8 magic[8];
  69	__u8 version;
  70	__u8 log2_interleave_sectors;
  71	__le16 integrity_tag_size;
  72	__le32 journal_sections;
  73	__le64 provided_data_sectors;	/* userspace uses this value */
  74	__le32 flags;
  75	__u8 log2_sectors_per_block;
  76	__u8 log2_blocks_per_bitmap_bit;
  77	__u8 pad[2];
  78	__le64 recalc_sector;
  79	__u8 pad2[8];
  80	__u8 salt[SALT_SIZE];
  81};
  82
  83#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
  84#define SB_FLAG_RECALCULATING		0x2
  85#define SB_FLAG_DIRTY_BITMAP		0x4
  86#define SB_FLAG_FIXED_PADDING		0x8
  87#define SB_FLAG_FIXED_HMAC		0x10
  88
  89#define	JOURNAL_ENTRY_ROUNDUP		8
  90
  91typedef __le64 commit_id_t;
  92#define JOURNAL_MAC_PER_SECTOR		8
  93
  94struct journal_entry {
  95	union {
  96		struct {
  97			__le32 sector_lo;
  98			__le32 sector_hi;
  99		} s;
 100		__le64 sector;
 101	} u;
 102	commit_id_t last_bytes[];
 103	/* __u8 tag[0]; */
 104};
 105
 106#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
 107
 108#if BITS_PER_LONG == 64
 109#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
 110#else
 111#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
 112#endif
 113#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
 114#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
 115#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
 116#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
 117#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
 118
 119#define JOURNAL_BLOCK_SECTORS		8
 120#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
 121#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
 122
 123struct journal_sector {
 124	struct_group(sectors,
 125		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
 126		__u8 mac[JOURNAL_MAC_PER_SECTOR];
 127	);
 128	commit_id_t commit_id;
 129};
 130
 131#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
 132
 133#define METADATA_PADDING_SECTORS	8
 134
 135#define N_COMMIT_IDS			4
 136
 137static unsigned char prev_commit_seq(unsigned char seq)
 138{
 139	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
 140}
 141
 142static unsigned char next_commit_seq(unsigned char seq)
 143{
 144	return (seq + 1) % N_COMMIT_IDS;
 145}
 146
 147/*
 148 * In-memory structures
 149 */
 150
 151struct journal_node {
 152	struct rb_node node;
 153	sector_t sector;
 154};
 155
 156struct alg_spec {
 157	char *alg_string;
 158	char *key_string;
 159	__u8 *key;
 160	unsigned key_size;
 161};
 162
 163struct dm_integrity_c {
 164	struct dm_dev *dev;
 165	struct dm_dev *meta_dev;
 166	unsigned tag_size;
 167	__s8 log2_tag_size;
 168	sector_t start;
 169	mempool_t journal_io_mempool;
 170	struct dm_io_client *io;
 171	struct dm_bufio_client *bufio;
 172	struct workqueue_struct *metadata_wq;
 173	struct superblock *sb;
 174	unsigned journal_pages;
 175	unsigned n_bitmap_blocks;
 176
 177	struct page_list *journal;
 178	struct page_list *journal_io;
 179	struct page_list *journal_xor;
 180	struct page_list *recalc_bitmap;
 181	struct page_list *may_write_bitmap;
 182	struct bitmap_block_status *bbs;
 183	unsigned bitmap_flush_interval;
 184	int synchronous_mode;
 185	struct bio_list synchronous_bios;
 186	struct delayed_work bitmap_flush_work;
 187
 188	struct crypto_skcipher *journal_crypt;
 189	struct scatterlist **journal_scatterlist;
 190	struct scatterlist **journal_io_scatterlist;
 191	struct skcipher_request **sk_requests;
 192
 193	struct crypto_shash *journal_mac;
 194
 195	struct journal_node *journal_tree;
 196	struct rb_root journal_tree_root;
 197
 198	sector_t provided_data_sectors;
 199
 200	unsigned short journal_entry_size;
 201	unsigned char journal_entries_per_sector;
 202	unsigned char journal_section_entries;
 203	unsigned short journal_section_sectors;
 204	unsigned journal_sections;
 205	unsigned journal_entries;
 206	sector_t data_device_sectors;
 207	sector_t meta_device_sectors;
 208	unsigned initial_sectors;
 209	unsigned metadata_run;
 210	__s8 log2_metadata_run;
 211	__u8 log2_buffer_sectors;
 212	__u8 sectors_per_block;
 213	__u8 log2_blocks_per_bitmap_bit;
 214
 215	unsigned char mode;
 216
 217	int failed;
 218
 219	struct crypto_shash *internal_hash;
 220
 221	struct dm_target *ti;
 222
 223	/* these variables are locked with endio_wait.lock */
 224	struct rb_root in_progress;
 225	struct list_head wait_list;
 226	wait_queue_head_t endio_wait;
 227	struct workqueue_struct *wait_wq;
 228	struct workqueue_struct *offload_wq;
 229
 230	unsigned char commit_seq;
 231	commit_id_t commit_ids[N_COMMIT_IDS];
 232
 233	unsigned committed_section;
 234	unsigned n_committed_sections;
 235
 236	unsigned uncommitted_section;
 237	unsigned n_uncommitted_sections;
 238
 239	unsigned free_section;
 240	unsigned char free_section_entry;
 241	unsigned free_sectors;
 242
 243	unsigned free_sectors_threshold;
 244
 245	struct workqueue_struct *commit_wq;
 246	struct work_struct commit_work;
 247
 248	struct workqueue_struct *writer_wq;
 249	struct work_struct writer_work;
 250
 251	struct workqueue_struct *recalc_wq;
 252	struct work_struct recalc_work;
 253	u8 *recalc_buffer;
 254	u8 *recalc_tags;
 255
 256	struct bio_list flush_bio_list;
 257
 258	unsigned long autocommit_jiffies;
 259	struct timer_list autocommit_timer;
 260	unsigned autocommit_msec;
 261
 262	wait_queue_head_t copy_to_journal_wait;
 263
 264	struct completion crypto_backoff;
 265
 266	bool wrote_to_journal;
 267	bool journal_uptodate;
 268	bool just_formatted;
 269	bool recalculate_flag;
 270	bool reset_recalculate_flag;
 271	bool discard;
 272	bool fix_padding;
 273	bool fix_hmac;
 274	bool legacy_recalculate;
 275
 276	struct alg_spec internal_hash_alg;
 277	struct alg_spec journal_crypt_alg;
 278	struct alg_spec journal_mac_alg;
 279
 280	atomic64_t number_of_mismatches;
 281
 282	struct notifier_block reboot_notifier;
 283};
 284
 285struct dm_integrity_range {
 286	sector_t logical_sector;
 287	sector_t n_sectors;
 288	bool waiting;
 289	union {
 290		struct rb_node node;
 291		struct {
 292			struct task_struct *task;
 293			struct list_head wait_entry;
 294		};
 295	};
 296};
 297
 298struct dm_integrity_io {
 299	struct work_struct work;
 300
 301	struct dm_integrity_c *ic;
 302	enum req_op op;
 303	bool fua;
 304
 305	struct dm_integrity_range range;
 306
 307	sector_t metadata_block;
 308	unsigned metadata_offset;
 309
 310	atomic_t in_flight;
 311	blk_status_t bi_status;
 312
 313	struct completion *completion;
 314
 315	struct dm_bio_details bio_details;
 316};
 317
 318struct journal_completion {
 319	struct dm_integrity_c *ic;
 320	atomic_t in_flight;
 321	struct completion comp;
 322};
 323
 324struct journal_io {
 325	struct dm_integrity_range range;
 326	struct journal_completion *comp;
 327};
 328
 329struct bitmap_block_status {
 330	struct work_struct work;
 331	struct dm_integrity_c *ic;
 332	unsigned idx;
 333	unsigned long *bitmap;
 334	struct bio_list bio_queue;
 335	spinlock_t bio_queue_lock;
 336
 337};
 338
 339static struct kmem_cache *journal_io_cache;
 340
 341#define JOURNAL_IO_MEMPOOL	32
 342
 343#ifdef DEBUG_PRINT
 344#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
 345static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
 346{
 347	va_list args;
 348	va_start(args, msg);
 349	vprintk(msg, args);
 350	va_end(args);
 351	if (len)
 352		pr_cont(":");
 353	while (len) {
 354		pr_cont(" %02x", *bytes);
 355		bytes++;
 356		len--;
 357	}
 358	pr_cont("\n");
 359}
 360#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
 361#else
 362#define DEBUG_print(x, ...)			do { } while (0)
 363#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
 364#endif
 365
 366static void dm_integrity_prepare(struct request *rq)
 367{
 368}
 369
 370static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
 371{
 372}
 373
 374/*
 375 * DM Integrity profile, protection is performed layer above (dm-crypt)
 376 */
 377static const struct blk_integrity_profile dm_integrity_profile = {
 378	.name			= "DM-DIF-EXT-TAG",
 379	.generate_fn		= NULL,
 380	.verify_fn		= NULL,
 381	.prepare_fn		= dm_integrity_prepare,
 382	.complete_fn		= dm_integrity_complete,
 383};
 384
 385static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
 386static void integrity_bio_wait(struct work_struct *w);
 387static void dm_integrity_dtr(struct dm_target *ti);
 388
 389static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
 390{
 391	if (err == -EILSEQ)
 392		atomic64_inc(&ic->number_of_mismatches);
 393	if (!cmpxchg(&ic->failed, 0, err))
 394		DMERR("Error on %s: %d", msg, err);
 395}
 396
 397static int dm_integrity_failed(struct dm_integrity_c *ic)
 398{
 399	return READ_ONCE(ic->failed);
 400}
 401
 402static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
 403{
 404	if (ic->legacy_recalculate)
 405		return false;
 406	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
 407	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
 408	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
 409		return true;
 410	return false;
 411}
 412
 413static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
 414					  unsigned j, unsigned char seq)
 415{
 416	/*
 417	 * Xor the number with section and sector, so that if a piece of
 418	 * journal is written at wrong place, it is detected.
 419	 */
 420	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
 421}
 422
 423static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
 424				sector_t *area, sector_t *offset)
 425{
 426	if (!ic->meta_dev) {
 427		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
 428		*area = data_sector >> log2_interleave_sectors;
 429		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
 430	} else {
 431		*area = 0;
 432		*offset = data_sector;
 433	}
 434}
 435
 436#define sector_to_block(ic, n)						\
 437do {									\
 438	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
 439	(n) >>= (ic)->sb->log2_sectors_per_block;			\
 440} while (0)
 441
 442static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
 443					    sector_t offset, unsigned *metadata_offset)
 444{
 445	__u64 ms;
 446	unsigned mo;
 447
 448	ms = area << ic->sb->log2_interleave_sectors;
 449	if (likely(ic->log2_metadata_run >= 0))
 450		ms += area << ic->log2_metadata_run;
 451	else
 452		ms += area * ic->metadata_run;
 453	ms >>= ic->log2_buffer_sectors;
 454
 455	sector_to_block(ic, offset);
 456
 457	if (likely(ic->log2_tag_size >= 0)) {
 458		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
 459		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 460	} else {
 461		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
 462		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 463	}
 464	*metadata_offset = mo;
 465	return ms;
 466}
 467
 468static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
 469{
 470	sector_t result;
 471
 472	if (ic->meta_dev)
 473		return offset;
 474
 475	result = area << ic->sb->log2_interleave_sectors;
 476	if (likely(ic->log2_metadata_run >= 0))
 477		result += (area + 1) << ic->log2_metadata_run;
 478	else
 479		result += (area + 1) * ic->metadata_run;
 480
 481	result += (sector_t)ic->initial_sectors + offset;
 482	result += ic->start;
 483
 484	return result;
 485}
 486
 487static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
 488{
 489	if (unlikely(*sec_ptr >= ic->journal_sections))
 490		*sec_ptr -= ic->journal_sections;
 491}
 492
 493static void sb_set_version(struct dm_integrity_c *ic)
 494{
 495	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
 496		ic->sb->version = SB_VERSION_5;
 497	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
 498		ic->sb->version = SB_VERSION_4;
 499	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
 500		ic->sb->version = SB_VERSION_3;
 501	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
 502		ic->sb->version = SB_VERSION_2;
 503	else
 504		ic->sb->version = SB_VERSION_1;
 505}
 506
 507static int sb_mac(struct dm_integrity_c *ic, bool wr)
 508{
 509	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
 510	int r;
 511	unsigned size = crypto_shash_digestsize(ic->journal_mac);
 512
 513	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
 514		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
 515		return -EINVAL;
 516	}
 517
 518	desc->tfm = ic->journal_mac;
 519
 520	r = crypto_shash_init(desc);
 521	if (unlikely(r < 0)) {
 522		dm_integrity_io_error(ic, "crypto_shash_init", r);
 523		return r;
 524	}
 525
 526	r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
 527	if (unlikely(r < 0)) {
 528		dm_integrity_io_error(ic, "crypto_shash_update", r);
 529		return r;
 530	}
 531
 532	if (likely(wr)) {
 533		r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
 534		if (unlikely(r < 0)) {
 535			dm_integrity_io_error(ic, "crypto_shash_final", r);
 536			return r;
 537		}
 538	} else {
 539		__u8 result[HASH_MAX_DIGESTSIZE];
 540		r = crypto_shash_final(desc, result);
 541		if (unlikely(r < 0)) {
 542			dm_integrity_io_error(ic, "crypto_shash_final", r);
 543			return r;
 544		}
 545		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
 546			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
 547			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
 548			return -EILSEQ;
 549		}
 550	}
 551
 552	return 0;
 553}
 554
 555static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
 556{
 557	struct dm_io_request io_req;
 558	struct dm_io_region io_loc;
 559	const enum req_op op = opf & REQ_OP_MASK;
 560	int r;
 561
 562	io_req.bi_opf = opf;
 
 563	io_req.mem.type = DM_IO_KMEM;
 564	io_req.mem.ptr.addr = ic->sb;
 565	io_req.notify.fn = NULL;
 566	io_req.client = ic->io;
 567	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
 568	io_loc.sector = ic->start;
 569	io_loc.count = SB_SECTORS;
 570
 571	if (op == REQ_OP_WRITE) {
 572		sb_set_version(ic);
 573		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
 574			r = sb_mac(ic, true);
 575			if (unlikely(r))
 576				return r;
 577		}
 578	}
 579
 580	r = dm_io(&io_req, 1, &io_loc, NULL);
 581	if (unlikely(r))
 582		return r;
 583
 584	if (op == REQ_OP_READ) {
 585		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
 586			r = sb_mac(ic, false);
 587			if (unlikely(r))
 588				return r;
 589		}
 590	}
 591
 592	return 0;
 593}
 594
 595#define BITMAP_OP_TEST_ALL_SET		0
 596#define BITMAP_OP_TEST_ALL_CLEAR	1
 597#define BITMAP_OP_SET			2
 598#define BITMAP_OP_CLEAR			3
 599
 600static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
 601			    sector_t sector, sector_t n_sectors, int mode)
 602{
 603	unsigned long bit, end_bit, this_end_bit, page, end_page;
 604	unsigned long *data;
 605
 606	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
 607		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
 608			sector,
 609			n_sectors,
 610			ic->sb->log2_sectors_per_block,
 611			ic->log2_blocks_per_bitmap_bit,
 612			mode);
 613		BUG();
 614	}
 615
 616	if (unlikely(!n_sectors))
 617		return true;
 618
 619	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 620	end_bit = (sector + n_sectors - 1) >>
 621		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 622
 623	page = bit / (PAGE_SIZE * 8);
 624	bit %= PAGE_SIZE * 8;
 625
 626	end_page = end_bit / (PAGE_SIZE * 8);
 627	end_bit %= PAGE_SIZE * 8;
 628
 629repeat:
 630	if (page < end_page) {
 631		this_end_bit = PAGE_SIZE * 8 - 1;
 632	} else {
 633		this_end_bit = end_bit;
 634	}
 635
 636	data = lowmem_page_address(bitmap[page].page);
 637
 638	if (mode == BITMAP_OP_TEST_ALL_SET) {
 639		while (bit <= this_end_bit) {
 640			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 641				do {
 642					if (data[bit / BITS_PER_LONG] != -1)
 643						return false;
 644					bit += BITS_PER_LONG;
 645				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 646				continue;
 647			}
 648			if (!test_bit(bit, data))
 649				return false;
 650			bit++;
 651		}
 652	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
 653		while (bit <= this_end_bit) {
 654			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 655				do {
 656					if (data[bit / BITS_PER_LONG] != 0)
 657						return false;
 658					bit += BITS_PER_LONG;
 659				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 660				continue;
 661			}
 662			if (test_bit(bit, data))
 663				return false;
 664			bit++;
 665		}
 666	} else if (mode == BITMAP_OP_SET) {
 667		while (bit <= this_end_bit) {
 668			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 669				do {
 670					data[bit / BITS_PER_LONG] = -1;
 671					bit += BITS_PER_LONG;
 672				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 673				continue;
 674			}
 675			__set_bit(bit, data);
 676			bit++;
 677		}
 678	} else if (mode == BITMAP_OP_CLEAR) {
 679		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
 680			clear_page(data);
 681		else while (bit <= this_end_bit) {
 682			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 683				do {
 684					data[bit / BITS_PER_LONG] = 0;
 685					bit += BITS_PER_LONG;
 686				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 687				continue;
 688			}
 689			__clear_bit(bit, data);
 690			bit++;
 691		}
 692	} else {
 693		BUG();
 694	}
 695
 696	if (unlikely(page < end_page)) {
 697		bit = 0;
 698		page++;
 699		goto repeat;
 700	}
 701
 702	return true;
 703}
 704
 705static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
 706{
 707	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
 708	unsigned i;
 709
 710	for (i = 0; i < n_bitmap_pages; i++) {
 711		unsigned long *dst_data = lowmem_page_address(dst[i].page);
 712		unsigned long *src_data = lowmem_page_address(src[i].page);
 713		copy_page(dst_data, src_data);
 714	}
 715}
 716
 717static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
 718{
 719	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 720	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
 721
 722	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
 723	return &ic->bbs[bitmap_block];
 724}
 725
 726static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 727				 bool e, const char *function)
 728{
 729#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
 730	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
 731
 732	if (unlikely(section >= ic->journal_sections) ||
 733	    unlikely(offset >= limit)) {
 734		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
 735		       function, section, offset, ic->journal_sections, limit);
 736		BUG();
 737	}
 738#endif
 739}
 740
 741static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 742			       unsigned *pl_index, unsigned *pl_offset)
 743{
 744	unsigned sector;
 745
 746	access_journal_check(ic, section, offset, false, "page_list_location");
 747
 748	sector = section * ic->journal_section_sectors + offset;
 749
 750	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 751	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 752}
 753
 754static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
 755					       unsigned section, unsigned offset, unsigned *n_sectors)
 756{
 757	unsigned pl_index, pl_offset;
 758	char *va;
 759
 760	page_list_location(ic, section, offset, &pl_index, &pl_offset);
 761
 762	if (n_sectors)
 763		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
 764
 765	va = lowmem_page_address(pl[pl_index].page);
 766
 767	return (struct journal_sector *)(va + pl_offset);
 768}
 769
 770static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
 771{
 772	return access_page_list(ic, ic->journal, section, offset, NULL);
 773}
 774
 775static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
 776{
 777	unsigned rel_sector, offset;
 778	struct journal_sector *js;
 779
 780	access_journal_check(ic, section, n, true, "access_journal_entry");
 781
 782	rel_sector = n % JOURNAL_BLOCK_SECTORS;
 783	offset = n / JOURNAL_BLOCK_SECTORS;
 784
 785	js = access_journal(ic, section, rel_sector);
 786	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
 787}
 788
 789static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
 790{
 791	n <<= ic->sb->log2_sectors_per_block;
 792
 793	n += JOURNAL_BLOCK_SECTORS;
 794
 795	access_journal_check(ic, section, n, false, "access_journal_data");
 796
 797	return access_journal(ic, section, n);
 798}
 799
 800static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
 801{
 802	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
 803	int r;
 804	unsigned j, size;
 805
 806	desc->tfm = ic->journal_mac;
 807
 808	r = crypto_shash_init(desc);
 809	if (unlikely(r < 0)) {
 810		dm_integrity_io_error(ic, "crypto_shash_init", r);
 811		goto err;
 812	}
 813
 814	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
 815		__le64 section_le;
 816
 817		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
 818		if (unlikely(r < 0)) {
 819			dm_integrity_io_error(ic, "crypto_shash_update", r);
 820			goto err;
 821		}
 822
 823		section_le = cpu_to_le64(section);
 824		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
 825		if (unlikely(r < 0)) {
 826			dm_integrity_io_error(ic, "crypto_shash_update", r);
 827			goto err;
 828		}
 829	}
 830
 831	for (j = 0; j < ic->journal_section_entries; j++) {
 832		struct journal_entry *je = access_journal_entry(ic, section, j);
 833		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
 834		if (unlikely(r < 0)) {
 835			dm_integrity_io_error(ic, "crypto_shash_update", r);
 836			goto err;
 837		}
 838	}
 839
 840	size = crypto_shash_digestsize(ic->journal_mac);
 841
 842	if (likely(size <= JOURNAL_MAC_SIZE)) {
 843		r = crypto_shash_final(desc, result);
 844		if (unlikely(r < 0)) {
 845			dm_integrity_io_error(ic, "crypto_shash_final", r);
 846			goto err;
 847		}
 848		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
 849	} else {
 850		__u8 digest[HASH_MAX_DIGESTSIZE];
 851
 852		if (WARN_ON(size > sizeof(digest))) {
 853			dm_integrity_io_error(ic, "digest_size", -EINVAL);
 854			goto err;
 855		}
 856		r = crypto_shash_final(desc, digest);
 857		if (unlikely(r < 0)) {
 858			dm_integrity_io_error(ic, "crypto_shash_final", r);
 859			goto err;
 860		}
 861		memcpy(result, digest, JOURNAL_MAC_SIZE);
 862	}
 863
 864	return;
 865err:
 866	memset(result, 0, JOURNAL_MAC_SIZE);
 867}
 868
 869static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
 870{
 871	__u8 result[JOURNAL_MAC_SIZE];
 872	unsigned j;
 873
 874	if (!ic->journal_mac)
 875		return;
 876
 877	section_mac(ic, section, result);
 878
 879	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
 880		struct journal_sector *js = access_journal(ic, section, j);
 881
 882		if (likely(wr))
 883			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
 884		else {
 885			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
 886				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
 887				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
 888			}
 889		}
 890	}
 891}
 892
 893static void complete_journal_op(void *context)
 894{
 895	struct journal_completion *comp = context;
 896	BUG_ON(!atomic_read(&comp->in_flight));
 897	if (likely(atomic_dec_and_test(&comp->in_flight)))
 898		complete(&comp->comp);
 899}
 900
 901static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 902			unsigned n_sections, struct journal_completion *comp)
 903{
 904	struct async_submit_ctl submit;
 905	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
 906	unsigned pl_index, pl_offset, section_index;
 907	struct page_list *source_pl, *target_pl;
 908
 909	if (likely(encrypt)) {
 910		source_pl = ic->journal;
 911		target_pl = ic->journal_io;
 912	} else {
 913		source_pl = ic->journal_io;
 914		target_pl = ic->journal;
 915	}
 916
 917	page_list_location(ic, section, 0, &pl_index, &pl_offset);
 918
 919	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
 920
 921	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
 922
 923	section_index = pl_index;
 924
 925	do {
 926		size_t this_step;
 927		struct page *src_pages[2];
 928		struct page *dst_page;
 929
 930		while (unlikely(pl_index == section_index)) {
 931			unsigned dummy;
 932			if (likely(encrypt))
 933				rw_section_mac(ic, section, true);
 934			section++;
 935			n_sections--;
 936			if (!n_sections)
 937				break;
 938			page_list_location(ic, section, 0, &section_index, &dummy);
 939		}
 940
 941		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
 942		dst_page = target_pl[pl_index].page;
 943		src_pages[0] = source_pl[pl_index].page;
 944		src_pages[1] = ic->journal_xor[pl_index].page;
 945
 946		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
 947
 948		pl_index++;
 949		pl_offset = 0;
 950		n_bytes -= this_step;
 951	} while (n_bytes);
 952
 953	BUG_ON(n_sections);
 954
 955	async_tx_issue_pending_all();
 956}
 957
 958static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 959{
 960	struct journal_completion *comp = req->data;
 961	if (unlikely(err)) {
 962		if (likely(err == -EINPROGRESS)) {
 963			complete(&comp->ic->crypto_backoff);
 964			return;
 965		}
 966		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
 967	}
 968	complete_journal_op(comp);
 969}
 970
 971static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 972{
 973	int r;
 974	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 975				      complete_journal_encrypt, comp);
 976	if (likely(encrypt))
 977		r = crypto_skcipher_encrypt(req);
 978	else
 979		r = crypto_skcipher_decrypt(req);
 980	if (likely(!r))
 981		return false;
 982	if (likely(r == -EINPROGRESS))
 983		return true;
 984	if (likely(r == -EBUSY)) {
 985		wait_for_completion(&comp->ic->crypto_backoff);
 986		reinit_completion(&comp->ic->crypto_backoff);
 987		return true;
 988	}
 989	dm_integrity_io_error(comp->ic, "encrypt", r);
 990	return false;
 991}
 992
 993static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 994			  unsigned n_sections, struct journal_completion *comp)
 995{
 996	struct scatterlist **source_sg;
 997	struct scatterlist **target_sg;
 998
 999	atomic_add(2, &comp->in_flight);
1000
1001	if (likely(encrypt)) {
1002		source_sg = ic->journal_scatterlist;
1003		target_sg = ic->journal_io_scatterlist;
1004	} else {
1005		source_sg = ic->journal_io_scatterlist;
1006		target_sg = ic->journal_scatterlist;
1007	}
1008
1009	do {
1010		struct skcipher_request *req;
1011		unsigned ivsize;
1012		char *iv;
1013
1014		if (likely(encrypt))
1015			rw_section_mac(ic, section, true);
1016
1017		req = ic->sk_requests[section];
1018		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
1019		iv = req->iv;
1020
1021		memcpy(iv, iv + ivsize, ivsize);
1022
1023		req->src = source_sg[section];
1024		req->dst = target_sg[section];
1025
1026		if (unlikely(do_crypt(encrypt, req, comp)))
1027			atomic_inc(&comp->in_flight);
1028
1029		section++;
1030		n_sections--;
1031	} while (n_sections);
1032
1033	atomic_dec(&comp->in_flight);
1034	complete_journal_op(comp);
1035}
1036
1037static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
1038			    unsigned n_sections, struct journal_completion *comp)
1039{
1040	if (ic->journal_xor)
1041		return xor_journal(ic, encrypt, section, n_sections, comp);
1042	else
1043		return crypt_journal(ic, encrypt, section, n_sections, comp);
1044}
1045
1046static void complete_journal_io(unsigned long error, void *context)
1047{
1048	struct journal_completion *comp = context;
1049	if (unlikely(error != 0))
1050		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
1051	complete_journal_op(comp);
1052}
1053
1054static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
1055			       unsigned sector, unsigned n_sectors,
1056			       struct journal_completion *comp)
1057{
1058	struct dm_io_request io_req;
1059	struct dm_io_region io_loc;
1060	unsigned pl_index, pl_offset;
1061	int r;
1062
1063	if (unlikely(dm_integrity_failed(ic))) {
1064		if (comp)
1065			complete_journal_io(-1UL, comp);
1066		return;
1067	}
1068
1069	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
1070	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
1071
1072	io_req.bi_opf = opf;
 
1073	io_req.mem.type = DM_IO_PAGE_LIST;
1074	if (ic->journal_io)
1075		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
1076	else
1077		io_req.mem.ptr.pl = &ic->journal[pl_index];
1078	io_req.mem.offset = pl_offset;
1079	if (likely(comp != NULL)) {
1080		io_req.notify.fn = complete_journal_io;
1081		io_req.notify.context = comp;
1082	} else {
1083		io_req.notify.fn = NULL;
1084	}
1085	io_req.client = ic->io;
1086	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
1087	io_loc.sector = ic->start + SB_SECTORS + sector;
1088	io_loc.count = n_sectors;
1089
1090	r = dm_io(&io_req, 1, &io_loc, NULL);
1091	if (unlikely(r)) {
1092		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
1093				      "reading journal" : "writing journal", r);
1094		if (comp) {
1095			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
1096			complete_journal_io(-1UL, comp);
1097		}
1098	}
1099}
1100
1101static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
1102		       unsigned section, unsigned n_sections,
1103		       struct journal_completion *comp)
1104{
1105	unsigned sector, n_sectors;
1106
1107	sector = section * ic->journal_section_sectors;
1108	n_sectors = n_sections * ic->journal_section_sectors;
1109
1110	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
1111}
1112
1113static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
1114{
1115	struct journal_completion io_comp;
1116	struct journal_completion crypt_comp_1;
1117	struct journal_completion crypt_comp_2;
1118	unsigned i;
1119
1120	io_comp.ic = ic;
1121	init_completion(&io_comp.comp);
1122
1123	if (commit_start + commit_sections <= ic->journal_sections) {
1124		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1125		if (ic->journal_io) {
1126			crypt_comp_1.ic = ic;
1127			init_completion(&crypt_comp_1.comp);
1128			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1129			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
1130			wait_for_completion_io(&crypt_comp_1.comp);
1131		} else {
1132			for (i = 0; i < commit_sections; i++)
1133				rw_section_mac(ic, commit_start + i, true);
1134		}
1135		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
1136			   commit_sections, &io_comp);
1137	} else {
1138		unsigned to_end;
1139		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
1140		to_end = ic->journal_sections - commit_start;
1141		if (ic->journal_io) {
1142			crypt_comp_1.ic = ic;
1143			init_completion(&crypt_comp_1.comp);
1144			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1145			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
1146			if (try_wait_for_completion(&crypt_comp_1.comp)) {
1147				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
1148					   commit_start, to_end, &io_comp);
1149				reinit_completion(&crypt_comp_1.comp);
1150				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1151				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
1152				wait_for_completion_io(&crypt_comp_1.comp);
1153			} else {
1154				crypt_comp_2.ic = ic;
1155				init_completion(&crypt_comp_2.comp);
1156				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
1157				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
1158				wait_for_completion_io(&crypt_comp_1.comp);
1159				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
1160				wait_for_completion_io(&crypt_comp_2.comp);
1161			}
1162		} else {
1163			for (i = 0; i < to_end; i++)
1164				rw_section_mac(ic, commit_start + i, true);
1165			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
1166			for (i = 0; i < commit_sections - to_end; i++)
1167				rw_section_mac(ic, i, true);
1168		}
1169		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
1170	}
1171
1172	wait_for_completion_io(&io_comp.comp);
1173}
1174
1175static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
1176			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
1177{
1178	struct dm_io_request io_req;
1179	struct dm_io_region io_loc;
1180	int r;
1181	unsigned sector, pl_index, pl_offset;
1182
1183	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
1184
1185	if (unlikely(dm_integrity_failed(ic))) {
1186		fn(-1UL, data);
1187		return;
1188	}
1189
1190	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
1191
1192	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
1193	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
1194
1195	io_req.bi_opf = REQ_OP_WRITE;
 
1196	io_req.mem.type = DM_IO_PAGE_LIST;
1197	io_req.mem.ptr.pl = &ic->journal[pl_index];
1198	io_req.mem.offset = pl_offset;
1199	io_req.notify.fn = fn;
1200	io_req.notify.context = data;
1201	io_req.client = ic->io;
1202	io_loc.bdev = ic->dev->bdev;
1203	io_loc.sector = target;
1204	io_loc.count = n_sectors;
1205
1206	r = dm_io(&io_req, 1, &io_loc, NULL);
1207	if (unlikely(r)) {
1208		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
1209		fn(-1UL, data);
1210	}
1211}
1212
1213static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
1214{
1215	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
1216	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
1217}
1218
1219static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1220{
1221	struct rb_node **n = &ic->in_progress.rb_node;
1222	struct rb_node *parent;
1223
1224	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
1225
1226	if (likely(check_waiting)) {
1227		struct dm_integrity_range *range;
1228		list_for_each_entry(range, &ic->wait_list, wait_entry) {
1229			if (unlikely(ranges_overlap(range, new_range)))
1230				return false;
1231		}
1232	}
1233
1234	parent = NULL;
1235
1236	while (*n) {
1237		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
1238
1239		parent = *n;
1240		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
1241			n = &range->node.rb_left;
1242		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
1243			n = &range->node.rb_right;
1244		} else {
1245			return false;
1246		}
1247	}
1248
1249	rb_link_node(&new_range->node, parent, n);
1250	rb_insert_color(&new_range->node, &ic->in_progress);
1251
1252	return true;
1253}
1254
1255static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1256{
1257	rb_erase(&range->node, &ic->in_progress);
1258	while (unlikely(!list_empty(&ic->wait_list))) {
1259		struct dm_integrity_range *last_range =
1260			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1261		struct task_struct *last_range_task;
1262		last_range_task = last_range->task;
1263		list_del(&last_range->wait_entry);
1264		if (!add_new_range(ic, last_range, false)) {
1265			last_range->task = last_range_task;
1266			list_add(&last_range->wait_entry, &ic->wait_list);
1267			break;
1268		}
1269		last_range->waiting = false;
1270		wake_up_process(last_range_task);
1271	}
1272}
1273
1274static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1275{
1276	unsigned long flags;
1277
1278	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1279	remove_range_unlocked(ic, range);
1280	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1281}
1282
1283static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1284{
1285	new_range->waiting = true;
1286	list_add_tail(&new_range->wait_entry, &ic->wait_list);
1287	new_range->task = current;
1288	do {
1289		__set_current_state(TASK_UNINTERRUPTIBLE);
1290		spin_unlock_irq(&ic->endio_wait.lock);
1291		io_schedule();
1292		spin_lock_irq(&ic->endio_wait.lock);
1293	} while (unlikely(new_range->waiting));
1294}
1295
1296static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1297{
1298	if (unlikely(!add_new_range(ic, new_range, true)))
1299		wait_and_add_new_range(ic, new_range);
1300}
1301
1302static void init_journal_node(struct journal_node *node)
1303{
1304	RB_CLEAR_NODE(&node->node);
1305	node->sector = (sector_t)-1;
1306}
1307
1308static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1309{
1310	struct rb_node **link;
1311	struct rb_node *parent;
1312
1313	node->sector = sector;
1314	BUG_ON(!RB_EMPTY_NODE(&node->node));
1315
1316	link = &ic->journal_tree_root.rb_node;
1317	parent = NULL;
1318
1319	while (*link) {
1320		struct journal_node *j;
1321		parent = *link;
1322		j = container_of(parent, struct journal_node, node);
1323		if (sector < j->sector)
1324			link = &j->node.rb_left;
1325		else
1326			link = &j->node.rb_right;
1327	}
1328
1329	rb_link_node(&node->node, parent, link);
1330	rb_insert_color(&node->node, &ic->journal_tree_root);
1331}
1332
1333static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1334{
1335	BUG_ON(RB_EMPTY_NODE(&node->node));
1336	rb_erase(&node->node, &ic->journal_tree_root);
1337	init_journal_node(node);
1338}
1339
1340#define NOT_FOUND	(-1U)
1341
1342static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1343{
1344	struct rb_node *n = ic->journal_tree_root.rb_node;
1345	unsigned found = NOT_FOUND;
1346	*next_sector = (sector_t)-1;
1347	while (n) {
1348		struct journal_node *j = container_of(n, struct journal_node, node);
1349		if (sector == j->sector) {
1350			found = j - ic->journal_tree;
1351		}
1352		if (sector < j->sector) {
1353			*next_sector = j->sector;
1354			n = j->node.rb_left;
1355		} else {
1356			n = j->node.rb_right;
1357		}
1358	}
1359
1360	return found;
1361}
1362
1363static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
1364{
1365	struct journal_node *node, *next_node;
1366	struct rb_node *next;
1367
1368	if (unlikely(pos >= ic->journal_entries))
1369		return false;
1370	node = &ic->journal_tree[pos];
1371	if (unlikely(RB_EMPTY_NODE(&node->node)))
1372		return false;
1373	if (unlikely(node->sector != sector))
1374		return false;
1375
1376	next = rb_next(&node->node);
1377	if (unlikely(!next))
1378		return true;
1379
1380	next_node = container_of(next, struct journal_node, node);
1381	return next_node->sector != sector;
1382}
1383
1384static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1385{
1386	struct rb_node *next;
1387	struct journal_node *next_node;
1388	unsigned next_section;
1389
1390	BUG_ON(RB_EMPTY_NODE(&node->node));
1391
1392	next = rb_next(&node->node);
1393	if (unlikely(!next))
1394		return false;
1395
1396	next_node = container_of(next, struct journal_node, node);
1397
1398	if (next_node->sector != node->sector)
1399		return false;
1400
1401	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1402	if (next_section >= ic->committed_section &&
1403	    next_section < ic->committed_section + ic->n_committed_sections)
1404		return true;
1405	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1406		return true;
1407
1408	return false;
1409}
1410
1411#define TAG_READ	0
1412#define TAG_WRITE	1
1413#define TAG_CMP		2
1414
1415static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1416			       unsigned *metadata_offset, unsigned total_size, int op)
1417{
1418#define MAY_BE_FILLER		1
1419#define MAY_BE_HASH		2
1420	unsigned hash_offset = 0;
1421	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1422
1423	do {
1424		unsigned char *data, *dp;
1425		struct dm_buffer *b;
1426		unsigned to_copy;
1427		int r;
1428
1429		r = dm_integrity_failed(ic);
1430		if (unlikely(r))
1431			return r;
1432
1433		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1434		if (IS_ERR(data))
1435			return PTR_ERR(data);
1436
1437		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1438		dp = data + *metadata_offset;
1439		if (op == TAG_READ) {
1440			memcpy(tag, dp, to_copy);
1441		} else if (op == TAG_WRITE) {
1442			if (memcmp(dp, tag, to_copy)) {
1443				memcpy(dp, tag, to_copy);
1444				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1445			}
1446		} else {
1447			/* e.g.: op == TAG_CMP */
1448
1449			if (likely(is_power_of_2(ic->tag_size))) {
1450				if (unlikely(memcmp(dp, tag, to_copy)))
1451					if (unlikely(!ic->discard) ||
1452					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
1453						goto thorough_test;
1454				}
1455			} else {
1456				unsigned i, ts;
1457thorough_test:
1458				ts = total_size;
1459
1460				for (i = 0; i < to_copy; i++, ts--) {
1461					if (unlikely(dp[i] != tag[i]))
1462						may_be &= ~MAY_BE_HASH;
1463					if (likely(dp[i] != DISCARD_FILLER))
1464						may_be &= ~MAY_BE_FILLER;
1465					hash_offset++;
1466					if (unlikely(hash_offset == ic->tag_size)) {
1467						if (unlikely(!may_be)) {
1468							dm_bufio_release(b);
1469							return ts;
1470						}
1471						hash_offset = 0;
1472						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1473					}
1474				}
1475			}
1476		}
1477		dm_bufio_release(b);
1478
1479		tag += to_copy;
1480		*metadata_offset += to_copy;
1481		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1482			(*metadata_block)++;
1483			*metadata_offset = 0;
1484		}
1485
1486		if (unlikely(!is_power_of_2(ic->tag_size))) {
1487			hash_offset = (hash_offset + to_copy) % ic->tag_size;
1488		}
1489
1490		total_size -= to_copy;
1491	} while (unlikely(total_size));
1492
1493	return 0;
1494#undef MAY_BE_FILLER
1495#undef MAY_BE_HASH
1496}
1497
1498struct flush_request {
1499	struct dm_io_request io_req;
1500	struct dm_io_region io_reg;
1501	struct dm_integrity_c *ic;
1502	struct completion comp;
1503};
1504
1505static void flush_notify(unsigned long error, void *fr_)
1506{
1507	struct flush_request *fr = fr_;
1508	if (unlikely(error != 0))
1509		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
1510	complete(&fr->comp);
1511}
1512
1513static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1514{
1515	int r;
1516
1517	struct flush_request fr;
1518
1519	if (!ic->meta_dev)
1520		flush_data = false;
1521	if (flush_data) {
1522		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
1523		fr.io_req.mem.type = DM_IO_KMEM,
1524		fr.io_req.mem.ptr.addr = NULL,
1525		fr.io_req.notify.fn = flush_notify,
1526		fr.io_req.notify.context = &fr;
1527		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
1528		fr.io_reg.bdev = ic->dev->bdev,
1529		fr.io_reg.sector = 0,
1530		fr.io_reg.count = 0,
1531		fr.ic = ic;
1532		init_completion(&fr.comp);
1533		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
1534		BUG_ON(r);
1535	}
1536
1537	r = dm_bufio_write_dirty_buffers(ic->bufio);
1538	if (unlikely(r))
1539		dm_integrity_io_error(ic, "writing tags", r);
1540
1541	if (flush_data)
1542		wait_for_completion(&fr.comp);
1543}
1544
1545static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1546{
1547	DECLARE_WAITQUEUE(wait, current);
1548	__add_wait_queue(&ic->endio_wait, &wait);
1549	__set_current_state(TASK_UNINTERRUPTIBLE);
1550	spin_unlock_irq(&ic->endio_wait.lock);
1551	io_schedule();
1552	spin_lock_irq(&ic->endio_wait.lock);
1553	__remove_wait_queue(&ic->endio_wait, &wait);
1554}
1555
1556static void autocommit_fn(struct timer_list *t)
1557{
1558	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1559
1560	if (likely(!dm_integrity_failed(ic)))
1561		queue_work(ic->commit_wq, &ic->commit_work);
1562}
1563
1564static void schedule_autocommit(struct dm_integrity_c *ic)
1565{
1566	if (!timer_pending(&ic->autocommit_timer))
1567		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1568}
1569
1570static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1571{
1572	struct bio *bio;
1573	unsigned long flags;
1574
1575	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1576	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1577	bio_list_add(&ic->flush_bio_list, bio);
1578	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1579
1580	queue_work(ic->commit_wq, &ic->commit_work);
1581}
1582
1583static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1584{
1585	int r = dm_integrity_failed(ic);
1586	if (unlikely(r) && !bio->bi_status)
1587		bio->bi_status = errno_to_blk_status(r);
1588	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1589		unsigned long flags;
1590		spin_lock_irqsave(&ic->endio_wait.lock, flags);
1591		bio_list_add(&ic->synchronous_bios, bio);
1592		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1593		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1594		return;
1595	}
1596	bio_endio(bio);
1597}
1598
1599static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1600{
1601	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1602
1603	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1604		submit_flush_bio(ic, dio);
1605	else
1606		do_endio(ic, bio);
1607}
1608
1609static void dec_in_flight(struct dm_integrity_io *dio)
1610{
1611	if (atomic_dec_and_test(&dio->in_flight)) {
1612		struct dm_integrity_c *ic = dio->ic;
1613		struct bio *bio;
1614
1615		remove_range(ic, &dio->range);
1616
1617		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1618			schedule_autocommit(ic);
1619
1620		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1621
1622		if (unlikely(dio->bi_status) && !bio->bi_status)
1623			bio->bi_status = dio->bi_status;
1624		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1625			dio->range.logical_sector += dio->range.n_sectors;
1626			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1627			INIT_WORK(&dio->work, integrity_bio_wait);
1628			queue_work(ic->offload_wq, &dio->work);
1629			return;
1630		}
1631		do_endio_flush(ic, dio);
1632	}
1633}
1634
1635static void integrity_end_io(struct bio *bio)
1636{
1637	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1638
1639	dm_bio_restore(&dio->bio_details, bio);
1640	if (bio->bi_integrity)
1641		bio->bi_opf |= REQ_INTEGRITY;
1642
1643	if (dio->completion)
1644		complete(dio->completion);
1645
1646	dec_in_flight(dio);
1647}
1648
1649static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1650				      const char *data, char *result)
1651{
1652	__le64 sector_le = cpu_to_le64(sector);
1653	SHASH_DESC_ON_STACK(req, ic->internal_hash);
1654	int r;
1655	unsigned digest_size;
1656
1657	req->tfm = ic->internal_hash;
1658
1659	r = crypto_shash_init(req);
1660	if (unlikely(r < 0)) {
1661		dm_integrity_io_error(ic, "crypto_shash_init", r);
1662		goto failed;
1663	}
1664
1665	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
1666		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
1667		if (unlikely(r < 0)) {
1668			dm_integrity_io_error(ic, "crypto_shash_update", r);
1669			goto failed;
1670		}
1671	}
1672
1673	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1674	if (unlikely(r < 0)) {
1675		dm_integrity_io_error(ic, "crypto_shash_update", r);
1676		goto failed;
1677	}
1678
1679	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1680	if (unlikely(r < 0)) {
1681		dm_integrity_io_error(ic, "crypto_shash_update", r);
1682		goto failed;
1683	}
1684
1685	r = crypto_shash_final(req, result);
1686	if (unlikely(r < 0)) {
1687		dm_integrity_io_error(ic, "crypto_shash_final", r);
1688		goto failed;
1689	}
1690
1691	digest_size = crypto_shash_digestsize(ic->internal_hash);
1692	if (unlikely(digest_size < ic->tag_size))
1693		memset(result + digest_size, 0, ic->tag_size - digest_size);
1694
1695	return;
1696
1697failed:
1698	/* this shouldn't happen anyway, the hash functions have no reason to fail */
1699	get_random_bytes(result, ic->tag_size);
1700}
1701
1702static void integrity_metadata(struct work_struct *w)
1703{
1704	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1705	struct dm_integrity_c *ic = dio->ic;
1706
1707	int r;
1708
1709	if (ic->internal_hash) {
1710		struct bvec_iter iter;
1711		struct bio_vec bv;
1712		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1713		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1714		char *checksums;
1715		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1716		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1717		sector_t sector;
1718		unsigned sectors_to_process;
1719
1720		if (unlikely(ic->mode == 'R'))
1721			goto skip_io;
1722
1723		if (likely(dio->op != REQ_OP_DISCARD))
1724			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1725					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1726		else
1727			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1728		if (!checksums) {
1729			checksums = checksums_onstack;
1730			if (WARN_ON(extra_space &&
1731				    digest_size > sizeof(checksums_onstack))) {
1732				r = -EINVAL;
1733				goto error;
1734			}
1735		}
1736
1737		if (unlikely(dio->op == REQ_OP_DISCARD)) {
1738			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1739			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1740			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1741			unsigned max_blocks = max_size / ic->tag_size;
1742			memset(checksums, DISCARD_FILLER, max_size);
1743
1744			while (bi_size) {
1745				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1746				this_step_blocks = min(this_step_blocks, max_blocks);
1747				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1748							this_step_blocks * ic->tag_size, TAG_WRITE);
1749				if (unlikely(r)) {
1750					if (likely(checksums != checksums_onstack))
1751						kfree(checksums);
1752					goto error;
1753				}
1754
1755				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1756					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
1757					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
1758					BUG();
1759				}*/
1760				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1761				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1762			}
1763
1764			if (likely(checksums != checksums_onstack))
1765				kfree(checksums);
1766			goto skip_io;
1767		}
1768
1769		sector = dio->range.logical_sector;
1770		sectors_to_process = dio->range.n_sectors;
1771
1772		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1773			unsigned pos;
1774			char *mem, *checksums_ptr;
1775
1776again:
1777			mem = bvec_kmap_local(&bv);
1778			pos = 0;
1779			checksums_ptr = checksums;
1780			do {
1781				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1782				checksums_ptr += ic->tag_size;
1783				sectors_to_process -= ic->sectors_per_block;
1784				pos += ic->sectors_per_block << SECTOR_SHIFT;
1785				sector += ic->sectors_per_block;
1786			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1787			kunmap_local(mem);
1788
1789			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1790						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1791			if (unlikely(r)) {
1792				if (r > 0) {
1793					sector_t s;
1794
1795					s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
1796					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
1797						    bio->bi_bdev, s);
1798					r = -EILSEQ;
1799					atomic64_inc(&ic->number_of_mismatches);
1800					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
1801							 bio, s, 0);
1802				}
1803				if (likely(checksums != checksums_onstack))
1804					kfree(checksums);
1805				goto error;
1806			}
1807
1808			if (!sectors_to_process)
1809				break;
1810
1811			if (unlikely(pos < bv.bv_len)) {
1812				bv.bv_offset += pos;
1813				bv.bv_len -= pos;
1814				goto again;
1815			}
1816		}
1817
1818		if (likely(checksums != checksums_onstack))
1819			kfree(checksums);
1820	} else {
1821		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1822
1823		if (bip) {
1824			struct bio_vec biv;
1825			struct bvec_iter iter;
1826			unsigned data_to_process = dio->range.n_sectors;
1827			sector_to_block(ic, data_to_process);
1828			data_to_process *= ic->tag_size;
1829
1830			bip_for_each_vec(biv, bip, iter) {
1831				unsigned char *tag;
1832				unsigned this_len;
1833
1834				BUG_ON(PageHighMem(biv.bv_page));
1835				tag = bvec_virt(&biv);
1836				this_len = min(biv.bv_len, data_to_process);
1837				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1838							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1839				if (unlikely(r))
1840					goto error;
1841				data_to_process -= this_len;
1842				if (!data_to_process)
1843					break;
1844			}
1845		}
1846	}
1847skip_io:
1848	dec_in_flight(dio);
1849	return;
1850error:
1851	dio->bi_status = errno_to_blk_status(r);
1852	dec_in_flight(dio);
1853}
1854
1855static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1856{
1857	struct dm_integrity_c *ic = ti->private;
1858	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1859	struct bio_integrity_payload *bip;
1860
1861	sector_t area, offset;
1862
1863	dio->ic = ic;
1864	dio->bi_status = 0;
1865	dio->op = bio_op(bio);
1866
1867	if (unlikely(dio->op == REQ_OP_DISCARD)) {
1868		if (ti->max_io_len) {
1869			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1870			unsigned log2_max_io_len = __fls(ti->max_io_len);
1871			sector_t start_boundary = sec >> log2_max_io_len;
1872			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1873			if (start_boundary < end_boundary) {
1874				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1875				dm_accept_partial_bio(bio, len);
1876			}
1877		}
1878	}
1879
1880	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1881		submit_flush_bio(ic, dio);
1882		return DM_MAPIO_SUBMITTED;
1883	}
1884
1885	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1886	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1887	if (unlikely(dio->fua)) {
1888		/*
1889		 * Don't pass down the FUA flag because we have to flush
1890		 * disk cache anyway.
1891		 */
1892		bio->bi_opf &= ~REQ_FUA;
1893	}
1894	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1895		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1896		      dio->range.logical_sector, bio_sectors(bio),
1897		      ic->provided_data_sectors);
1898		return DM_MAPIO_KILL;
1899	}
1900	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1901		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1902		      ic->sectors_per_block,
1903		      dio->range.logical_sector, bio_sectors(bio));
1904		return DM_MAPIO_KILL;
1905	}
1906
1907	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1908		struct bvec_iter iter;
1909		struct bio_vec bv;
1910		bio_for_each_segment(bv, bio, iter) {
1911			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1912				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1913					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1914				return DM_MAPIO_KILL;
1915			}
1916		}
1917	}
1918
1919	bip = bio_integrity(bio);
1920	if (!ic->internal_hash) {
1921		if (bip) {
1922			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1923			if (ic->log2_tag_size >= 0)
1924				wanted_tag_size <<= ic->log2_tag_size;
1925			else
1926				wanted_tag_size *= ic->tag_size;
1927			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1928				DMERR("Invalid integrity data size %u, expected %u",
1929				      bip->bip_iter.bi_size, wanted_tag_size);
1930				return DM_MAPIO_KILL;
1931			}
1932		}
1933	} else {
1934		if (unlikely(bip != NULL)) {
1935			DMERR("Unexpected integrity data when using internal hash");
1936			return DM_MAPIO_KILL;
1937		}
1938	}
1939
1940	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1941		return DM_MAPIO_KILL;
1942
1943	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1944	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1945	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1946
1947	dm_integrity_map_continue(dio, true);
1948	return DM_MAPIO_SUBMITTED;
1949}
1950
1951static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1952				 unsigned journal_section, unsigned journal_entry)
1953{
1954	struct dm_integrity_c *ic = dio->ic;
1955	sector_t logical_sector;
1956	unsigned n_sectors;
1957
1958	logical_sector = dio->range.logical_sector;
1959	n_sectors = dio->range.n_sectors;
1960	do {
1961		struct bio_vec bv = bio_iovec(bio);
1962		char *mem;
1963
1964		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1965			bv.bv_len = n_sectors << SECTOR_SHIFT;
1966		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1967		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1968retry_kmap:
1969		mem = kmap_local_page(bv.bv_page);
1970		if (likely(dio->op == REQ_OP_WRITE))
1971			flush_dcache_page(bv.bv_page);
1972
1973		do {
1974			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1975
1976			if (unlikely(dio->op == REQ_OP_READ)) {
1977				struct journal_sector *js;
1978				char *mem_ptr;
1979				unsigned s;
1980
1981				if (unlikely(journal_entry_is_inprogress(je))) {
1982					flush_dcache_page(bv.bv_page);
1983					kunmap_local(mem);
1984
1985					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1986					goto retry_kmap;
1987				}
1988				smp_rmb();
1989				BUG_ON(journal_entry_get_sector(je) != logical_sector);
1990				js = access_journal_data(ic, journal_section, journal_entry);
1991				mem_ptr = mem + bv.bv_offset;
1992				s = 0;
1993				do {
1994					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1995					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1996					js++;
1997					mem_ptr += 1 << SECTOR_SHIFT;
1998				} while (++s < ic->sectors_per_block);
1999#ifdef INTERNAL_VERIFY
2000				if (ic->internal_hash) {
2001					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2002
2003					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
2004					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
2005						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
2006							    logical_sector);
2007						dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
2008								 bio, logical_sector, 0);
2009					}
2010				}
2011#endif
2012			}
2013
2014			if (!ic->internal_hash) {
2015				struct bio_integrity_payload *bip = bio_integrity(bio);
2016				unsigned tag_todo = ic->tag_size;
2017				char *tag_ptr = journal_entry_tag(ic, je);
2018
2019				if (bip) do {
2020					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
2021					unsigned tag_now = min(biv.bv_len, tag_todo);
2022					char *tag_addr;
2023					BUG_ON(PageHighMem(biv.bv_page));
2024					tag_addr = bvec_virt(&biv);
2025					if (likely(dio->op == REQ_OP_WRITE))
2026						memcpy(tag_ptr, tag_addr, tag_now);
2027					else
2028						memcpy(tag_addr, tag_ptr, tag_now);
2029					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
2030					tag_ptr += tag_now;
2031					tag_todo -= tag_now;
2032				} while (unlikely(tag_todo)); else {
2033					if (likely(dio->op == REQ_OP_WRITE))
2034						memset(tag_ptr, 0, tag_todo);
2035				}
2036			}
2037
2038			if (likely(dio->op == REQ_OP_WRITE)) {
2039				struct journal_sector *js;
2040				unsigned s;
2041
2042				js = access_journal_data(ic, journal_section, journal_entry);
2043				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2044
2045				s = 0;
2046				do {
2047					je->last_bytes[s] = js[s].commit_id;
2048				} while (++s < ic->sectors_per_block);
2049
2050				if (ic->internal_hash) {
2051					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
2052					if (unlikely(digest_size > ic->tag_size)) {
2053						char checksums_onstack[HASH_MAX_DIGESTSIZE];
2054						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2055						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2056					} else
2057						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2058				}
2059
2060				journal_entry_set_sector(je, logical_sector);
2061			}
2062			logical_sector += ic->sectors_per_block;
2063
2064			journal_entry++;
2065			if (unlikely(journal_entry == ic->journal_section_entries)) {
2066				journal_entry = 0;
2067				journal_section++;
2068				wraparound_section(ic, &journal_section);
2069			}
2070
2071			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2072		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2073
2074		if (unlikely(dio->op == REQ_OP_READ))
2075			flush_dcache_page(bv.bv_page);
2076		kunmap_local(mem);
2077	} while (n_sectors);
2078
2079	if (likely(dio->op == REQ_OP_WRITE)) {
2080		smp_mb();
2081		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2082			wake_up(&ic->copy_to_journal_wait);
2083		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
2084			queue_work(ic->commit_wq, &ic->commit_work);
2085		} else {
2086			schedule_autocommit(ic);
2087		}
2088	} else {
2089		remove_range(ic, &dio->range);
2090	}
2091
2092	if (unlikely(bio->bi_iter.bi_size)) {
2093		sector_t area, offset;
2094
2095		dio->range.logical_sector = logical_sector;
2096		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2097		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2098		return true;
2099	}
2100
2101	return false;
2102}
2103
2104static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2105{
2106	struct dm_integrity_c *ic = dio->ic;
2107	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2108	unsigned journal_section, journal_entry;
2109	unsigned journal_read_pos;
2110	struct completion read_comp;
2111	bool discard_retried = false;
2112	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2113	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2114		need_sync_io = true;
2115
2116	if (need_sync_io && from_map) {
2117		INIT_WORK(&dio->work, integrity_bio_wait);
2118		queue_work(ic->offload_wq, &dio->work);
2119		return;
2120	}
2121
2122lock_retry:
2123	spin_lock_irq(&ic->endio_wait.lock);
2124retry:
2125	if (unlikely(dm_integrity_failed(ic))) {
2126		spin_unlock_irq(&ic->endio_wait.lock);
2127		do_endio(ic, bio);
2128		return;
2129	}
2130	dio->range.n_sectors = bio_sectors(bio);
2131	journal_read_pos = NOT_FOUND;
2132	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2133		if (dio->op == REQ_OP_WRITE) {
2134			unsigned next_entry, i, pos;
2135			unsigned ws, we, range_sectors;
2136
2137			dio->range.n_sectors = min(dio->range.n_sectors,
2138						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2139			if (unlikely(!dio->range.n_sectors)) {
2140				if (from_map)
2141					goto offload_to_thread;
2142				sleep_on_endio_wait(ic);
2143				goto retry;
2144			}
2145			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2146			ic->free_sectors -= range_sectors;
2147			journal_section = ic->free_section;
2148			journal_entry = ic->free_section_entry;
2149
2150			next_entry = ic->free_section_entry + range_sectors;
2151			ic->free_section_entry = next_entry % ic->journal_section_entries;
2152			ic->free_section += next_entry / ic->journal_section_entries;
2153			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2154			wraparound_section(ic, &ic->free_section);
2155
2156			pos = journal_section * ic->journal_section_entries + journal_entry;
2157			ws = journal_section;
2158			we = journal_entry;
2159			i = 0;
2160			do {
2161				struct journal_entry *je;
2162
2163				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2164				pos++;
2165				if (unlikely(pos >= ic->journal_entries))
2166					pos = 0;
2167
2168				je = access_journal_entry(ic, ws, we);
2169				BUG_ON(!journal_entry_is_unused(je));
2170				journal_entry_set_inprogress(je);
2171				we++;
2172				if (unlikely(we == ic->journal_section_entries)) {
2173					we = 0;
2174					ws++;
2175					wraparound_section(ic, &ws);
2176				}
2177			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2178
2179			spin_unlock_irq(&ic->endio_wait.lock);
2180			goto journal_read_write;
2181		} else {
2182			sector_t next_sector;
2183			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2184			if (likely(journal_read_pos == NOT_FOUND)) {
2185				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2186					dio->range.n_sectors = next_sector - dio->range.logical_sector;
2187			} else {
2188				unsigned i;
2189				unsigned jp = journal_read_pos + 1;
2190				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2191					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2192						break;
2193				}
2194				dio->range.n_sectors = i;
2195			}
2196		}
2197	}
2198	if (unlikely(!add_new_range(ic, &dio->range, true))) {
2199		/*
2200		 * We must not sleep in the request routine because it could
2201		 * stall bios on current->bio_list.
2202		 * So, we offload the bio to a workqueue if we have to sleep.
2203		 */
2204		if (from_map) {
2205offload_to_thread:
2206			spin_unlock_irq(&ic->endio_wait.lock);
2207			INIT_WORK(&dio->work, integrity_bio_wait);
2208			queue_work(ic->wait_wq, &dio->work);
2209			return;
2210		}
2211		if (journal_read_pos != NOT_FOUND)
2212			dio->range.n_sectors = ic->sectors_per_block;
2213		wait_and_add_new_range(ic, &dio->range);
2214		/*
2215		 * wait_and_add_new_range drops the spinlock, so the journal
2216		 * may have been changed arbitrarily. We need to recheck.
2217		 * To simplify the code, we restrict I/O size to just one block.
2218		 */
2219		if (journal_read_pos != NOT_FOUND) {
2220			sector_t next_sector;
2221			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2222			if (unlikely(new_pos != journal_read_pos)) {
2223				remove_range_unlocked(ic, &dio->range);
2224				goto retry;
2225			}
2226		}
2227	}
2228	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2229		sector_t next_sector;
2230		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2231		if (unlikely(new_pos != NOT_FOUND) ||
2232		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2233			remove_range_unlocked(ic, &dio->range);
2234			spin_unlock_irq(&ic->endio_wait.lock);
2235			queue_work(ic->commit_wq, &ic->commit_work);
2236			flush_workqueue(ic->commit_wq);
2237			queue_work(ic->writer_wq, &ic->writer_work);
2238			flush_workqueue(ic->writer_wq);
2239			discard_retried = true;
2240			goto lock_retry;
2241		}
2242	}
2243	spin_unlock_irq(&ic->endio_wait.lock);
2244
2245	if (unlikely(journal_read_pos != NOT_FOUND)) {
2246		journal_section = journal_read_pos / ic->journal_section_entries;
2247		journal_entry = journal_read_pos % ic->journal_section_entries;
2248		goto journal_read_write;
2249	}
2250
2251	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2252		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2253				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2254			struct bitmap_block_status *bbs;
2255
2256			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2257			spin_lock(&bbs->bio_queue_lock);
2258			bio_list_add(&bbs->bio_queue, bio);
2259			spin_unlock(&bbs->bio_queue_lock);
2260			queue_work(ic->writer_wq, &bbs->work);
2261			return;
2262		}
2263	}
2264
2265	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2266
2267	if (need_sync_io) {
2268		init_completion(&read_comp);
2269		dio->completion = &read_comp;
2270	} else
2271		dio->completion = NULL;
2272
2273	dm_bio_record(&dio->bio_details, bio);
2274	bio_set_dev(bio, ic->dev->bdev);
2275	bio->bi_integrity = NULL;
2276	bio->bi_opf &= ~REQ_INTEGRITY;
2277	bio->bi_end_io = integrity_end_io;
2278	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2279
2280	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2281		integrity_metadata(&dio->work);
2282		dm_integrity_flush_buffers(ic, false);
2283
2284		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2285		dio->completion = NULL;
2286
2287		submit_bio_noacct(bio);
2288
2289		return;
2290	}
2291
2292	submit_bio_noacct(bio);
2293
2294	if (need_sync_io) {
2295		wait_for_completion_io(&read_comp);
2296		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2297		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2298			goto skip_check;
2299		if (ic->mode == 'B') {
2300			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2301					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2302				goto skip_check;
2303		}
2304
2305		if (likely(!bio->bi_status))
2306			integrity_metadata(&dio->work);
2307		else
2308skip_check:
2309			dec_in_flight(dio);
2310
2311	} else {
2312		INIT_WORK(&dio->work, integrity_metadata);
2313		queue_work(ic->metadata_wq, &dio->work);
2314	}
2315
2316	return;
2317
2318journal_read_write:
2319	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2320		goto lock_retry;
2321
2322	do_endio_flush(ic, dio);
2323}
2324
2325
2326static void integrity_bio_wait(struct work_struct *w)
2327{
2328	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2329
2330	dm_integrity_map_continue(dio, false);
2331}
2332
2333static void pad_uncommitted(struct dm_integrity_c *ic)
2334{
2335	if (ic->free_section_entry) {
2336		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2337		ic->free_section_entry = 0;
2338		ic->free_section++;
2339		wraparound_section(ic, &ic->free_section);
2340		ic->n_uncommitted_sections++;
2341	}
2342	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2343		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2344		    ic->journal_section_entries + ic->free_sectors)) {
2345		DMCRIT("journal_sections %u, journal_section_entries %u, "
2346		       "n_uncommitted_sections %u, n_committed_sections %u, "
2347		       "journal_section_entries %u, free_sectors %u",
2348		       ic->journal_sections, ic->journal_section_entries,
2349		       ic->n_uncommitted_sections, ic->n_committed_sections,
2350		       ic->journal_section_entries, ic->free_sectors);
2351	}
2352}
2353
2354static void integrity_commit(struct work_struct *w)
2355{
2356	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2357	unsigned commit_start, commit_sections;
2358	unsigned i, j, n;
2359	struct bio *flushes;
2360
2361	del_timer(&ic->autocommit_timer);
2362
2363	spin_lock_irq(&ic->endio_wait.lock);
2364	flushes = bio_list_get(&ic->flush_bio_list);
2365	if (unlikely(ic->mode != 'J')) {
2366		spin_unlock_irq(&ic->endio_wait.lock);
2367		dm_integrity_flush_buffers(ic, true);
2368		goto release_flush_bios;
2369	}
2370
2371	pad_uncommitted(ic);
2372	commit_start = ic->uncommitted_section;
2373	commit_sections = ic->n_uncommitted_sections;
2374	spin_unlock_irq(&ic->endio_wait.lock);
2375
2376	if (!commit_sections)
2377		goto release_flush_bios;
2378
2379	ic->wrote_to_journal = true;
2380
2381	i = commit_start;
2382	for (n = 0; n < commit_sections; n++) {
2383		for (j = 0; j < ic->journal_section_entries; j++) {
2384			struct journal_entry *je;
2385			je = access_journal_entry(ic, i, j);
2386			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2387		}
2388		for (j = 0; j < ic->journal_section_sectors; j++) {
2389			struct journal_sector *js;
2390			js = access_journal(ic, i, j);
2391			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2392		}
2393		i++;
2394		if (unlikely(i >= ic->journal_sections))
2395			ic->commit_seq = next_commit_seq(ic->commit_seq);
2396		wraparound_section(ic, &i);
2397	}
2398	smp_rmb();
2399
2400	write_journal(ic, commit_start, commit_sections);
2401
2402	spin_lock_irq(&ic->endio_wait.lock);
2403	ic->uncommitted_section += commit_sections;
2404	wraparound_section(ic, &ic->uncommitted_section);
2405	ic->n_uncommitted_sections -= commit_sections;
2406	ic->n_committed_sections += commit_sections;
2407	spin_unlock_irq(&ic->endio_wait.lock);
2408
2409	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2410		queue_work(ic->writer_wq, &ic->writer_work);
2411
2412release_flush_bios:
2413	while (flushes) {
2414		struct bio *next = flushes->bi_next;
2415		flushes->bi_next = NULL;
2416		do_endio(ic, flushes);
2417		flushes = next;
2418	}
2419}
2420
2421static void complete_copy_from_journal(unsigned long error, void *context)
2422{
2423	struct journal_io *io = context;
2424	struct journal_completion *comp = io->comp;
2425	struct dm_integrity_c *ic = comp->ic;
2426	remove_range(ic, &io->range);
2427	mempool_free(io, &ic->journal_io_mempool);
2428	if (unlikely(error != 0))
2429		dm_integrity_io_error(ic, "copying from journal", -EIO);
2430	complete_journal_op(comp);
2431}
2432
2433static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2434			       struct journal_entry *je)
2435{
2436	unsigned s = 0;
2437	do {
2438		js->commit_id = je->last_bytes[s];
2439		js++;
2440	} while (++s < ic->sectors_per_block);
2441}
2442
2443static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2444			     unsigned write_sections, bool from_replay)
2445{
2446	unsigned i, j, n;
2447	struct journal_completion comp;
2448	struct blk_plug plug;
2449
2450	blk_start_plug(&plug);
2451
2452	comp.ic = ic;
2453	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2454	init_completion(&comp.comp);
2455
2456	i = write_start;
2457	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2458#ifndef INTERNAL_VERIFY
2459		if (unlikely(from_replay))
2460#endif
2461			rw_section_mac(ic, i, false);
2462		for (j = 0; j < ic->journal_section_entries; j++) {
2463			struct journal_entry *je = access_journal_entry(ic, i, j);
2464			sector_t sec, area, offset;
2465			unsigned k, l, next_loop;
2466			sector_t metadata_block;
2467			unsigned metadata_offset;
2468			struct journal_io *io;
2469
2470			if (journal_entry_is_unused(je))
2471				continue;
2472			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2473			sec = journal_entry_get_sector(je);
2474			if (unlikely(from_replay)) {
2475				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2476					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2477					sec &= ~(sector_t)(ic->sectors_per_block - 1);
2478				}
2479				if (unlikely(sec >= ic->provided_data_sectors)) {
2480					journal_entry_set_unused(je);
2481					continue;
2482				}
2483			}
 
 
2484			get_area_and_offset(ic, sec, &area, &offset);
2485			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2486			for (k = j + 1; k < ic->journal_section_entries; k++) {
2487				struct journal_entry *je2 = access_journal_entry(ic, i, k);
2488				sector_t sec2, area2, offset2;
2489				if (journal_entry_is_unused(je2))
2490					break;
2491				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2492				sec2 = journal_entry_get_sector(je2);
2493				if (unlikely(sec2 >= ic->provided_data_sectors))
2494					break;
2495				get_area_and_offset(ic, sec2, &area2, &offset2);
2496				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2497					break;
2498				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2499			}
2500			next_loop = k - 1;
2501
2502			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2503			io->comp = &comp;
2504			io->range.logical_sector = sec;
2505			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2506
2507			spin_lock_irq(&ic->endio_wait.lock);
2508			add_new_range_and_wait(ic, &io->range);
2509
2510			if (likely(!from_replay)) {
2511				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2512
2513				/* don't write if there is newer committed sector */
2514				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2515					struct journal_entry *je2 = access_journal_entry(ic, i, j);
2516
2517					journal_entry_set_unused(je2);
2518					remove_journal_node(ic, &section_node[j]);
2519					j++;
2520					sec += ic->sectors_per_block;
2521					offset += ic->sectors_per_block;
2522				}
2523				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2524					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2525
2526					journal_entry_set_unused(je2);
2527					remove_journal_node(ic, &section_node[k - 1]);
2528					k--;
2529				}
2530				if (j == k) {
2531					remove_range_unlocked(ic, &io->range);
2532					spin_unlock_irq(&ic->endio_wait.lock);
2533					mempool_free(io, &ic->journal_io_mempool);
2534					goto skip_io;
2535				}
2536				for (l = j; l < k; l++) {
2537					remove_journal_node(ic, &section_node[l]);
2538				}
2539			}
2540			spin_unlock_irq(&ic->endio_wait.lock);
2541
2542			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2543			for (l = j; l < k; l++) {
2544				int r;
2545				struct journal_entry *je2 = access_journal_entry(ic, i, l);
2546
2547				if (
2548#ifndef INTERNAL_VERIFY
2549				    unlikely(from_replay) &&
2550#endif
2551				    ic->internal_hash) {
2552					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2553
2554					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2555								  (char *)access_journal_data(ic, i, l), test_tag);
2556					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2557						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2558						dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2559					}
2560				}
2561
2562				journal_entry_set_unused(je2);
2563				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2564							ic->tag_size, TAG_WRITE);
2565				if (unlikely(r)) {
2566					dm_integrity_io_error(ic, "reading tags", r);
2567				}
2568			}
2569
2570			atomic_inc(&comp.in_flight);
2571			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2572					  (k - j) << ic->sb->log2_sectors_per_block,
2573					  get_data_sector(ic, area, offset),
2574					  complete_copy_from_journal, io);
2575skip_io:
2576			j = next_loop;
2577		}
2578	}
2579
2580	dm_bufio_write_dirty_buffers_async(ic->bufio);
2581
2582	blk_finish_plug(&plug);
2583
2584	complete_journal_op(&comp);
2585	wait_for_completion_io(&comp.comp);
2586
2587	dm_integrity_flush_buffers(ic, true);
2588}
2589
2590static void integrity_writer(struct work_struct *w)
2591{
2592	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2593	unsigned write_start, write_sections;
2594
2595	unsigned prev_free_sectors;
2596
 
 
 
 
2597	spin_lock_irq(&ic->endio_wait.lock);
2598	write_start = ic->committed_section;
2599	write_sections = ic->n_committed_sections;
2600	spin_unlock_irq(&ic->endio_wait.lock);
2601
2602	if (!write_sections)
2603		return;
2604
2605	do_journal_write(ic, write_start, write_sections, false);
2606
2607	spin_lock_irq(&ic->endio_wait.lock);
2608
2609	ic->committed_section += write_sections;
2610	wraparound_section(ic, &ic->committed_section);
2611	ic->n_committed_sections -= write_sections;
2612
2613	prev_free_sectors = ic->free_sectors;
2614	ic->free_sectors += write_sections * ic->journal_section_entries;
2615	if (unlikely(!prev_free_sectors))
2616		wake_up_locked(&ic->endio_wait);
2617
2618	spin_unlock_irq(&ic->endio_wait.lock);
2619}
2620
2621static void recalc_write_super(struct dm_integrity_c *ic)
2622{
2623	int r;
2624
2625	dm_integrity_flush_buffers(ic, false);
2626	if (dm_integrity_failed(ic))
2627		return;
2628
2629	r = sync_rw_sb(ic, REQ_OP_WRITE);
2630	if (unlikely(r))
2631		dm_integrity_io_error(ic, "writing superblock", r);
2632}
2633
2634static void integrity_recalc(struct work_struct *w)
2635{
2636	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2637	struct dm_integrity_range range;
2638	struct dm_io_request io_req;
2639	struct dm_io_region io_loc;
2640	sector_t area, offset;
2641	sector_t metadata_block;
2642	unsigned metadata_offset;
2643	sector_t logical_sector, n_sectors;
2644	__u8 *t;
2645	unsigned i;
2646	int r;
2647	unsigned super_counter = 0;
2648
2649	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2650
2651	spin_lock_irq(&ic->endio_wait.lock);
2652
2653next_chunk:
2654
2655	if (unlikely(dm_post_suspending(ic->ti)))
2656		goto unlock_ret;
2657
2658	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2659	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2660		if (ic->mode == 'B') {
2661			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2662			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2663			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2664		}
2665		goto unlock_ret;
2666	}
2667
2668	get_area_and_offset(ic, range.logical_sector, &area, &offset);
2669	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2670	if (!ic->meta_dev)
2671		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2672
2673	add_new_range_and_wait(ic, &range);
2674	spin_unlock_irq(&ic->endio_wait.lock);
2675	logical_sector = range.logical_sector;
2676	n_sectors = range.n_sectors;
2677
2678	if (ic->mode == 'B') {
2679		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2680			goto advance_and_next;
2681		}
2682		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2683				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2684			logical_sector += ic->sectors_per_block;
2685			n_sectors -= ic->sectors_per_block;
2686			cond_resched();
2687		}
2688		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2689				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2690			n_sectors -= ic->sectors_per_block;
2691			cond_resched();
2692		}
2693		get_area_and_offset(ic, logical_sector, &area, &offset);
2694	}
2695
2696	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2697
2698	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2699		recalc_write_super(ic);
2700		if (ic->mode == 'B') {
2701			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2702		}
2703		super_counter = 0;
2704	}
2705
2706	if (unlikely(dm_integrity_failed(ic)))
2707		goto err;
2708
2709	io_req.bi_opf = REQ_OP_READ;
 
2710	io_req.mem.type = DM_IO_VMA;
2711	io_req.mem.ptr.addr = ic->recalc_buffer;
2712	io_req.notify.fn = NULL;
2713	io_req.client = ic->io;
2714	io_loc.bdev = ic->dev->bdev;
2715	io_loc.sector = get_data_sector(ic, area, offset);
2716	io_loc.count = n_sectors;
2717
2718	r = dm_io(&io_req, 1, &io_loc, NULL);
2719	if (unlikely(r)) {
2720		dm_integrity_io_error(ic, "reading data", r);
2721		goto err;
2722	}
2723
2724	t = ic->recalc_tags;
2725	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2726		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2727		t += ic->tag_size;
2728	}
2729
2730	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2731
2732	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2733	if (unlikely(r)) {
2734		dm_integrity_io_error(ic, "writing tags", r);
2735		goto err;
2736	}
2737
2738	if (ic->mode == 'B') {
2739		sector_t start, end;
2740		start = (range.logical_sector >>
2741			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2742			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2743		end = ((range.logical_sector + range.n_sectors) >>
2744		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2745			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2746		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2747	}
2748
2749advance_and_next:
2750	cond_resched();
2751
2752	spin_lock_irq(&ic->endio_wait.lock);
2753	remove_range_unlocked(ic, &range);
2754	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2755	goto next_chunk;
2756
2757err:
2758	remove_range(ic, &range);
2759	return;
2760
2761unlock_ret:
2762	spin_unlock_irq(&ic->endio_wait.lock);
2763
2764	recalc_write_super(ic);
2765}
2766
2767static void bitmap_block_work(struct work_struct *w)
2768{
2769	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2770	struct dm_integrity_c *ic = bbs->ic;
2771	struct bio *bio;
2772	struct bio_list bio_queue;
2773	struct bio_list waiting;
2774
2775	bio_list_init(&waiting);
2776
2777	spin_lock(&bbs->bio_queue_lock);
2778	bio_queue = bbs->bio_queue;
2779	bio_list_init(&bbs->bio_queue);
2780	spin_unlock(&bbs->bio_queue_lock);
2781
2782	while ((bio = bio_list_pop(&bio_queue))) {
2783		struct dm_integrity_io *dio;
2784
2785		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2786
2787		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2788				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2789			remove_range(ic, &dio->range);
2790			INIT_WORK(&dio->work, integrity_bio_wait);
2791			queue_work(ic->offload_wq, &dio->work);
2792		} else {
2793			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2794					dio->range.n_sectors, BITMAP_OP_SET);
2795			bio_list_add(&waiting, bio);
2796		}
2797	}
2798
2799	if (bio_list_empty(&waiting))
2800		return;
2801
2802	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
2803			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2804			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2805
2806	while ((bio = bio_list_pop(&waiting))) {
2807		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2808
2809		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2810				dio->range.n_sectors, BITMAP_OP_SET);
2811
2812		remove_range(ic, &dio->range);
2813		INIT_WORK(&dio->work, integrity_bio_wait);
2814		queue_work(ic->offload_wq, &dio->work);
2815	}
2816
2817	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2818}
2819
2820static void bitmap_flush_work(struct work_struct *work)
2821{
2822	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2823	struct dm_integrity_range range;
2824	unsigned long limit;
2825	struct bio *bio;
2826
2827	dm_integrity_flush_buffers(ic, false);
2828
2829	range.logical_sector = 0;
2830	range.n_sectors = ic->provided_data_sectors;
2831
2832	spin_lock_irq(&ic->endio_wait.lock);
2833	add_new_range_and_wait(ic, &range);
2834	spin_unlock_irq(&ic->endio_wait.lock);
2835
2836	dm_integrity_flush_buffers(ic, true);
 
 
2837
2838	limit = ic->provided_data_sectors;
2839	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2840		limit = le64_to_cpu(ic->sb->recalc_sector)
2841			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2842			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2843	}
2844	/*DEBUG_print("zeroing journal\n");*/
2845	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2846	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2847
2848	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
2849			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2850
2851	spin_lock_irq(&ic->endio_wait.lock);
2852	remove_range_unlocked(ic, &range);
2853	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2854		bio_endio(bio);
2855		spin_unlock_irq(&ic->endio_wait.lock);
2856		spin_lock_irq(&ic->endio_wait.lock);
2857	}
2858	spin_unlock_irq(&ic->endio_wait.lock);
2859}
2860
2861
2862static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2863			 unsigned n_sections, unsigned char commit_seq)
2864{
2865	unsigned i, j, n;
2866
2867	if (!n_sections)
2868		return;
2869
2870	for (n = 0; n < n_sections; n++) {
2871		i = start_section + n;
2872		wraparound_section(ic, &i);
2873		for (j = 0; j < ic->journal_section_sectors; j++) {
2874			struct journal_sector *js = access_journal(ic, i, j);
2875			BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
2876			memset(&js->sectors, 0, sizeof(js->sectors));
2877			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2878		}
2879		for (j = 0; j < ic->journal_section_entries; j++) {
2880			struct journal_entry *je = access_journal_entry(ic, i, j);
2881			journal_entry_set_unused(je);
2882		}
2883	}
2884
2885	write_journal(ic, start_section, n_sections);
2886}
2887
2888static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2889{
2890	unsigned char k;
2891	for (k = 0; k < N_COMMIT_IDS; k++) {
2892		if (dm_integrity_commit_id(ic, i, j, k) == id)
2893			return k;
2894	}
2895	dm_integrity_io_error(ic, "journal commit id", -EIO);
2896	return -EIO;
2897}
2898
2899static void replay_journal(struct dm_integrity_c *ic)
2900{
2901	unsigned i, j;
2902	bool used_commit_ids[N_COMMIT_IDS];
2903	unsigned max_commit_id_sections[N_COMMIT_IDS];
2904	unsigned write_start, write_sections;
2905	unsigned continue_section;
2906	bool journal_empty;
2907	unsigned char unused, last_used, want_commit_seq;
2908
2909	if (ic->mode == 'R')
2910		return;
2911
2912	if (ic->journal_uptodate)
2913		return;
2914
2915	last_used = 0;
2916	write_start = 0;
2917
2918	if (!ic->just_formatted) {
2919		DEBUG_print("reading journal\n");
2920		rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
2921		if (ic->journal_io)
2922			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2923		if (ic->journal_io) {
2924			struct journal_completion crypt_comp;
2925			crypt_comp.ic = ic;
2926			init_completion(&crypt_comp.comp);
2927			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2928			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2929			wait_for_completion(&crypt_comp.comp);
2930		}
2931		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2932	}
2933
2934	if (dm_integrity_failed(ic))
2935		goto clear_journal;
2936
2937	journal_empty = true;
2938	memset(used_commit_ids, 0, sizeof used_commit_ids);
2939	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2940	for (i = 0; i < ic->journal_sections; i++) {
2941		for (j = 0; j < ic->journal_section_sectors; j++) {
2942			int k;
2943			struct journal_sector *js = access_journal(ic, i, j);
2944			k = find_commit_seq(ic, i, j, js->commit_id);
2945			if (k < 0)
2946				goto clear_journal;
2947			used_commit_ids[k] = true;
2948			max_commit_id_sections[k] = i;
2949		}
2950		if (journal_empty) {
2951			for (j = 0; j < ic->journal_section_entries; j++) {
2952				struct journal_entry *je = access_journal_entry(ic, i, j);
2953				if (!journal_entry_is_unused(je)) {
2954					journal_empty = false;
2955					break;
2956				}
2957			}
2958		}
2959	}
2960
2961	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2962		unused = N_COMMIT_IDS - 1;
2963		while (unused && !used_commit_ids[unused - 1])
2964			unused--;
2965	} else {
2966		for (unused = 0; unused < N_COMMIT_IDS; unused++)
2967			if (!used_commit_ids[unused])
2968				break;
2969		if (unused == N_COMMIT_IDS) {
2970			dm_integrity_io_error(ic, "journal commit ids", -EIO);
2971			goto clear_journal;
2972		}
2973	}
2974	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2975		    unused, used_commit_ids[0], used_commit_ids[1],
2976		    used_commit_ids[2], used_commit_ids[3]);
2977
2978	last_used = prev_commit_seq(unused);
2979	want_commit_seq = prev_commit_seq(last_used);
2980
2981	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2982		journal_empty = true;
2983
2984	write_start = max_commit_id_sections[last_used] + 1;
2985	if (unlikely(write_start >= ic->journal_sections))
2986		want_commit_seq = next_commit_seq(want_commit_seq);
2987	wraparound_section(ic, &write_start);
2988
2989	i = write_start;
2990	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2991		for (j = 0; j < ic->journal_section_sectors; j++) {
2992			struct journal_sector *js = access_journal(ic, i, j);
2993
2994			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2995				/*
2996				 * This could be caused by crash during writing.
2997				 * We won't replay the inconsistent part of the
2998				 * journal.
2999				 */
3000				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
3001					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
3002				goto brk;
3003			}
3004		}
3005		i++;
3006		if (unlikely(i >= ic->journal_sections))
3007			want_commit_seq = next_commit_seq(want_commit_seq);
3008		wraparound_section(ic, &i);
3009	}
3010brk:
3011
3012	if (!journal_empty) {
3013		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
3014			    write_sections, write_start, want_commit_seq);
3015		do_journal_write(ic, write_start, write_sections, true);
3016	}
3017
3018	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3019		continue_section = write_start;
3020		ic->commit_seq = want_commit_seq;
3021		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3022	} else {
3023		unsigned s;
3024		unsigned char erase_seq;
3025clear_journal:
3026		DEBUG_print("clearing journal\n");
3027
3028		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3029		s = write_start;
3030		init_journal(ic, s, 1, erase_seq);
3031		s++;
3032		wraparound_section(ic, &s);
3033		if (ic->journal_sections >= 2) {
3034			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3035			s += ic->journal_sections - 2;
3036			wraparound_section(ic, &s);
3037			init_journal(ic, s, 1, erase_seq);
3038		}
3039
3040		continue_section = 0;
3041		ic->commit_seq = next_commit_seq(erase_seq);
3042	}
3043
3044	ic->committed_section = continue_section;
3045	ic->n_committed_sections = 0;
3046
3047	ic->uncommitted_section = continue_section;
3048	ic->n_uncommitted_sections = 0;
3049
3050	ic->free_section = continue_section;
3051	ic->free_section_entry = 0;
3052	ic->free_sectors = ic->journal_entries;
3053
3054	ic->journal_tree_root = RB_ROOT;
3055	for (i = 0; i < ic->journal_entries; i++)
3056		init_journal_node(&ic->journal_tree[i]);
3057}
3058
3059static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3060{
3061	DEBUG_print("dm_integrity_enter_synchronous_mode\n");
3062
3063	if (ic->mode == 'B') {
3064		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3065		ic->synchronous_mode = 1;
3066
3067		cancel_delayed_work_sync(&ic->bitmap_flush_work);
3068		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3069		flush_workqueue(ic->commit_wq);
3070	}
3071}
3072
3073static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3074{
3075	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3076
3077	DEBUG_print("dm_integrity_reboot\n");
3078
3079	dm_integrity_enter_synchronous_mode(ic);
3080
3081	return NOTIFY_DONE;
3082}
3083
3084static void dm_integrity_postsuspend(struct dm_target *ti)
3085{
3086	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3087	int r;
3088
3089	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3090
3091	del_timer_sync(&ic->autocommit_timer);
3092
3093	if (ic->recalc_wq)
3094		drain_workqueue(ic->recalc_wq);
3095
3096	if (ic->mode == 'B')
3097		cancel_delayed_work_sync(&ic->bitmap_flush_work);
3098
3099	queue_work(ic->commit_wq, &ic->commit_work);
3100	drain_workqueue(ic->commit_wq);
3101
3102	if (ic->mode == 'J') {
3103		queue_work(ic->writer_wq, &ic->writer_work);
 
3104		drain_workqueue(ic->writer_wq);
3105		dm_integrity_flush_buffers(ic, true);
3106		if (ic->wrote_to_journal) {
3107			init_journal(ic, ic->free_section,
3108				     ic->journal_sections - ic->free_section, ic->commit_seq);
3109			if (ic->free_section) {
3110				init_journal(ic, 0, ic->free_section,
3111					     next_commit_seq(ic->commit_seq));
3112			}
3113		}
3114	}
3115
3116	if (ic->mode == 'B') {
3117		dm_integrity_flush_buffers(ic, true);
3118#if 1
3119		/* set to 0 to test bitmap replay code */
3120		init_journal(ic, 0, ic->journal_sections, 0);
3121		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3122		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3123		if (unlikely(r))
3124			dm_integrity_io_error(ic, "writing superblock", r);
3125#endif
3126	}
3127
3128	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3129
3130	ic->journal_uptodate = true;
3131}
3132
3133static void dm_integrity_resume(struct dm_target *ti)
3134{
3135	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3136	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3137	int r;
3138
3139	DEBUG_print("resume\n");
3140
3141	ic->wrote_to_journal = false;
3142
3143	if (ic->provided_data_sectors != old_provided_data_sectors) {
3144		if (ic->provided_data_sectors > old_provided_data_sectors &&
3145		    ic->mode == 'B' &&
3146		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3147			rw_journal_sectors(ic, REQ_OP_READ, 0,
3148					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3149			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3150					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3151			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3152					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3153		}
3154
3155		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3156		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3157		if (unlikely(r))
3158			dm_integrity_io_error(ic, "writing superblock", r);
3159	}
3160
3161	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3162		DEBUG_print("resume dirty_bitmap\n");
3163		rw_journal_sectors(ic, REQ_OP_READ, 0,
3164				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3165		if (ic->mode == 'B') {
3166			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3167			    !ic->reset_recalculate_flag) {
3168				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3169				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3170				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3171						     BITMAP_OP_TEST_ALL_CLEAR)) {
3172					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3173					ic->sb->recalc_sector = cpu_to_le64(0);
3174				}
3175			} else {
3176				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3177					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3178				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3179				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3180				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3181				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3182				rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3183						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3184				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3185				ic->sb->recalc_sector = cpu_to_le64(0);
3186			}
3187		} else {
3188			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3189			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3190			    ic->reset_recalculate_flag) {
3191				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3192				ic->sb->recalc_sector = cpu_to_le64(0);
3193			}
3194			init_journal(ic, 0, ic->journal_sections, 0);
3195			replay_journal(ic);
3196			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3197		}
3198		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3199		if (unlikely(r))
3200			dm_integrity_io_error(ic, "writing superblock", r);
3201	} else {
3202		replay_journal(ic);
3203		if (ic->reset_recalculate_flag) {
3204			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3205			ic->sb->recalc_sector = cpu_to_le64(0);
3206		}
3207		if (ic->mode == 'B') {
3208			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3209			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3210			r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3211			if (unlikely(r))
3212				dm_integrity_io_error(ic, "writing superblock", r);
3213
3214			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3215			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3216			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3217			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3218			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3219				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3220						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3221				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3222						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3223				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3224						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3225			}
3226			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3227					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3228		}
3229	}
3230
3231	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3232	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3233		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3234		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3235		if (recalc_pos < ic->provided_data_sectors) {
3236			queue_work(ic->recalc_wq, &ic->recalc_work);
3237		} else if (recalc_pos > ic->provided_data_sectors) {
3238			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3239			recalc_write_super(ic);
3240		}
3241	}
3242
3243	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3244	ic->reboot_notifier.next = NULL;
3245	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
3246	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3247
3248#if 0
3249	/* set to 1 to stress test synchronous mode */
3250	dm_integrity_enter_synchronous_mode(ic);
3251#endif
3252}
3253
3254static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3255				unsigned status_flags, char *result, unsigned maxlen)
3256{
3257	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3258	unsigned arg_count;
3259	size_t sz = 0;
3260
3261	switch (type) {
3262	case STATUSTYPE_INFO:
3263		DMEMIT("%llu %llu",
3264			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
3265			ic->provided_data_sectors);
3266		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3267			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3268		else
3269			DMEMIT(" -");
3270		break;
3271
3272	case STATUSTYPE_TABLE: {
3273		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3274		watermark_percentage += ic->journal_entries / 2;
3275		do_div(watermark_percentage, ic->journal_entries);
3276		arg_count = 3;
3277		arg_count += !!ic->meta_dev;
3278		arg_count += ic->sectors_per_block != 1;
3279		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3280		arg_count += ic->reset_recalculate_flag;
3281		arg_count += ic->discard;
3282		arg_count += ic->mode == 'J';
3283		arg_count += ic->mode == 'J';
3284		arg_count += ic->mode == 'B';
3285		arg_count += ic->mode == 'B';
3286		arg_count += !!ic->internal_hash_alg.alg_string;
3287		arg_count += !!ic->journal_crypt_alg.alg_string;
3288		arg_count += !!ic->journal_mac_alg.alg_string;
3289		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3290		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3291		arg_count += ic->legacy_recalculate;
3292		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3293		       ic->tag_size, ic->mode, arg_count);
3294		if (ic->meta_dev)
3295			DMEMIT(" meta_device:%s", ic->meta_dev->name);
3296		if (ic->sectors_per_block != 1)
3297			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3298		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3299			DMEMIT(" recalculate");
3300		if (ic->reset_recalculate_flag)
3301			DMEMIT(" reset_recalculate");
3302		if (ic->discard)
3303			DMEMIT(" allow_discards");
3304		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3305		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3306		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3307		if (ic->mode == 'J') {
3308			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3309			DMEMIT(" commit_time:%u", ic->autocommit_msec);
3310		}
3311		if (ic->mode == 'B') {
3312			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3313			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3314		}
3315		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3316			DMEMIT(" fix_padding");
3317		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3318			DMEMIT(" fix_hmac");
3319		if (ic->legacy_recalculate)
3320			DMEMIT(" legacy_recalculate");
3321
3322#define EMIT_ALG(a, n)							\
3323		do {							\
3324			if (ic->a.alg_string) {				\
3325				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
3326				if (ic->a.key_string)			\
3327					DMEMIT(":%s", ic->a.key_string);\
3328			}						\
3329		} while (0)
3330		EMIT_ALG(internal_hash_alg, "internal_hash");
3331		EMIT_ALG(journal_crypt_alg, "journal_crypt");
3332		EMIT_ALG(journal_mac_alg, "journal_mac");
3333		break;
3334	}
3335	case STATUSTYPE_IMA:
3336		DMEMIT_TARGET_NAME_VERSION(ti->type);
3337		DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
3338			ic->dev->name, ic->start, ic->tag_size, ic->mode);
3339
3340		if (ic->meta_dev)
3341			DMEMIT(",meta_device=%s", ic->meta_dev->name);
3342		if (ic->sectors_per_block != 1)
3343			DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3344
3345		DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3346		       'y' : 'n');
3347		DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3348		DMEMIT(",fix_padding=%c",
3349		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3350		DMEMIT(",fix_hmac=%c",
3351		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3352		DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3353
3354		DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3355		DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3356		DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3357		DMEMIT(";");
3358		break;
3359	}
3360}
3361
3362static int dm_integrity_iterate_devices(struct dm_target *ti,
3363					iterate_devices_callout_fn fn, void *data)
3364{
3365	struct dm_integrity_c *ic = ti->private;
3366
3367	if (!ic->meta_dev)
3368		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3369	else
3370		return fn(ti, ic->dev, 0, ti->len, data);
3371}
3372
3373static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3374{
3375	struct dm_integrity_c *ic = ti->private;
3376
3377	if (ic->sectors_per_block > 1) {
3378		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3379		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3380		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3381		limits->dma_alignment = limits->logical_block_size - 1;
3382	}
3383}
3384
3385static void calculate_journal_section_size(struct dm_integrity_c *ic)
3386{
3387	unsigned sector_space = JOURNAL_SECTOR_DATA;
3388
3389	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3390	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3391					 JOURNAL_ENTRY_ROUNDUP);
3392
3393	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3394		sector_space -= JOURNAL_MAC_PER_SECTOR;
3395	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3396	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3397	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3398	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3399}
3400
3401static int calculate_device_limits(struct dm_integrity_c *ic)
3402{
3403	__u64 initial_sectors;
3404
3405	calculate_journal_section_size(ic);
3406	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3407	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3408		return -EINVAL;
3409	ic->initial_sectors = initial_sectors;
3410
3411	if (!ic->meta_dev) {
3412		sector_t last_sector, last_area, last_offset;
3413
3414		/* we have to maintain excessive padding for compatibility with existing volumes */
3415		__u64 metadata_run_padding =
3416			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3417			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3418			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3419
3420		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3421					    metadata_run_padding) >> SECTOR_SHIFT;
3422		if (!(ic->metadata_run & (ic->metadata_run - 1)))
3423			ic->log2_metadata_run = __ffs(ic->metadata_run);
3424		else
3425			ic->log2_metadata_run = -1;
3426
3427		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3428		last_sector = get_data_sector(ic, last_area, last_offset);
3429		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3430			return -EINVAL;
3431	} else {
3432		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3433		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3434				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3435		meta_size <<= ic->log2_buffer_sectors;
3436		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3437		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3438			return -EINVAL;
3439		ic->metadata_run = 1;
3440		ic->log2_metadata_run = 0;
3441	}
3442
3443	return 0;
3444}
3445
3446static void get_provided_data_sectors(struct dm_integrity_c *ic)
3447{
3448	if (!ic->meta_dev) {
3449		int test_bit;
3450		ic->provided_data_sectors = 0;
3451		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3452			__u64 prev_data_sectors = ic->provided_data_sectors;
3453
3454			ic->provided_data_sectors |= (sector_t)1 << test_bit;
3455			if (calculate_device_limits(ic))
3456				ic->provided_data_sectors = prev_data_sectors;
3457		}
3458	} else {
3459		ic->provided_data_sectors = ic->data_device_sectors;
3460		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3461	}
3462}
3463
3464static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3465{
3466	unsigned journal_sections;
3467	int test_bit;
3468
3469	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3470	memcpy(ic->sb->magic, SB_MAGIC, 8);
3471	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3472	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3473	if (ic->journal_mac_alg.alg_string)
3474		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3475
3476	calculate_journal_section_size(ic);
3477	journal_sections = journal_sectors / ic->journal_section_sectors;
3478	if (!journal_sections)
3479		journal_sections = 1;
3480
3481	if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3482		ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3483		get_random_bytes(ic->sb->salt, SALT_SIZE);
3484	}
3485
3486	if (!ic->meta_dev) {
3487		if (ic->fix_padding)
3488			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3489		ic->sb->journal_sections = cpu_to_le32(journal_sections);
3490		if (!interleave_sectors)
3491			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3492		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3493		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3494		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3495
3496		get_provided_data_sectors(ic);
3497		if (!ic->provided_data_sectors)
3498			return -EINVAL;
3499	} else {
3500		ic->sb->log2_interleave_sectors = 0;
3501
3502		get_provided_data_sectors(ic);
3503		if (!ic->provided_data_sectors)
3504			return -EINVAL;
3505
3506try_smaller_buffer:
3507		ic->sb->journal_sections = cpu_to_le32(0);
3508		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3509			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3510			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3511			if (test_journal_sections > journal_sections)
3512				continue;
3513			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3514			if (calculate_device_limits(ic))
3515				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3516
3517		}
3518		if (!le32_to_cpu(ic->sb->journal_sections)) {
3519			if (ic->log2_buffer_sectors > 3) {
3520				ic->log2_buffer_sectors--;
3521				goto try_smaller_buffer;
3522			}
3523			return -EINVAL;
3524		}
3525	}
3526
3527	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3528
3529	sb_set_version(ic);
3530
3531	return 0;
3532}
3533
3534static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3535{
3536	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3537	struct blk_integrity bi;
3538
3539	memset(&bi, 0, sizeof(bi));
3540	bi.profile = &dm_integrity_profile;
3541	bi.tuple_size = ic->tag_size;
3542	bi.tag_size = bi.tuple_size;
3543	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3544
3545	blk_integrity_register(disk, &bi);
3546	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3547}
3548
3549static void dm_integrity_free_page_list(struct page_list *pl)
3550{
3551	unsigned i;
3552
3553	if (!pl)
3554		return;
3555	for (i = 0; pl[i].page; i++)
3556		__free_page(pl[i].page);
3557	kvfree(pl);
3558}
3559
3560static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3561{
3562	struct page_list *pl;
3563	unsigned i;
3564
3565	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3566	if (!pl)
3567		return NULL;
3568
3569	for (i = 0; i < n_pages; i++) {
3570		pl[i].page = alloc_page(GFP_KERNEL);
3571		if (!pl[i].page) {
3572			dm_integrity_free_page_list(pl);
3573			return NULL;
3574		}
3575		if (i)
3576			pl[i - 1].next = &pl[i];
3577	}
3578	pl[i].page = NULL;
3579	pl[i].next = NULL;
3580
3581	return pl;
3582}
3583
3584static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3585{
3586	unsigned i;
3587	for (i = 0; i < ic->journal_sections; i++)
3588		kvfree(sl[i]);
3589	kvfree(sl);
3590}
3591
3592static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3593								   struct page_list *pl)
3594{
3595	struct scatterlist **sl;
3596	unsigned i;
3597
3598	sl = kvmalloc_array(ic->journal_sections,
3599			    sizeof(struct scatterlist *),
3600			    GFP_KERNEL | __GFP_ZERO);
3601	if (!sl)
3602		return NULL;
3603
3604	for (i = 0; i < ic->journal_sections; i++) {
3605		struct scatterlist *s;
3606		unsigned start_index, start_offset;
3607		unsigned end_index, end_offset;
3608		unsigned n_pages;
3609		unsigned idx;
3610
3611		page_list_location(ic, i, 0, &start_index, &start_offset);
3612		page_list_location(ic, i, ic->journal_section_sectors - 1,
3613				   &end_index, &end_offset);
3614
3615		n_pages = (end_index - start_index + 1);
3616
3617		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3618				   GFP_KERNEL);
3619		if (!s) {
3620			dm_integrity_free_journal_scatterlist(ic, sl);
3621			return NULL;
3622		}
3623
3624		sg_init_table(s, n_pages);
3625		for (idx = start_index; idx <= end_index; idx++) {
3626			char *va = lowmem_page_address(pl[idx].page);
3627			unsigned start = 0, end = PAGE_SIZE;
3628			if (idx == start_index)
3629				start = start_offset;
3630			if (idx == end_index)
3631				end = end_offset + (1 << SECTOR_SHIFT);
3632			sg_set_buf(&s[idx - start_index], va + start, end - start);
3633		}
3634
3635		sl[i] = s;
3636	}
3637
3638	return sl;
3639}
3640
3641static void free_alg(struct alg_spec *a)
3642{
3643	kfree_sensitive(a->alg_string);
3644	kfree_sensitive(a->key);
3645	memset(a, 0, sizeof *a);
3646}
3647
3648static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3649{
3650	char *k;
3651
3652	free_alg(a);
3653
3654	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3655	if (!a->alg_string)
3656		goto nomem;
3657
3658	k = strchr(a->alg_string, ':');
3659	if (k) {
3660		*k = 0;
3661		a->key_string = k + 1;
3662		if (strlen(a->key_string) & 1)
3663			goto inval;
3664
3665		a->key_size = strlen(a->key_string) / 2;
3666		a->key = kmalloc(a->key_size, GFP_KERNEL);
3667		if (!a->key)
3668			goto nomem;
3669		if (hex2bin(a->key, a->key_string, a->key_size))
3670			goto inval;
3671	}
3672
3673	return 0;
3674inval:
3675	*error = error_inval;
3676	return -EINVAL;
3677nomem:
3678	*error = "Out of memory for an argument";
3679	return -ENOMEM;
3680}
3681
3682static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3683		   char *error_alg, char *error_key)
3684{
3685	int r;
3686
3687	if (a->alg_string) {
3688		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3689		if (IS_ERR(*hash)) {
3690			*error = error_alg;
3691			r = PTR_ERR(*hash);
3692			*hash = NULL;
3693			return r;
3694		}
3695
3696		if (a->key) {
3697			r = crypto_shash_setkey(*hash, a->key, a->key_size);
3698			if (r) {
3699				*error = error_key;
3700				return r;
3701			}
3702		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3703			*error = error_key;
3704			return -ENOKEY;
3705		}
3706	}
3707
3708	return 0;
3709}
3710
3711static int create_journal(struct dm_integrity_c *ic, char **error)
3712{
3713	int r = 0;
3714	unsigned i;
3715	__u64 journal_pages, journal_desc_size, journal_tree_size;
3716	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3717	struct skcipher_request *req = NULL;
3718
3719	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3720	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3721	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3722	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3723
3724	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3725				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3726	journal_desc_size = journal_pages * sizeof(struct page_list);
3727	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3728		*error = "Journal doesn't fit into memory";
3729		r = -ENOMEM;
3730		goto bad;
3731	}
3732	ic->journal_pages = journal_pages;
3733
3734	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3735	if (!ic->journal) {
3736		*error = "Could not allocate memory for journal";
3737		r = -ENOMEM;
3738		goto bad;
3739	}
3740	if (ic->journal_crypt_alg.alg_string) {
3741		unsigned ivsize, blocksize;
3742		struct journal_completion comp;
3743
3744		comp.ic = ic;
3745		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3746		if (IS_ERR(ic->journal_crypt)) {
3747			*error = "Invalid journal cipher";
3748			r = PTR_ERR(ic->journal_crypt);
3749			ic->journal_crypt = NULL;
3750			goto bad;
3751		}
3752		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3753		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3754
3755		if (ic->journal_crypt_alg.key) {
3756			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3757						   ic->journal_crypt_alg.key_size);
3758			if (r) {
3759				*error = "Error setting encryption key";
3760				goto bad;
3761			}
3762		}
3763		DEBUG_print("cipher %s, block size %u iv size %u\n",
3764			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3765
3766		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3767		if (!ic->journal_io) {
3768			*error = "Could not allocate memory for journal io";
3769			r = -ENOMEM;
3770			goto bad;
3771		}
3772
3773		if (blocksize == 1) {
3774			struct scatterlist *sg;
3775
3776			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3777			if (!req) {
3778				*error = "Could not allocate crypt request";
3779				r = -ENOMEM;
3780				goto bad;
3781			}
3782
3783			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3784			if (!crypt_iv) {
3785				*error = "Could not allocate iv";
3786				r = -ENOMEM;
3787				goto bad;
3788			}
3789
3790			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3791			if (!ic->journal_xor) {
3792				*error = "Could not allocate memory for journal xor";
3793				r = -ENOMEM;
3794				goto bad;
3795			}
3796
3797			sg = kvmalloc_array(ic->journal_pages + 1,
3798					    sizeof(struct scatterlist),
3799					    GFP_KERNEL);
3800			if (!sg) {
3801				*error = "Unable to allocate sg list";
3802				r = -ENOMEM;
3803				goto bad;
3804			}
3805			sg_init_table(sg, ic->journal_pages + 1);
3806			for (i = 0; i < ic->journal_pages; i++) {
3807				char *va = lowmem_page_address(ic->journal_xor[i].page);
3808				clear_page(va);
3809				sg_set_buf(&sg[i], va, PAGE_SIZE);
3810			}
3811			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3812
3813			skcipher_request_set_crypt(req, sg, sg,
3814						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3815			init_completion(&comp.comp);
3816			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3817			if (do_crypt(true, req, &comp))
3818				wait_for_completion(&comp.comp);
3819			kvfree(sg);
3820			r = dm_integrity_failed(ic);
3821			if (r) {
3822				*error = "Unable to encrypt journal";
3823				goto bad;
3824			}
3825			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3826
3827			crypto_free_skcipher(ic->journal_crypt);
3828			ic->journal_crypt = NULL;
3829		} else {
3830			unsigned crypt_len = roundup(ivsize, blocksize);
3831
3832			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3833			if (!req) {
3834				*error = "Could not allocate crypt request";
3835				r = -ENOMEM;
3836				goto bad;
3837			}
3838
3839			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3840			if (!crypt_iv) {
3841				*error = "Could not allocate iv";
3842				r = -ENOMEM;
3843				goto bad;
3844			}
3845
3846			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3847			if (!crypt_data) {
3848				*error = "Unable to allocate crypt data";
3849				r = -ENOMEM;
3850				goto bad;
3851			}
3852
3853			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3854			if (!ic->journal_scatterlist) {
3855				*error = "Unable to allocate sg list";
3856				r = -ENOMEM;
3857				goto bad;
3858			}
3859			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3860			if (!ic->journal_io_scatterlist) {
3861				*error = "Unable to allocate sg list";
3862				r = -ENOMEM;
3863				goto bad;
3864			}
3865			ic->sk_requests = kvmalloc_array(ic->journal_sections,
3866							 sizeof(struct skcipher_request *),
3867							 GFP_KERNEL | __GFP_ZERO);
3868			if (!ic->sk_requests) {
3869				*error = "Unable to allocate sk requests";
3870				r = -ENOMEM;
3871				goto bad;
3872			}
3873			for (i = 0; i < ic->journal_sections; i++) {
3874				struct scatterlist sg;
3875				struct skcipher_request *section_req;
3876				__le32 section_le = cpu_to_le32(i);
3877
3878				memset(crypt_iv, 0x00, ivsize);
3879				memset(crypt_data, 0x00, crypt_len);
3880				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3881
3882				sg_init_one(&sg, crypt_data, crypt_len);
3883				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3884				init_completion(&comp.comp);
3885				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3886				if (do_crypt(true, req, &comp))
3887					wait_for_completion(&comp.comp);
3888
3889				r = dm_integrity_failed(ic);
3890				if (r) {
3891					*error = "Unable to generate iv";
3892					goto bad;
3893				}
3894
3895				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3896				if (!section_req) {
3897					*error = "Unable to allocate crypt request";
3898					r = -ENOMEM;
3899					goto bad;
3900				}
3901				section_req->iv = kmalloc_array(ivsize, 2,
3902								GFP_KERNEL);
3903				if (!section_req->iv) {
3904					skcipher_request_free(section_req);
3905					*error = "Unable to allocate iv";
3906					r = -ENOMEM;
3907					goto bad;
3908				}
3909				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3910				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3911				ic->sk_requests[i] = section_req;
3912				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3913			}
3914		}
3915	}
3916
3917	for (i = 0; i < N_COMMIT_IDS; i++) {
3918		unsigned j;
3919retest_commit_id:
3920		for (j = 0; j < i; j++) {
3921			if (ic->commit_ids[j] == ic->commit_ids[i]) {
3922				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3923				goto retest_commit_id;
3924			}
3925		}
3926		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3927	}
3928
3929	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3930	if (journal_tree_size > ULONG_MAX) {
3931		*error = "Journal doesn't fit into memory";
3932		r = -ENOMEM;
3933		goto bad;
3934	}
3935	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3936	if (!ic->journal_tree) {
3937		*error = "Could not allocate memory for journal tree";
3938		r = -ENOMEM;
3939	}
3940bad:
3941	kfree(crypt_data);
3942	kfree(crypt_iv);
3943	skcipher_request_free(req);
3944
3945	return r;
3946}
3947
3948/*
3949 * Construct a integrity mapping
3950 *
3951 * Arguments:
3952 *	device
3953 *	offset from the start of the device
3954 *	tag size
3955 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3956 *	number of optional arguments
3957 *	optional arguments:
3958 *		journal_sectors
3959 *		interleave_sectors
3960 *		buffer_sectors
3961 *		journal_watermark
3962 *		commit_time
3963 *		meta_device
3964 *		block_size
3965 *		sectors_per_bit
3966 *		bitmap_flush_interval
3967 *		internal_hash
3968 *		journal_crypt
3969 *		journal_mac
3970 *		recalculate
3971 */
3972static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3973{
3974	struct dm_integrity_c *ic;
3975	char dummy;
3976	int r;
3977	unsigned extra_args;
3978	struct dm_arg_set as;
3979	static const struct dm_arg _args[] = {
3980		{0, 18, "Invalid number of feature args"},
3981	};
3982	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3983	bool should_write_sb;
3984	__u64 threshold;
3985	unsigned long long start;
3986	__s8 log2_sectors_per_bitmap_bit = -1;
3987	__s8 log2_blocks_per_bitmap_bit;
3988	__u64 bits_in_journal;
3989	__u64 n_bitmap_bits;
3990
3991#define DIRECT_ARGUMENTS	4
3992
3993	if (argc <= DIRECT_ARGUMENTS) {
3994		ti->error = "Invalid argument count";
3995		return -EINVAL;
3996	}
3997
3998	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3999	if (!ic) {
4000		ti->error = "Cannot allocate integrity context";
4001		return -ENOMEM;
4002	}
4003	ti->private = ic;
4004	ti->per_io_data_size = sizeof(struct dm_integrity_io);
4005	ic->ti = ti;
4006
4007	ic->in_progress = RB_ROOT;
4008	INIT_LIST_HEAD(&ic->wait_list);
4009	init_waitqueue_head(&ic->endio_wait);
4010	bio_list_init(&ic->flush_bio_list);
4011	init_waitqueue_head(&ic->copy_to_journal_wait);
4012	init_completion(&ic->crypto_backoff);
4013	atomic64_set(&ic->number_of_mismatches, 0);
4014	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
4015
4016	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
4017	if (r) {
4018		ti->error = "Device lookup failed";
4019		goto bad;
4020	}
4021
4022	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
4023		ti->error = "Invalid starting offset";
4024		r = -EINVAL;
4025		goto bad;
4026	}
4027	ic->start = start;
4028
4029	if (strcmp(argv[2], "-")) {
4030		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
4031			ti->error = "Invalid tag size";
4032			r = -EINVAL;
4033			goto bad;
4034		}
4035	}
4036
4037	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
4038	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
4039		ic->mode = argv[3][0];
4040	} else {
4041		ti->error = "Invalid mode (expecting J, B, D, R)";
4042		r = -EINVAL;
4043		goto bad;
4044	}
4045
4046	journal_sectors = 0;
4047	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
4048	buffer_sectors = DEFAULT_BUFFER_SECTORS;
4049	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
4050	sync_msec = DEFAULT_SYNC_MSEC;
4051	ic->sectors_per_block = 1;
4052
4053	as.argc = argc - DIRECT_ARGUMENTS;
4054	as.argv = argv + DIRECT_ARGUMENTS;
4055	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4056	if (r)
4057		goto bad;
4058
4059	while (extra_args--) {
4060		const char *opt_string;
4061		unsigned val;
4062		unsigned long long llval;
4063		opt_string = dm_shift_arg(&as);
4064		if (!opt_string) {
4065			r = -EINVAL;
4066			ti->error = "Not enough feature arguments";
4067			goto bad;
4068		}
4069		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4070			journal_sectors = val ? val : 1;
4071		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4072			interleave_sectors = val;
4073		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4074			buffer_sectors = val;
4075		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4076			journal_watermark = val;
4077		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4078			sync_msec = val;
4079		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4080			if (ic->meta_dev) {
4081				dm_put_device(ti, ic->meta_dev);
4082				ic->meta_dev = NULL;
4083			}
4084			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4085					  dm_table_get_mode(ti->table), &ic->meta_dev);
4086			if (r) {
4087				ti->error = "Device lookup failed";
4088				goto bad;
4089			}
4090		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
4091			if (val < 1 << SECTOR_SHIFT ||
4092			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4093			    (val & (val -1))) {
4094				r = -EINVAL;
4095				ti->error = "Invalid block_size argument";
4096				goto bad;
4097			}
4098			ic->sectors_per_block = val >> SECTOR_SHIFT;
4099		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4100			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4101		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4102			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4103				r = -EINVAL;
4104				ti->error = "Invalid bitmap_flush_interval argument";
4105				goto bad;
4106			}
4107			ic->bitmap_flush_interval = msecs_to_jiffies(val);
4108		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4109			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4110					    "Invalid internal_hash argument");
4111			if (r)
4112				goto bad;
4113		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4114			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4115					    "Invalid journal_crypt argument");
4116			if (r)
4117				goto bad;
4118		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4119			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4120					    "Invalid journal_mac argument");
4121			if (r)
4122				goto bad;
4123		} else if (!strcmp(opt_string, "recalculate")) {
4124			ic->recalculate_flag = true;
4125		} else if (!strcmp(opt_string, "reset_recalculate")) {
4126			ic->recalculate_flag = true;
4127			ic->reset_recalculate_flag = true;
4128		} else if (!strcmp(opt_string, "allow_discards")) {
4129			ic->discard = true;
4130		} else if (!strcmp(opt_string, "fix_padding")) {
4131			ic->fix_padding = true;
4132		} else if (!strcmp(opt_string, "fix_hmac")) {
4133			ic->fix_hmac = true;
4134		} else if (!strcmp(opt_string, "legacy_recalculate")) {
4135			ic->legacy_recalculate = true;
4136		} else {
4137			r = -EINVAL;
4138			ti->error = "Invalid argument";
4139			goto bad;
4140		}
4141	}
4142
4143	ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
4144	if (!ic->meta_dev)
4145		ic->meta_device_sectors = ic->data_device_sectors;
4146	else
4147		ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
4148
4149	if (!journal_sectors) {
4150		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4151				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4152	}
4153
4154	if (!buffer_sectors)
4155		buffer_sectors = 1;
4156	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
4157
4158	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4159		    "Invalid internal hash", "Error setting internal hash key");
4160	if (r)
4161		goto bad;
4162
4163	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4164		    "Invalid journal mac", "Error setting journal mac key");
4165	if (r)
4166		goto bad;
4167
4168	if (!ic->tag_size) {
4169		if (!ic->internal_hash) {
4170			ti->error = "Unknown tag size";
4171			r = -EINVAL;
4172			goto bad;
4173		}
4174		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4175	}
4176	if (ic->tag_size > MAX_TAG_SIZE) {
4177		ti->error = "Too big tag size";
4178		r = -EINVAL;
4179		goto bad;
4180	}
4181	if (!(ic->tag_size & (ic->tag_size - 1)))
4182		ic->log2_tag_size = __ffs(ic->tag_size);
4183	else
4184		ic->log2_tag_size = -1;
4185
4186	if (ic->mode == 'B' && !ic->internal_hash) {
4187		r = -EINVAL;
4188		ti->error = "Bitmap mode can be only used with internal hash";
4189		goto bad;
4190	}
4191
4192	if (ic->discard && !ic->internal_hash) {
4193		r = -EINVAL;
4194		ti->error = "Discard can be only used with internal hash";
4195		goto bad;
4196	}
4197
4198	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4199	ic->autocommit_msec = sync_msec;
4200	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4201
4202	ic->io = dm_io_client_create();
4203	if (IS_ERR(ic->io)) {
4204		r = PTR_ERR(ic->io);
4205		ic->io = NULL;
4206		ti->error = "Cannot allocate dm io";
4207		goto bad;
4208	}
4209
4210	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4211	if (r) {
4212		ti->error = "Cannot allocate mempool";
4213		goto bad;
4214	}
4215
4216	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4217					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4218	if (!ic->metadata_wq) {
4219		ti->error = "Cannot allocate workqueue";
4220		r = -ENOMEM;
4221		goto bad;
4222	}
4223
4224	/*
4225	 * If this workqueue were percpu, it would cause bio reordering
4226	 * and reduced performance.
4227	 */
4228	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4229	if (!ic->wait_wq) {
4230		ti->error = "Cannot allocate workqueue";
4231		r = -ENOMEM;
4232		goto bad;
4233	}
4234
4235	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4236					  METADATA_WORKQUEUE_MAX_ACTIVE);
4237	if (!ic->offload_wq) {
4238		ti->error = "Cannot allocate workqueue";
4239		r = -ENOMEM;
4240		goto bad;
4241	}
4242
4243	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4244	if (!ic->commit_wq) {
4245		ti->error = "Cannot allocate workqueue";
4246		r = -ENOMEM;
4247		goto bad;
4248	}
4249	INIT_WORK(&ic->commit_work, integrity_commit);
4250
4251	if (ic->mode == 'J' || ic->mode == 'B') {
4252		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4253		if (!ic->writer_wq) {
4254			ti->error = "Cannot allocate workqueue";
4255			r = -ENOMEM;
4256			goto bad;
4257		}
4258		INIT_WORK(&ic->writer_work, integrity_writer);
4259	}
4260
4261	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4262	if (!ic->sb) {
4263		r = -ENOMEM;
4264		ti->error = "Cannot allocate superblock area";
4265		goto bad;
4266	}
4267
4268	r = sync_rw_sb(ic, REQ_OP_READ);
4269	if (r) {
4270		ti->error = "Error reading superblock";
4271		goto bad;
4272	}
4273	should_write_sb = false;
4274	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4275		if (ic->mode != 'R') {
4276			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4277				r = -EINVAL;
4278				ti->error = "The device is not initialized";
4279				goto bad;
4280			}
4281		}
4282
4283		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4284		if (r) {
4285			ti->error = "Could not initialize superblock";
4286			goto bad;
4287		}
4288		if (ic->mode != 'R')
4289			should_write_sb = true;
4290	}
4291
4292	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4293		r = -EINVAL;
4294		ti->error = "Unknown version";
4295		goto bad;
4296	}
4297	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4298		r = -EINVAL;
4299		ti->error = "Tag size doesn't match the information in superblock";
4300		goto bad;
4301	}
4302	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4303		r = -EINVAL;
4304		ti->error = "Block size doesn't match the information in superblock";
4305		goto bad;
4306	}
4307	if (!le32_to_cpu(ic->sb->journal_sections)) {
4308		r = -EINVAL;
4309		ti->error = "Corrupted superblock, journal_sections is 0";
4310		goto bad;
4311	}
4312	/* make sure that ti->max_io_len doesn't overflow */
4313	if (!ic->meta_dev) {
4314		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4315		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4316			r = -EINVAL;
4317			ti->error = "Invalid interleave_sectors in the superblock";
4318			goto bad;
4319		}
4320	} else {
4321		if (ic->sb->log2_interleave_sectors) {
4322			r = -EINVAL;
4323			ti->error = "Invalid interleave_sectors in the superblock";
4324			goto bad;
4325		}
4326	}
4327	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4328		r = -EINVAL;
4329		ti->error = "Journal mac mismatch";
4330		goto bad;
4331	}
4332
4333	get_provided_data_sectors(ic);
4334	if (!ic->provided_data_sectors) {
4335		r = -EINVAL;
4336		ti->error = "The device is too small";
4337		goto bad;
4338	}
4339
4340try_smaller_buffer:
4341	r = calculate_device_limits(ic);
4342	if (r) {
4343		if (ic->meta_dev) {
4344			if (ic->log2_buffer_sectors > 3) {
4345				ic->log2_buffer_sectors--;
4346				goto try_smaller_buffer;
4347			}
4348		}
4349		ti->error = "The device is too small";
4350		goto bad;
4351	}
4352
4353	if (log2_sectors_per_bitmap_bit < 0)
4354		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4355	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4356		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4357
4358	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4359	if (bits_in_journal > UINT_MAX)
4360		bits_in_journal = UINT_MAX;
4361	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4362		log2_sectors_per_bitmap_bit++;
4363
4364	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4365	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4366	if (should_write_sb) {
4367		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4368	}
4369	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4370				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4371	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4372
4373	if (!ic->meta_dev)
4374		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4375
4376	if (ti->len > ic->provided_data_sectors) {
4377		r = -EINVAL;
4378		ti->error = "Not enough provided sectors for requested mapping size";
4379		goto bad;
4380	}
4381
4382
4383	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4384	threshold += 50;
4385	do_div(threshold, 100);
4386	ic->free_sectors_threshold = threshold;
4387
4388	DEBUG_print("initialized:\n");
4389	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4390	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
4391	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4392	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
4393	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
4394	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4395	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
4396	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4397	DEBUG_print("	data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
4398	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
4399	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
4400	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
4401	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4402	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4403	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
4404
4405	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4406		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4407		ic->sb->recalc_sector = cpu_to_le64(0);
4408	}
4409
4410	if (ic->internal_hash) {
4411		size_t recalc_tags_size;
4412		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4413		if (!ic->recalc_wq ) {
4414			ti->error = "Cannot allocate workqueue";
4415			r = -ENOMEM;
4416			goto bad;
4417		}
4418		INIT_WORK(&ic->recalc_work, integrity_recalc);
4419		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4420		if (!ic->recalc_buffer) {
4421			ti->error = "Cannot allocate buffer for recalculating";
4422			r = -ENOMEM;
4423			goto bad;
4424		}
4425		recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
4426		if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
4427			recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
4428		ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
4429		if (!ic->recalc_tags) {
4430			ti->error = "Cannot allocate tags for recalculating";
4431			r = -ENOMEM;
4432			goto bad;
4433		}
4434	} else {
4435		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4436			ti->error = "Recalculate can only be specified with internal_hash";
4437			r = -EINVAL;
4438			goto bad;
4439		}
4440	}
4441
4442	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4443	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4444	    dm_integrity_disable_recalculate(ic)) {
4445		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4446		r = -EOPNOTSUPP;
4447		goto bad;
4448	}
4449
4450	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4451			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
4452	if (IS_ERR(ic->bufio)) {
4453		r = PTR_ERR(ic->bufio);
4454		ti->error = "Cannot initialize dm-bufio";
4455		ic->bufio = NULL;
4456		goto bad;
4457	}
4458	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4459
4460	if (ic->mode != 'R') {
4461		r = create_journal(ic, &ti->error);
4462		if (r)
4463			goto bad;
4464
4465	}
4466
4467	if (ic->mode == 'B') {
4468		unsigned i;
4469		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4470
4471		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4472		if (!ic->recalc_bitmap) {
4473			r = -ENOMEM;
4474			goto bad;
4475		}
4476		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4477		if (!ic->may_write_bitmap) {
4478			r = -ENOMEM;
4479			goto bad;
4480		}
4481		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4482		if (!ic->bbs) {
4483			r = -ENOMEM;
4484			goto bad;
4485		}
4486		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4487		for (i = 0; i < ic->n_bitmap_blocks; i++) {
4488			struct bitmap_block_status *bbs = &ic->bbs[i];
4489			unsigned sector, pl_index, pl_offset;
4490
4491			INIT_WORK(&bbs->work, bitmap_block_work);
4492			bbs->ic = ic;
4493			bbs->idx = i;
4494			bio_list_init(&bbs->bio_queue);
4495			spin_lock_init(&bbs->bio_queue_lock);
4496
4497			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4498			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4499			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4500
4501			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4502		}
4503	}
4504
4505	if (should_write_sb) {
 
 
4506		init_journal(ic, 0, ic->journal_sections, 0);
4507		r = dm_integrity_failed(ic);
4508		if (unlikely(r)) {
4509			ti->error = "Error initializing journal";
4510			goto bad;
4511		}
4512		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
4513		if (r) {
4514			ti->error = "Error initializing superblock";
4515			goto bad;
4516		}
4517		ic->just_formatted = true;
4518	}
4519
4520	if (!ic->meta_dev) {
4521		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4522		if (r)
4523			goto bad;
4524	}
4525	if (ic->mode == 'B') {
4526		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4527		if (!max_io_len)
4528			max_io_len = 1U << 31;
4529		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4530		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4531			r = dm_set_target_max_io_len(ti, max_io_len);
4532			if (r)
4533				goto bad;
4534		}
4535	}
4536
4537	if (!ic->internal_hash)
4538		dm_integrity_set(ti, ic);
4539
4540	ti->num_flush_bios = 1;
4541	ti->flush_supported = true;
4542	if (ic->discard)
4543		ti->num_discard_bios = 1;
4544
4545	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
4546	return 0;
4547
4548bad:
4549	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
4550	dm_integrity_dtr(ti);
4551	return r;
4552}
4553
4554static void dm_integrity_dtr(struct dm_target *ti)
4555{
4556	struct dm_integrity_c *ic = ti->private;
4557
4558	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4559	BUG_ON(!list_empty(&ic->wait_list));
4560
4561	if (ic->mode == 'B')
4562		cancel_delayed_work_sync(&ic->bitmap_flush_work);
4563	if (ic->metadata_wq)
4564		destroy_workqueue(ic->metadata_wq);
4565	if (ic->wait_wq)
4566		destroy_workqueue(ic->wait_wq);
4567	if (ic->offload_wq)
4568		destroy_workqueue(ic->offload_wq);
4569	if (ic->commit_wq)
4570		destroy_workqueue(ic->commit_wq);
4571	if (ic->writer_wq)
4572		destroy_workqueue(ic->writer_wq);
4573	if (ic->recalc_wq)
4574		destroy_workqueue(ic->recalc_wq);
4575	vfree(ic->recalc_buffer);
4576	kvfree(ic->recalc_tags);
4577	kvfree(ic->bbs);
4578	if (ic->bufio)
4579		dm_bufio_client_destroy(ic->bufio);
4580	mempool_exit(&ic->journal_io_mempool);
4581	if (ic->io)
4582		dm_io_client_destroy(ic->io);
4583	if (ic->dev)
4584		dm_put_device(ti, ic->dev);
4585	if (ic->meta_dev)
4586		dm_put_device(ti, ic->meta_dev);
4587	dm_integrity_free_page_list(ic->journal);
4588	dm_integrity_free_page_list(ic->journal_io);
4589	dm_integrity_free_page_list(ic->journal_xor);
4590	dm_integrity_free_page_list(ic->recalc_bitmap);
4591	dm_integrity_free_page_list(ic->may_write_bitmap);
4592	if (ic->journal_scatterlist)
4593		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4594	if (ic->journal_io_scatterlist)
4595		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4596	if (ic->sk_requests) {
4597		unsigned i;
4598
4599		for (i = 0; i < ic->journal_sections; i++) {
4600			struct skcipher_request *req = ic->sk_requests[i];
4601			if (req) {
4602				kfree_sensitive(req->iv);
4603				skcipher_request_free(req);
4604			}
4605		}
4606		kvfree(ic->sk_requests);
4607	}
4608	kvfree(ic->journal_tree);
4609	if (ic->sb)
4610		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4611
4612	if (ic->internal_hash)
4613		crypto_free_shash(ic->internal_hash);
4614	free_alg(&ic->internal_hash_alg);
4615
4616	if (ic->journal_crypt)
4617		crypto_free_skcipher(ic->journal_crypt);
4618	free_alg(&ic->journal_crypt_alg);
4619
4620	if (ic->journal_mac)
4621		crypto_free_shash(ic->journal_mac);
4622	free_alg(&ic->journal_mac_alg);
4623
4624	kfree(ic);
4625	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
4626}
4627
4628static struct target_type integrity_target = {
4629	.name			= "integrity",
4630	.version		= {1, 10, 0},
4631	.module			= THIS_MODULE,
4632	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4633	.ctr			= dm_integrity_ctr,
4634	.dtr			= dm_integrity_dtr,
4635	.map			= dm_integrity_map,
4636	.postsuspend		= dm_integrity_postsuspend,
4637	.resume			= dm_integrity_resume,
4638	.status			= dm_integrity_status,
4639	.iterate_devices	= dm_integrity_iterate_devices,
4640	.io_hints		= dm_integrity_io_hints,
4641};
4642
4643static int __init dm_integrity_init(void)
4644{
4645	int r;
4646
4647	journal_io_cache = kmem_cache_create("integrity_journal_io",
4648					     sizeof(struct journal_io), 0, 0, NULL);
4649	if (!journal_io_cache) {
4650		DMERR("can't allocate journal io cache");
4651		return -ENOMEM;
4652	}
4653
4654	r = dm_register_target(&integrity_target);
4655
4656	if (r < 0)
4657		DMERR("register failed %d", r);
4658
4659	return r;
4660}
4661
4662static void __exit dm_integrity_exit(void)
4663{
4664	dm_unregister_target(&integrity_target);
4665	kmem_cache_destroy(journal_io_cache);
4666}
4667
4668module_init(dm_integrity_init);
4669module_exit(dm_integrity_exit);
4670
4671MODULE_AUTHOR("Milan Broz");
4672MODULE_AUTHOR("Mikulas Patocka");
4673MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4674MODULE_LICENSE("GPL");
v5.9
   1/*
   2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
   3 * Copyright (C) 2016-2017 Milan Broz
   4 * Copyright (C) 2016-2017 Mikulas Patocka
   5 *
   6 * This file is released under the GPL.
   7 */
   8
   9#include "dm-bio-record.h"
  10
  11#include <linux/compiler.h>
  12#include <linux/module.h>
  13#include <linux/device-mapper.h>
  14#include <linux/dm-io.h>
  15#include <linux/vmalloc.h>
  16#include <linux/sort.h>
  17#include <linux/rbtree.h>
  18#include <linux/delay.h>
  19#include <linux/random.h>
  20#include <linux/reboot.h>
  21#include <crypto/hash.h>
  22#include <crypto/skcipher.h>
  23#include <linux/async_tx.h>
  24#include <linux/dm-bufio.h>
  25
 
 
  26#define DM_MSG_PREFIX "integrity"
  27
  28#define DEFAULT_INTERLEAVE_SECTORS	32768
  29#define DEFAULT_JOURNAL_SIZE_FACTOR	7
  30#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
  31#define DEFAULT_BUFFER_SECTORS		128
  32#define DEFAULT_JOURNAL_WATERMARK	50
  33#define DEFAULT_SYNC_MSEC		10000
  34#define DEFAULT_MAX_JOURNAL_SECTORS	131072
  35#define MIN_LOG2_INTERLEAVE_SECTORS	3
  36#define MAX_LOG2_INTERLEAVE_SECTORS	31
  37#define METADATA_WORKQUEUE_MAX_ACTIVE	16
  38#define RECALC_SECTORS			8192
  39#define RECALC_WRITE_SUPER		16
  40#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
  41#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
  42#define DISCARD_FILLER			0xf6
 
  43
  44/*
  45 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
  46 * so it should not be enabled in the official kernel
  47 */
  48//#define DEBUG_PRINT
  49//#define INTERNAL_VERIFY
  50
  51/*
  52 * On disk structures
  53 */
  54
  55#define SB_MAGIC			"integrt"
  56#define SB_VERSION_1			1
  57#define SB_VERSION_2			2
  58#define SB_VERSION_3			3
  59#define SB_VERSION_4			4
 
  60#define SB_SECTORS			8
  61#define MAX_SECTORS_PER_BLOCK		8
  62
  63struct superblock {
  64	__u8 magic[8];
  65	__u8 version;
  66	__u8 log2_interleave_sectors;
  67	__u16 integrity_tag_size;
  68	__u32 journal_sections;
  69	__u64 provided_data_sectors;	/* userspace uses this value */
  70	__u32 flags;
  71	__u8 log2_sectors_per_block;
  72	__u8 log2_blocks_per_bitmap_bit;
  73	__u8 pad[2];
  74	__u64 recalc_sector;
 
 
  75};
  76
  77#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
  78#define SB_FLAG_RECALCULATING		0x2
  79#define SB_FLAG_DIRTY_BITMAP		0x4
  80#define SB_FLAG_FIXED_PADDING		0x8
 
  81
  82#define	JOURNAL_ENTRY_ROUNDUP		8
  83
  84typedef __u64 commit_id_t;
  85#define JOURNAL_MAC_PER_SECTOR		8
  86
  87struct journal_entry {
  88	union {
  89		struct {
  90			__u32 sector_lo;
  91			__u32 sector_hi;
  92		} s;
  93		__u64 sector;
  94	} u;
  95	commit_id_t last_bytes[];
  96	/* __u8 tag[0]; */
  97};
  98
  99#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
 100
 101#if BITS_PER_LONG == 64
 102#define journal_entry_set_sector(je, x)		do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
 103#else
 104#define journal_entry_set_sector(je, x)		do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
 105#endif
 106#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
 107#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
 108#define journal_entry_set_unused(je)		do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
 109#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
 110#define journal_entry_set_inprogress(je)	do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
 111
 112#define JOURNAL_BLOCK_SECTORS		8
 113#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
 114#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
 115
 116struct journal_sector {
 117	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
 118	__u8 mac[JOURNAL_MAC_PER_SECTOR];
 
 
 119	commit_id_t commit_id;
 120};
 121
 122#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
 123
 124#define METADATA_PADDING_SECTORS	8
 125
 126#define N_COMMIT_IDS			4
 127
 128static unsigned char prev_commit_seq(unsigned char seq)
 129{
 130	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
 131}
 132
 133static unsigned char next_commit_seq(unsigned char seq)
 134{
 135	return (seq + 1) % N_COMMIT_IDS;
 136}
 137
 138/*
 139 * In-memory structures
 140 */
 141
 142struct journal_node {
 143	struct rb_node node;
 144	sector_t sector;
 145};
 146
 147struct alg_spec {
 148	char *alg_string;
 149	char *key_string;
 150	__u8 *key;
 151	unsigned key_size;
 152};
 153
 154struct dm_integrity_c {
 155	struct dm_dev *dev;
 156	struct dm_dev *meta_dev;
 157	unsigned tag_size;
 158	__s8 log2_tag_size;
 159	sector_t start;
 160	mempool_t journal_io_mempool;
 161	struct dm_io_client *io;
 162	struct dm_bufio_client *bufio;
 163	struct workqueue_struct *metadata_wq;
 164	struct superblock *sb;
 165	unsigned journal_pages;
 166	unsigned n_bitmap_blocks;
 167
 168	struct page_list *journal;
 169	struct page_list *journal_io;
 170	struct page_list *journal_xor;
 171	struct page_list *recalc_bitmap;
 172	struct page_list *may_write_bitmap;
 173	struct bitmap_block_status *bbs;
 174	unsigned bitmap_flush_interval;
 175	int synchronous_mode;
 176	struct bio_list synchronous_bios;
 177	struct delayed_work bitmap_flush_work;
 178
 179	struct crypto_skcipher *journal_crypt;
 180	struct scatterlist **journal_scatterlist;
 181	struct scatterlist **journal_io_scatterlist;
 182	struct skcipher_request **sk_requests;
 183
 184	struct crypto_shash *journal_mac;
 185
 186	struct journal_node *journal_tree;
 187	struct rb_root journal_tree_root;
 188
 189	sector_t provided_data_sectors;
 190
 191	unsigned short journal_entry_size;
 192	unsigned char journal_entries_per_sector;
 193	unsigned char journal_section_entries;
 194	unsigned short journal_section_sectors;
 195	unsigned journal_sections;
 196	unsigned journal_entries;
 197	sector_t data_device_sectors;
 198	sector_t meta_device_sectors;
 199	unsigned initial_sectors;
 200	unsigned metadata_run;
 201	__s8 log2_metadata_run;
 202	__u8 log2_buffer_sectors;
 203	__u8 sectors_per_block;
 204	__u8 log2_blocks_per_bitmap_bit;
 205
 206	unsigned char mode;
 207
 208	int failed;
 209
 210	struct crypto_shash *internal_hash;
 211
 212	struct dm_target *ti;
 213
 214	/* these variables are locked with endio_wait.lock */
 215	struct rb_root in_progress;
 216	struct list_head wait_list;
 217	wait_queue_head_t endio_wait;
 218	struct workqueue_struct *wait_wq;
 219	struct workqueue_struct *offload_wq;
 220
 221	unsigned char commit_seq;
 222	commit_id_t commit_ids[N_COMMIT_IDS];
 223
 224	unsigned committed_section;
 225	unsigned n_committed_sections;
 226
 227	unsigned uncommitted_section;
 228	unsigned n_uncommitted_sections;
 229
 230	unsigned free_section;
 231	unsigned char free_section_entry;
 232	unsigned free_sectors;
 233
 234	unsigned free_sectors_threshold;
 235
 236	struct workqueue_struct *commit_wq;
 237	struct work_struct commit_work;
 238
 239	struct workqueue_struct *writer_wq;
 240	struct work_struct writer_work;
 241
 242	struct workqueue_struct *recalc_wq;
 243	struct work_struct recalc_work;
 244	u8 *recalc_buffer;
 245	u8 *recalc_tags;
 246
 247	struct bio_list flush_bio_list;
 248
 249	unsigned long autocommit_jiffies;
 250	struct timer_list autocommit_timer;
 251	unsigned autocommit_msec;
 252
 253	wait_queue_head_t copy_to_journal_wait;
 254
 255	struct completion crypto_backoff;
 256
 
 257	bool journal_uptodate;
 258	bool just_formatted;
 259	bool recalculate_flag;
 
 
 260	bool fix_padding;
 261	bool discard;
 
 262
 263	struct alg_spec internal_hash_alg;
 264	struct alg_spec journal_crypt_alg;
 265	struct alg_spec journal_mac_alg;
 266
 267	atomic64_t number_of_mismatches;
 268
 269	struct notifier_block reboot_notifier;
 270};
 271
 272struct dm_integrity_range {
 273	sector_t logical_sector;
 274	sector_t n_sectors;
 275	bool waiting;
 276	union {
 277		struct rb_node node;
 278		struct {
 279			struct task_struct *task;
 280			struct list_head wait_entry;
 281		};
 282	};
 283};
 284
 285struct dm_integrity_io {
 286	struct work_struct work;
 287
 288	struct dm_integrity_c *ic;
 289	enum req_opf op;
 290	bool fua;
 291
 292	struct dm_integrity_range range;
 293
 294	sector_t metadata_block;
 295	unsigned metadata_offset;
 296
 297	atomic_t in_flight;
 298	blk_status_t bi_status;
 299
 300	struct completion *completion;
 301
 302	struct dm_bio_details bio_details;
 303};
 304
 305struct journal_completion {
 306	struct dm_integrity_c *ic;
 307	atomic_t in_flight;
 308	struct completion comp;
 309};
 310
 311struct journal_io {
 312	struct dm_integrity_range range;
 313	struct journal_completion *comp;
 314};
 315
 316struct bitmap_block_status {
 317	struct work_struct work;
 318	struct dm_integrity_c *ic;
 319	unsigned idx;
 320	unsigned long *bitmap;
 321	struct bio_list bio_queue;
 322	spinlock_t bio_queue_lock;
 323
 324};
 325
 326static struct kmem_cache *journal_io_cache;
 327
 328#define JOURNAL_IO_MEMPOOL	32
 329
 330#ifdef DEBUG_PRINT
 331#define DEBUG_print(x, ...)	printk(KERN_DEBUG x, ##__VA_ARGS__)
 332static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
 333{
 334	va_list args;
 335	va_start(args, msg);
 336	vprintk(msg, args);
 337	va_end(args);
 338	if (len)
 339		pr_cont(":");
 340	while (len) {
 341		pr_cont(" %02x", *bytes);
 342		bytes++;
 343		len--;
 344	}
 345	pr_cont("\n");
 346}
 347#define DEBUG_bytes(bytes, len, msg, ...)	__DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
 348#else
 349#define DEBUG_print(x, ...)			do { } while (0)
 350#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
 351#endif
 352
 353static void dm_integrity_prepare(struct request *rq)
 354{
 355}
 356
 357static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
 358{
 359}
 360
 361/*
 362 * DM Integrity profile, protection is performed layer above (dm-crypt)
 363 */
 364static const struct blk_integrity_profile dm_integrity_profile = {
 365	.name			= "DM-DIF-EXT-TAG",
 366	.generate_fn		= NULL,
 367	.verify_fn		= NULL,
 368	.prepare_fn		= dm_integrity_prepare,
 369	.complete_fn		= dm_integrity_complete,
 370};
 371
 372static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
 373static void integrity_bio_wait(struct work_struct *w);
 374static void dm_integrity_dtr(struct dm_target *ti);
 375
 376static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
 377{
 378	if (err == -EILSEQ)
 379		atomic64_inc(&ic->number_of_mismatches);
 380	if (!cmpxchg(&ic->failed, 0, err))
 381		DMERR("Error on %s: %d", msg, err);
 382}
 383
 384static int dm_integrity_failed(struct dm_integrity_c *ic)
 385{
 386	return READ_ONCE(ic->failed);
 387}
 388
 
 
 
 
 
 
 
 
 
 
 
 389static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
 390					  unsigned j, unsigned char seq)
 391{
 392	/*
 393	 * Xor the number with section and sector, so that if a piece of
 394	 * journal is written at wrong place, it is detected.
 395	 */
 396	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
 397}
 398
 399static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
 400				sector_t *area, sector_t *offset)
 401{
 402	if (!ic->meta_dev) {
 403		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
 404		*area = data_sector >> log2_interleave_sectors;
 405		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
 406	} else {
 407		*area = 0;
 408		*offset = data_sector;
 409	}
 410}
 411
 412#define sector_to_block(ic, n)						\
 413do {									\
 414	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
 415	(n) >>= (ic)->sb->log2_sectors_per_block;			\
 416} while (0)
 417
 418static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
 419					    sector_t offset, unsigned *metadata_offset)
 420{
 421	__u64 ms;
 422	unsigned mo;
 423
 424	ms = area << ic->sb->log2_interleave_sectors;
 425	if (likely(ic->log2_metadata_run >= 0))
 426		ms += area << ic->log2_metadata_run;
 427	else
 428		ms += area * ic->metadata_run;
 429	ms >>= ic->log2_buffer_sectors;
 430
 431	sector_to_block(ic, offset);
 432
 433	if (likely(ic->log2_tag_size >= 0)) {
 434		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
 435		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 436	} else {
 437		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
 438		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
 439	}
 440	*metadata_offset = mo;
 441	return ms;
 442}
 443
 444static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
 445{
 446	sector_t result;
 447
 448	if (ic->meta_dev)
 449		return offset;
 450
 451	result = area << ic->sb->log2_interleave_sectors;
 452	if (likely(ic->log2_metadata_run >= 0))
 453		result += (area + 1) << ic->log2_metadata_run;
 454	else
 455		result += (area + 1) * ic->metadata_run;
 456
 457	result += (sector_t)ic->initial_sectors + offset;
 458	result += ic->start;
 459
 460	return result;
 461}
 462
 463static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
 464{
 465	if (unlikely(*sec_ptr >= ic->journal_sections))
 466		*sec_ptr -= ic->journal_sections;
 467}
 468
 469static void sb_set_version(struct dm_integrity_c *ic)
 470{
 471	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
 
 
 472		ic->sb->version = SB_VERSION_4;
 473	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
 474		ic->sb->version = SB_VERSION_3;
 475	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
 476		ic->sb->version = SB_VERSION_2;
 477	else
 478		ic->sb->version = SB_VERSION_1;
 479}
 480
 481static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 482{
 483	struct dm_io_request io_req;
 484	struct dm_io_region io_loc;
 
 
 485
 486	io_req.bi_op = op;
 487	io_req.bi_op_flags = op_flags;
 488	io_req.mem.type = DM_IO_KMEM;
 489	io_req.mem.ptr.addr = ic->sb;
 490	io_req.notify.fn = NULL;
 491	io_req.client = ic->io;
 492	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
 493	io_loc.sector = ic->start;
 494	io_loc.count = SB_SECTORS;
 495
 496	if (op == REQ_OP_WRITE)
 497		sb_set_version(ic);
 
 
 
 
 
 
 498
 499	return dm_io(&io_req, 1, &io_loc, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 500}
 501
 502#define BITMAP_OP_TEST_ALL_SET		0
 503#define BITMAP_OP_TEST_ALL_CLEAR	1
 504#define BITMAP_OP_SET			2
 505#define BITMAP_OP_CLEAR			3
 506
 507static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
 508			    sector_t sector, sector_t n_sectors, int mode)
 509{
 510	unsigned long bit, end_bit, this_end_bit, page, end_page;
 511	unsigned long *data;
 512
 513	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
 514		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
 515			sector,
 516			n_sectors,
 517			ic->sb->log2_sectors_per_block,
 518			ic->log2_blocks_per_bitmap_bit,
 519			mode);
 520		BUG();
 521	}
 522
 523	if (unlikely(!n_sectors))
 524		return true;
 525
 526	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 527	end_bit = (sector + n_sectors - 1) >>
 528		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 529
 530	page = bit / (PAGE_SIZE * 8);
 531	bit %= PAGE_SIZE * 8;
 532
 533	end_page = end_bit / (PAGE_SIZE * 8);
 534	end_bit %= PAGE_SIZE * 8;
 535
 536repeat:
 537	if (page < end_page) {
 538		this_end_bit = PAGE_SIZE * 8 - 1;
 539	} else {
 540		this_end_bit = end_bit;
 541	}
 542
 543	data = lowmem_page_address(bitmap[page].page);
 544
 545	if (mode == BITMAP_OP_TEST_ALL_SET) {
 546		while (bit <= this_end_bit) {
 547			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 548				do {
 549					if (data[bit / BITS_PER_LONG] != -1)
 550						return false;
 551					bit += BITS_PER_LONG;
 552				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 553				continue;
 554			}
 555			if (!test_bit(bit, data))
 556				return false;
 557			bit++;
 558		}
 559	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
 560		while (bit <= this_end_bit) {
 561			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 562				do {
 563					if (data[bit / BITS_PER_LONG] != 0)
 564						return false;
 565					bit += BITS_PER_LONG;
 566				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 567				continue;
 568			}
 569			if (test_bit(bit, data))
 570				return false;
 571			bit++;
 572		}
 573	} else if (mode == BITMAP_OP_SET) {
 574		while (bit <= this_end_bit) {
 575			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 576				do {
 577					data[bit / BITS_PER_LONG] = -1;
 578					bit += BITS_PER_LONG;
 579				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 580				continue;
 581			}
 582			__set_bit(bit, data);
 583			bit++;
 584		}
 585	} else if (mode == BITMAP_OP_CLEAR) {
 586		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
 587			clear_page(data);
 588		else while (bit <= this_end_bit) {
 589			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
 590				do {
 591					data[bit / BITS_PER_LONG] = 0;
 592					bit += BITS_PER_LONG;
 593				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
 594				continue;
 595			}
 596			__clear_bit(bit, data);
 597			bit++;
 598		}
 599	} else {
 600		BUG();
 601	}
 602
 603	if (unlikely(page < end_page)) {
 604		bit = 0;
 605		page++;
 606		goto repeat;
 607	}
 608
 609	return true;
 610}
 611
 612static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
 613{
 614	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
 615	unsigned i;
 616
 617	for (i = 0; i < n_bitmap_pages; i++) {
 618		unsigned long *dst_data = lowmem_page_address(dst[i].page);
 619		unsigned long *src_data = lowmem_page_address(src[i].page);
 620		copy_page(dst_data, src_data);
 621	}
 622}
 623
 624static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
 625{
 626	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
 627	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
 628
 629	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
 630	return &ic->bbs[bitmap_block];
 631}
 632
 633static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 634				 bool e, const char *function)
 635{
 636#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
 637	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
 638
 639	if (unlikely(section >= ic->journal_sections) ||
 640	    unlikely(offset >= limit)) {
 641		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
 642		       function, section, offset, ic->journal_sections, limit);
 643		BUG();
 644	}
 645#endif
 646}
 647
 648static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
 649			       unsigned *pl_index, unsigned *pl_offset)
 650{
 651	unsigned sector;
 652
 653	access_journal_check(ic, section, offset, false, "page_list_location");
 654
 655	sector = section * ic->journal_section_sectors + offset;
 656
 657	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 658	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 659}
 660
 661static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
 662					       unsigned section, unsigned offset, unsigned *n_sectors)
 663{
 664	unsigned pl_index, pl_offset;
 665	char *va;
 666
 667	page_list_location(ic, section, offset, &pl_index, &pl_offset);
 668
 669	if (n_sectors)
 670		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
 671
 672	va = lowmem_page_address(pl[pl_index].page);
 673
 674	return (struct journal_sector *)(va + pl_offset);
 675}
 676
 677static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
 678{
 679	return access_page_list(ic, ic->journal, section, offset, NULL);
 680}
 681
 682static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
 683{
 684	unsigned rel_sector, offset;
 685	struct journal_sector *js;
 686
 687	access_journal_check(ic, section, n, true, "access_journal_entry");
 688
 689	rel_sector = n % JOURNAL_BLOCK_SECTORS;
 690	offset = n / JOURNAL_BLOCK_SECTORS;
 691
 692	js = access_journal(ic, section, rel_sector);
 693	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
 694}
 695
 696static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
 697{
 698	n <<= ic->sb->log2_sectors_per_block;
 699
 700	n += JOURNAL_BLOCK_SECTORS;
 701
 702	access_journal_check(ic, section, n, false, "access_journal_data");
 703
 704	return access_journal(ic, section, n);
 705}
 706
 707static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
 708{
 709	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
 710	int r;
 711	unsigned j, size;
 712
 713	desc->tfm = ic->journal_mac;
 714
 715	r = crypto_shash_init(desc);
 716	if (unlikely(r)) {
 717		dm_integrity_io_error(ic, "crypto_shash_init", r);
 718		goto err;
 719	}
 720
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 721	for (j = 0; j < ic->journal_section_entries; j++) {
 722		struct journal_entry *je = access_journal_entry(ic, section, j);
 723		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
 724		if (unlikely(r)) {
 725			dm_integrity_io_error(ic, "crypto_shash_update", r);
 726			goto err;
 727		}
 728	}
 729
 730	size = crypto_shash_digestsize(ic->journal_mac);
 731
 732	if (likely(size <= JOURNAL_MAC_SIZE)) {
 733		r = crypto_shash_final(desc, result);
 734		if (unlikely(r)) {
 735			dm_integrity_io_error(ic, "crypto_shash_final", r);
 736			goto err;
 737		}
 738		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
 739	} else {
 740		__u8 digest[HASH_MAX_DIGESTSIZE];
 741
 742		if (WARN_ON(size > sizeof(digest))) {
 743			dm_integrity_io_error(ic, "digest_size", -EINVAL);
 744			goto err;
 745		}
 746		r = crypto_shash_final(desc, digest);
 747		if (unlikely(r)) {
 748			dm_integrity_io_error(ic, "crypto_shash_final", r);
 749			goto err;
 750		}
 751		memcpy(result, digest, JOURNAL_MAC_SIZE);
 752	}
 753
 754	return;
 755err:
 756	memset(result, 0, JOURNAL_MAC_SIZE);
 757}
 758
 759static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
 760{
 761	__u8 result[JOURNAL_MAC_SIZE];
 762	unsigned j;
 763
 764	if (!ic->journal_mac)
 765		return;
 766
 767	section_mac(ic, section, result);
 768
 769	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
 770		struct journal_sector *js = access_journal(ic, section, j);
 771
 772		if (likely(wr))
 773			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
 774		else {
 775			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
 776				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
 
 
 777		}
 778	}
 779}
 780
 781static void complete_journal_op(void *context)
 782{
 783	struct journal_completion *comp = context;
 784	BUG_ON(!atomic_read(&comp->in_flight));
 785	if (likely(atomic_dec_and_test(&comp->in_flight)))
 786		complete(&comp->comp);
 787}
 788
 789static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 790			unsigned n_sections, struct journal_completion *comp)
 791{
 792	struct async_submit_ctl submit;
 793	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
 794	unsigned pl_index, pl_offset, section_index;
 795	struct page_list *source_pl, *target_pl;
 796
 797	if (likely(encrypt)) {
 798		source_pl = ic->journal;
 799		target_pl = ic->journal_io;
 800	} else {
 801		source_pl = ic->journal_io;
 802		target_pl = ic->journal;
 803	}
 804
 805	page_list_location(ic, section, 0, &pl_index, &pl_offset);
 806
 807	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
 808
 809	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
 810
 811	section_index = pl_index;
 812
 813	do {
 814		size_t this_step;
 815		struct page *src_pages[2];
 816		struct page *dst_page;
 817
 818		while (unlikely(pl_index == section_index)) {
 819			unsigned dummy;
 820			if (likely(encrypt))
 821				rw_section_mac(ic, section, true);
 822			section++;
 823			n_sections--;
 824			if (!n_sections)
 825				break;
 826			page_list_location(ic, section, 0, &section_index, &dummy);
 827		}
 828
 829		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
 830		dst_page = target_pl[pl_index].page;
 831		src_pages[0] = source_pl[pl_index].page;
 832		src_pages[1] = ic->journal_xor[pl_index].page;
 833
 834		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
 835
 836		pl_index++;
 837		pl_offset = 0;
 838		n_bytes -= this_step;
 839	} while (n_bytes);
 840
 841	BUG_ON(n_sections);
 842
 843	async_tx_issue_pending_all();
 844}
 845
 846static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 847{
 848	struct journal_completion *comp = req->data;
 849	if (unlikely(err)) {
 850		if (likely(err == -EINPROGRESS)) {
 851			complete(&comp->ic->crypto_backoff);
 852			return;
 853		}
 854		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
 855	}
 856	complete_journal_op(comp);
 857}
 858
 859static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 860{
 861	int r;
 862	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 863				      complete_journal_encrypt, comp);
 864	if (likely(encrypt))
 865		r = crypto_skcipher_encrypt(req);
 866	else
 867		r = crypto_skcipher_decrypt(req);
 868	if (likely(!r))
 869		return false;
 870	if (likely(r == -EINPROGRESS))
 871		return true;
 872	if (likely(r == -EBUSY)) {
 873		wait_for_completion(&comp->ic->crypto_backoff);
 874		reinit_completion(&comp->ic->crypto_backoff);
 875		return true;
 876	}
 877	dm_integrity_io_error(comp->ic, "encrypt", r);
 878	return false;
 879}
 880
 881static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 882			  unsigned n_sections, struct journal_completion *comp)
 883{
 884	struct scatterlist **source_sg;
 885	struct scatterlist **target_sg;
 886
 887	atomic_add(2, &comp->in_flight);
 888
 889	if (likely(encrypt)) {
 890		source_sg = ic->journal_scatterlist;
 891		target_sg = ic->journal_io_scatterlist;
 892	} else {
 893		source_sg = ic->journal_io_scatterlist;
 894		target_sg = ic->journal_scatterlist;
 895	}
 896
 897	do {
 898		struct skcipher_request *req;
 899		unsigned ivsize;
 900		char *iv;
 901
 902		if (likely(encrypt))
 903			rw_section_mac(ic, section, true);
 904
 905		req = ic->sk_requests[section];
 906		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
 907		iv = req->iv;
 908
 909		memcpy(iv, iv + ivsize, ivsize);
 910
 911		req->src = source_sg[section];
 912		req->dst = target_sg[section];
 913
 914		if (unlikely(do_crypt(encrypt, req, comp)))
 915			atomic_inc(&comp->in_flight);
 916
 917		section++;
 918		n_sections--;
 919	} while (n_sections);
 920
 921	atomic_dec(&comp->in_flight);
 922	complete_journal_op(comp);
 923}
 924
 925static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
 926			    unsigned n_sections, struct journal_completion *comp)
 927{
 928	if (ic->journal_xor)
 929		return xor_journal(ic, encrypt, section, n_sections, comp);
 930	else
 931		return crypt_journal(ic, encrypt, section, n_sections, comp);
 932}
 933
 934static void complete_journal_io(unsigned long error, void *context)
 935{
 936	struct journal_completion *comp = context;
 937	if (unlikely(error != 0))
 938		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
 939	complete_journal_op(comp);
 940}
 941
 942static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
 943			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
 
 944{
 945	struct dm_io_request io_req;
 946	struct dm_io_region io_loc;
 947	unsigned pl_index, pl_offset;
 948	int r;
 949
 950	if (unlikely(dm_integrity_failed(ic))) {
 951		if (comp)
 952			complete_journal_io(-1UL, comp);
 953		return;
 954	}
 955
 956	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
 957	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
 958
 959	io_req.bi_op = op;
 960	io_req.bi_op_flags = op_flags;
 961	io_req.mem.type = DM_IO_PAGE_LIST;
 962	if (ic->journal_io)
 963		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
 964	else
 965		io_req.mem.ptr.pl = &ic->journal[pl_index];
 966	io_req.mem.offset = pl_offset;
 967	if (likely(comp != NULL)) {
 968		io_req.notify.fn = complete_journal_io;
 969		io_req.notify.context = comp;
 970	} else {
 971		io_req.notify.fn = NULL;
 972	}
 973	io_req.client = ic->io;
 974	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
 975	io_loc.sector = ic->start + SB_SECTORS + sector;
 976	io_loc.count = n_sectors;
 977
 978	r = dm_io(&io_req, 1, &io_loc, NULL);
 979	if (unlikely(r)) {
 980		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
 
 981		if (comp) {
 982			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
 983			complete_journal_io(-1UL, comp);
 984		}
 985	}
 986}
 987
 988static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
 989		       unsigned n_sections, struct journal_completion *comp)
 
 990{
 991	unsigned sector, n_sectors;
 992
 993	sector = section * ic->journal_section_sectors;
 994	n_sectors = n_sections * ic->journal_section_sectors;
 995
 996	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
 997}
 998
 999static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
1000{
1001	struct journal_completion io_comp;
1002	struct journal_completion crypt_comp_1;
1003	struct journal_completion crypt_comp_2;
1004	unsigned i;
1005
1006	io_comp.ic = ic;
1007	init_completion(&io_comp.comp);
1008
1009	if (commit_start + commit_sections <= ic->journal_sections) {
1010		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1011		if (ic->journal_io) {
1012			crypt_comp_1.ic = ic;
1013			init_completion(&crypt_comp_1.comp);
1014			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1015			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
1016			wait_for_completion_io(&crypt_comp_1.comp);
1017		} else {
1018			for (i = 0; i < commit_sections; i++)
1019				rw_section_mac(ic, commit_start + i, true);
1020		}
1021		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
1022			   commit_sections, &io_comp);
1023	} else {
1024		unsigned to_end;
1025		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
1026		to_end = ic->journal_sections - commit_start;
1027		if (ic->journal_io) {
1028			crypt_comp_1.ic = ic;
1029			init_completion(&crypt_comp_1.comp);
1030			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1031			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
1032			if (try_wait_for_completion(&crypt_comp_1.comp)) {
1033				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
 
1034				reinit_completion(&crypt_comp_1.comp);
1035				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
1036				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
1037				wait_for_completion_io(&crypt_comp_1.comp);
1038			} else {
1039				crypt_comp_2.ic = ic;
1040				init_completion(&crypt_comp_2.comp);
1041				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
1042				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
1043				wait_for_completion_io(&crypt_comp_1.comp);
1044				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1045				wait_for_completion_io(&crypt_comp_2.comp);
1046			}
1047		} else {
1048			for (i = 0; i < to_end; i++)
1049				rw_section_mac(ic, commit_start + i, true);
1050			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
1051			for (i = 0; i < commit_sections - to_end; i++)
1052				rw_section_mac(ic, i, true);
1053		}
1054		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
1055	}
1056
1057	wait_for_completion_io(&io_comp.comp);
1058}
1059
1060static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
1061			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
1062{
1063	struct dm_io_request io_req;
1064	struct dm_io_region io_loc;
1065	int r;
1066	unsigned sector, pl_index, pl_offset;
1067
1068	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
1069
1070	if (unlikely(dm_integrity_failed(ic))) {
1071		fn(-1UL, data);
1072		return;
1073	}
1074
1075	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
1076
1077	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
1078	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
1079
1080	io_req.bi_op = REQ_OP_WRITE;
1081	io_req.bi_op_flags = 0;
1082	io_req.mem.type = DM_IO_PAGE_LIST;
1083	io_req.mem.ptr.pl = &ic->journal[pl_index];
1084	io_req.mem.offset = pl_offset;
1085	io_req.notify.fn = fn;
1086	io_req.notify.context = data;
1087	io_req.client = ic->io;
1088	io_loc.bdev = ic->dev->bdev;
1089	io_loc.sector = target;
1090	io_loc.count = n_sectors;
1091
1092	r = dm_io(&io_req, 1, &io_loc, NULL);
1093	if (unlikely(r)) {
1094		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
1095		fn(-1UL, data);
1096	}
1097}
1098
1099static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
1100{
1101	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
1102	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
1103}
1104
1105static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1106{
1107	struct rb_node **n = &ic->in_progress.rb_node;
1108	struct rb_node *parent;
1109
1110	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
1111
1112	if (likely(check_waiting)) {
1113		struct dm_integrity_range *range;
1114		list_for_each_entry(range, &ic->wait_list, wait_entry) {
1115			if (unlikely(ranges_overlap(range, new_range)))
1116				return false;
1117		}
1118	}
1119
1120	parent = NULL;
1121
1122	while (*n) {
1123		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
1124
1125		parent = *n;
1126		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
1127			n = &range->node.rb_left;
1128		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
1129			n = &range->node.rb_right;
1130		} else {
1131			return false;
1132		}
1133	}
1134
1135	rb_link_node(&new_range->node, parent, n);
1136	rb_insert_color(&new_range->node, &ic->in_progress);
1137
1138	return true;
1139}
1140
1141static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1142{
1143	rb_erase(&range->node, &ic->in_progress);
1144	while (unlikely(!list_empty(&ic->wait_list))) {
1145		struct dm_integrity_range *last_range =
1146			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1147		struct task_struct *last_range_task;
1148		last_range_task = last_range->task;
1149		list_del(&last_range->wait_entry);
1150		if (!add_new_range(ic, last_range, false)) {
1151			last_range->task = last_range_task;
1152			list_add(&last_range->wait_entry, &ic->wait_list);
1153			break;
1154		}
1155		last_range->waiting = false;
1156		wake_up_process(last_range_task);
1157	}
1158}
1159
1160static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1161{
1162	unsigned long flags;
1163
1164	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1165	remove_range_unlocked(ic, range);
1166	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1167}
1168
1169static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1170{
1171	new_range->waiting = true;
1172	list_add_tail(&new_range->wait_entry, &ic->wait_list);
1173	new_range->task = current;
1174	do {
1175		__set_current_state(TASK_UNINTERRUPTIBLE);
1176		spin_unlock_irq(&ic->endio_wait.lock);
1177		io_schedule();
1178		spin_lock_irq(&ic->endio_wait.lock);
1179	} while (unlikely(new_range->waiting));
1180}
1181
1182static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1183{
1184	if (unlikely(!add_new_range(ic, new_range, true)))
1185		wait_and_add_new_range(ic, new_range);
1186}
1187
1188static void init_journal_node(struct journal_node *node)
1189{
1190	RB_CLEAR_NODE(&node->node);
1191	node->sector = (sector_t)-1;
1192}
1193
1194static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1195{
1196	struct rb_node **link;
1197	struct rb_node *parent;
1198
1199	node->sector = sector;
1200	BUG_ON(!RB_EMPTY_NODE(&node->node));
1201
1202	link = &ic->journal_tree_root.rb_node;
1203	parent = NULL;
1204
1205	while (*link) {
1206		struct journal_node *j;
1207		parent = *link;
1208		j = container_of(parent, struct journal_node, node);
1209		if (sector < j->sector)
1210			link = &j->node.rb_left;
1211		else
1212			link = &j->node.rb_right;
1213	}
1214
1215	rb_link_node(&node->node, parent, link);
1216	rb_insert_color(&node->node, &ic->journal_tree_root);
1217}
1218
1219static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1220{
1221	BUG_ON(RB_EMPTY_NODE(&node->node));
1222	rb_erase(&node->node, &ic->journal_tree_root);
1223	init_journal_node(node);
1224}
1225
1226#define NOT_FOUND	(-1U)
1227
1228static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1229{
1230	struct rb_node *n = ic->journal_tree_root.rb_node;
1231	unsigned found = NOT_FOUND;
1232	*next_sector = (sector_t)-1;
1233	while (n) {
1234		struct journal_node *j = container_of(n, struct journal_node, node);
1235		if (sector == j->sector) {
1236			found = j - ic->journal_tree;
1237		}
1238		if (sector < j->sector) {
1239			*next_sector = j->sector;
1240			n = j->node.rb_left;
1241		} else {
1242			n = j->node.rb_right;
1243		}
1244	}
1245
1246	return found;
1247}
1248
1249static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
1250{
1251	struct journal_node *node, *next_node;
1252	struct rb_node *next;
1253
1254	if (unlikely(pos >= ic->journal_entries))
1255		return false;
1256	node = &ic->journal_tree[pos];
1257	if (unlikely(RB_EMPTY_NODE(&node->node)))
1258		return false;
1259	if (unlikely(node->sector != sector))
1260		return false;
1261
1262	next = rb_next(&node->node);
1263	if (unlikely(!next))
1264		return true;
1265
1266	next_node = container_of(next, struct journal_node, node);
1267	return next_node->sector != sector;
1268}
1269
1270static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1271{
1272	struct rb_node *next;
1273	struct journal_node *next_node;
1274	unsigned next_section;
1275
1276	BUG_ON(RB_EMPTY_NODE(&node->node));
1277
1278	next = rb_next(&node->node);
1279	if (unlikely(!next))
1280		return false;
1281
1282	next_node = container_of(next, struct journal_node, node);
1283
1284	if (next_node->sector != node->sector)
1285		return false;
1286
1287	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1288	if (next_section >= ic->committed_section &&
1289	    next_section < ic->committed_section + ic->n_committed_sections)
1290		return true;
1291	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1292		return true;
1293
1294	return false;
1295}
1296
1297#define TAG_READ	0
1298#define TAG_WRITE	1
1299#define TAG_CMP		2
1300
1301static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1302			       unsigned *metadata_offset, unsigned total_size, int op)
1303{
1304#define MAY_BE_FILLER		1
1305#define MAY_BE_HASH		2
1306	unsigned hash_offset = 0;
1307	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1308
1309	do {
1310		unsigned char *data, *dp;
1311		struct dm_buffer *b;
1312		unsigned to_copy;
1313		int r;
1314
1315		r = dm_integrity_failed(ic);
1316		if (unlikely(r))
1317			return r;
1318
1319		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1320		if (IS_ERR(data))
1321			return PTR_ERR(data);
1322
1323		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1324		dp = data + *metadata_offset;
1325		if (op == TAG_READ) {
1326			memcpy(tag, dp, to_copy);
1327		} else if (op == TAG_WRITE) {
1328			memcpy(dp, tag, to_copy);
1329			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
 
 
1330		} else {
1331			/* e.g.: op == TAG_CMP */
1332
1333			if (likely(is_power_of_2(ic->tag_size))) {
1334				if (unlikely(memcmp(dp, tag, to_copy)))
1335					if (unlikely(!ic->discard) ||
1336					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
1337						goto thorough_test;
1338				}
1339			} else {
1340				unsigned i, ts;
1341thorough_test:
1342				ts = total_size;
1343
1344				for (i = 0; i < to_copy; i++, ts--) {
1345					if (unlikely(dp[i] != tag[i]))
1346						may_be &= ~MAY_BE_HASH;
1347					if (likely(dp[i] != DISCARD_FILLER))
1348						may_be &= ~MAY_BE_FILLER;
1349					hash_offset++;
1350					if (unlikely(hash_offset == ic->tag_size)) {
1351						if (unlikely(!may_be)) {
1352							dm_bufio_release(b);
1353							return ts;
1354						}
1355						hash_offset = 0;
1356						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1357					}
1358				}
1359			}
1360		}
1361		dm_bufio_release(b);
1362
1363		tag += to_copy;
1364		*metadata_offset += to_copy;
1365		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1366			(*metadata_block)++;
1367			*metadata_offset = 0;
1368		}
1369
1370		if (unlikely(!is_power_of_2(ic->tag_size))) {
1371			hash_offset = (hash_offset + to_copy) % ic->tag_size;
1372		}
1373
1374		total_size -= to_copy;
1375	} while (unlikely(total_size));
1376
1377	return 0;
1378#undef MAY_BE_FILLER
1379#undef MAY_BE_HASH
1380}
1381
1382static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1383{
1384	int r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1385	r = dm_bufio_write_dirty_buffers(ic->bufio);
1386	if (unlikely(r))
1387		dm_integrity_io_error(ic, "writing tags", r);
 
 
 
1388}
1389
1390static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1391{
1392	DECLARE_WAITQUEUE(wait, current);
1393	__add_wait_queue(&ic->endio_wait, &wait);
1394	__set_current_state(TASK_UNINTERRUPTIBLE);
1395	spin_unlock_irq(&ic->endio_wait.lock);
1396	io_schedule();
1397	spin_lock_irq(&ic->endio_wait.lock);
1398	__remove_wait_queue(&ic->endio_wait, &wait);
1399}
1400
1401static void autocommit_fn(struct timer_list *t)
1402{
1403	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1404
1405	if (likely(!dm_integrity_failed(ic)))
1406		queue_work(ic->commit_wq, &ic->commit_work);
1407}
1408
1409static void schedule_autocommit(struct dm_integrity_c *ic)
1410{
1411	if (!timer_pending(&ic->autocommit_timer))
1412		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1413}
1414
1415static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1416{
1417	struct bio *bio;
1418	unsigned long flags;
1419
1420	spin_lock_irqsave(&ic->endio_wait.lock, flags);
1421	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1422	bio_list_add(&ic->flush_bio_list, bio);
1423	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1424
1425	queue_work(ic->commit_wq, &ic->commit_work);
1426}
1427
1428static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1429{
1430	int r = dm_integrity_failed(ic);
1431	if (unlikely(r) && !bio->bi_status)
1432		bio->bi_status = errno_to_blk_status(r);
1433	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1434		unsigned long flags;
1435		spin_lock_irqsave(&ic->endio_wait.lock, flags);
1436		bio_list_add(&ic->synchronous_bios, bio);
1437		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1438		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1439		return;
1440	}
1441	bio_endio(bio);
1442}
1443
1444static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1445{
1446	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1447
1448	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1449		submit_flush_bio(ic, dio);
1450	else
1451		do_endio(ic, bio);
1452}
1453
1454static void dec_in_flight(struct dm_integrity_io *dio)
1455{
1456	if (atomic_dec_and_test(&dio->in_flight)) {
1457		struct dm_integrity_c *ic = dio->ic;
1458		struct bio *bio;
1459
1460		remove_range(ic, &dio->range);
1461
1462		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1463			schedule_autocommit(ic);
1464
1465		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1466
1467		if (unlikely(dio->bi_status) && !bio->bi_status)
1468			bio->bi_status = dio->bi_status;
1469		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1470			dio->range.logical_sector += dio->range.n_sectors;
1471			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1472			INIT_WORK(&dio->work, integrity_bio_wait);
1473			queue_work(ic->offload_wq, &dio->work);
1474			return;
1475		}
1476		do_endio_flush(ic, dio);
1477	}
1478}
1479
1480static void integrity_end_io(struct bio *bio)
1481{
1482	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1483
1484	dm_bio_restore(&dio->bio_details, bio);
1485	if (bio->bi_integrity)
1486		bio->bi_opf |= REQ_INTEGRITY;
1487
1488	if (dio->completion)
1489		complete(dio->completion);
1490
1491	dec_in_flight(dio);
1492}
1493
1494static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1495				      const char *data, char *result)
1496{
1497	__u64 sector_le = cpu_to_le64(sector);
1498	SHASH_DESC_ON_STACK(req, ic->internal_hash);
1499	int r;
1500	unsigned digest_size;
1501
1502	req->tfm = ic->internal_hash;
1503
1504	r = crypto_shash_init(req);
1505	if (unlikely(r < 0)) {
1506		dm_integrity_io_error(ic, "crypto_shash_init", r);
1507		goto failed;
1508	}
1509
 
 
 
 
 
 
 
 
1510	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1511	if (unlikely(r < 0)) {
1512		dm_integrity_io_error(ic, "crypto_shash_update", r);
1513		goto failed;
1514	}
1515
1516	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1517	if (unlikely(r < 0)) {
1518		dm_integrity_io_error(ic, "crypto_shash_update", r);
1519		goto failed;
1520	}
1521
1522	r = crypto_shash_final(req, result);
1523	if (unlikely(r < 0)) {
1524		dm_integrity_io_error(ic, "crypto_shash_final", r);
1525		goto failed;
1526	}
1527
1528	digest_size = crypto_shash_digestsize(ic->internal_hash);
1529	if (unlikely(digest_size < ic->tag_size))
1530		memset(result + digest_size, 0, ic->tag_size - digest_size);
1531
1532	return;
1533
1534failed:
1535	/* this shouldn't happen anyway, the hash functions have no reason to fail */
1536	get_random_bytes(result, ic->tag_size);
1537}
1538
1539static void integrity_metadata(struct work_struct *w)
1540{
1541	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1542	struct dm_integrity_c *ic = dio->ic;
1543
1544	int r;
1545
1546	if (ic->internal_hash) {
1547		struct bvec_iter iter;
1548		struct bio_vec bv;
1549		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1550		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1551		char *checksums;
1552		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1553		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1554		sector_t sector;
1555		unsigned sectors_to_process;
1556
1557		if (unlikely(ic->mode == 'R'))
1558			goto skip_io;
1559
1560		if (likely(dio->op != REQ_OP_DISCARD))
1561			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1562					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1563		else
1564			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1565		if (!checksums) {
1566			checksums = checksums_onstack;
1567			if (WARN_ON(extra_space &&
1568				    digest_size > sizeof(checksums_onstack))) {
1569				r = -EINVAL;
1570				goto error;
1571			}
1572		}
1573
1574		if (unlikely(dio->op == REQ_OP_DISCARD)) {
1575			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1576			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1577			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1578			unsigned max_blocks = max_size / ic->tag_size;
1579			memset(checksums, DISCARD_FILLER, max_size);
1580
1581			while (bi_size) {
1582				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1583				this_step_blocks = min(this_step_blocks, max_blocks);
1584				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1585							this_step_blocks * ic->tag_size, TAG_WRITE);
1586				if (unlikely(r)) {
1587					if (likely(checksums != checksums_onstack))
1588						kfree(checksums);
1589					goto error;
1590				}
1591
1592				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1593					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
1594					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
1595					BUG();
1596				}*/
1597				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1598				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1599			}
1600
1601			if (likely(checksums != checksums_onstack))
1602				kfree(checksums);
1603			goto skip_io;
1604		}
1605
1606		sector = dio->range.logical_sector;
1607		sectors_to_process = dio->range.n_sectors;
1608
1609		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1610			unsigned pos;
1611			char *mem, *checksums_ptr;
1612
1613again:
1614			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1615			pos = 0;
1616			checksums_ptr = checksums;
1617			do {
1618				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1619				checksums_ptr += ic->tag_size;
1620				sectors_to_process -= ic->sectors_per_block;
1621				pos += ic->sectors_per_block << SECTOR_SHIFT;
1622				sector += ic->sectors_per_block;
1623			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1624			kunmap_atomic(mem);
1625
1626			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1627						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1628			if (unlikely(r)) {
1629				if (r > 0) {
1630					char b[BDEVNAME_SIZE];
1631					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1632						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
 
 
1633					r = -EILSEQ;
1634					atomic64_inc(&ic->number_of_mismatches);
 
 
1635				}
1636				if (likely(checksums != checksums_onstack))
1637					kfree(checksums);
1638				goto error;
1639			}
1640
1641			if (!sectors_to_process)
1642				break;
1643
1644			if (unlikely(pos < bv.bv_len)) {
1645				bv.bv_offset += pos;
1646				bv.bv_len -= pos;
1647				goto again;
1648			}
1649		}
1650
1651		if (likely(checksums != checksums_onstack))
1652			kfree(checksums);
1653	} else {
1654		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1655
1656		if (bip) {
1657			struct bio_vec biv;
1658			struct bvec_iter iter;
1659			unsigned data_to_process = dio->range.n_sectors;
1660			sector_to_block(ic, data_to_process);
1661			data_to_process *= ic->tag_size;
1662
1663			bip_for_each_vec(biv, bip, iter) {
1664				unsigned char *tag;
1665				unsigned this_len;
1666
1667				BUG_ON(PageHighMem(biv.bv_page));
1668				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1669				this_len = min(biv.bv_len, data_to_process);
1670				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1671							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1672				if (unlikely(r))
1673					goto error;
1674				data_to_process -= this_len;
1675				if (!data_to_process)
1676					break;
1677			}
1678		}
1679	}
1680skip_io:
1681	dec_in_flight(dio);
1682	return;
1683error:
1684	dio->bi_status = errno_to_blk_status(r);
1685	dec_in_flight(dio);
1686}
1687
1688static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1689{
1690	struct dm_integrity_c *ic = ti->private;
1691	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1692	struct bio_integrity_payload *bip;
1693
1694	sector_t area, offset;
1695
1696	dio->ic = ic;
1697	dio->bi_status = 0;
1698	dio->op = bio_op(bio);
1699
1700	if (unlikely(dio->op == REQ_OP_DISCARD)) {
1701		if (ti->max_io_len) {
1702			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1703			unsigned log2_max_io_len = __fls(ti->max_io_len);
1704			sector_t start_boundary = sec >> log2_max_io_len;
1705			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1706			if (start_boundary < end_boundary) {
1707				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1708				dm_accept_partial_bio(bio, len);
1709			}
1710		}
1711	}
1712
1713	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1714		submit_flush_bio(ic, dio);
1715		return DM_MAPIO_SUBMITTED;
1716	}
1717
1718	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1719	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1720	if (unlikely(dio->fua)) {
1721		/*
1722		 * Don't pass down the FUA flag because we have to flush
1723		 * disk cache anyway.
1724		 */
1725		bio->bi_opf &= ~REQ_FUA;
1726	}
1727	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1728		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1729		      dio->range.logical_sector, bio_sectors(bio),
1730		      ic->provided_data_sectors);
1731		return DM_MAPIO_KILL;
1732	}
1733	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1734		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1735		      ic->sectors_per_block,
1736		      dio->range.logical_sector, bio_sectors(bio));
1737		return DM_MAPIO_KILL;
1738	}
1739
1740	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1741		struct bvec_iter iter;
1742		struct bio_vec bv;
1743		bio_for_each_segment(bv, bio, iter) {
1744			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1745				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1746					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1747				return DM_MAPIO_KILL;
1748			}
1749		}
1750	}
1751
1752	bip = bio_integrity(bio);
1753	if (!ic->internal_hash) {
1754		if (bip) {
1755			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1756			if (ic->log2_tag_size >= 0)
1757				wanted_tag_size <<= ic->log2_tag_size;
1758			else
1759				wanted_tag_size *= ic->tag_size;
1760			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1761				DMERR("Invalid integrity data size %u, expected %u",
1762				      bip->bip_iter.bi_size, wanted_tag_size);
1763				return DM_MAPIO_KILL;
1764			}
1765		}
1766	} else {
1767		if (unlikely(bip != NULL)) {
1768			DMERR("Unexpected integrity data when using internal hash");
1769			return DM_MAPIO_KILL;
1770		}
1771	}
1772
1773	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1774		return DM_MAPIO_KILL;
1775
1776	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1777	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1778	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1779
1780	dm_integrity_map_continue(dio, true);
1781	return DM_MAPIO_SUBMITTED;
1782}
1783
1784static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1785				 unsigned journal_section, unsigned journal_entry)
1786{
1787	struct dm_integrity_c *ic = dio->ic;
1788	sector_t logical_sector;
1789	unsigned n_sectors;
1790
1791	logical_sector = dio->range.logical_sector;
1792	n_sectors = dio->range.n_sectors;
1793	do {
1794		struct bio_vec bv = bio_iovec(bio);
1795		char *mem;
1796
1797		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1798			bv.bv_len = n_sectors << SECTOR_SHIFT;
1799		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1800		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1801retry_kmap:
1802		mem = kmap_atomic(bv.bv_page);
1803		if (likely(dio->op == REQ_OP_WRITE))
1804			flush_dcache_page(bv.bv_page);
1805
1806		do {
1807			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1808
1809			if (unlikely(dio->op == REQ_OP_READ)) {
1810				struct journal_sector *js;
1811				char *mem_ptr;
1812				unsigned s;
1813
1814				if (unlikely(journal_entry_is_inprogress(je))) {
1815					flush_dcache_page(bv.bv_page);
1816					kunmap_atomic(mem);
1817
1818					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1819					goto retry_kmap;
1820				}
1821				smp_rmb();
1822				BUG_ON(journal_entry_get_sector(je) != logical_sector);
1823				js = access_journal_data(ic, journal_section, journal_entry);
1824				mem_ptr = mem + bv.bv_offset;
1825				s = 0;
1826				do {
1827					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1828					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1829					js++;
1830					mem_ptr += 1 << SECTOR_SHIFT;
1831				} while (++s < ic->sectors_per_block);
1832#ifdef INTERNAL_VERIFY
1833				if (ic->internal_hash) {
1834					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1835
1836					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1837					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1838						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1839							    logical_sector);
 
 
1840					}
1841				}
1842#endif
1843			}
1844
1845			if (!ic->internal_hash) {
1846				struct bio_integrity_payload *bip = bio_integrity(bio);
1847				unsigned tag_todo = ic->tag_size;
1848				char *tag_ptr = journal_entry_tag(ic, je);
1849
1850				if (bip) do {
1851					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1852					unsigned tag_now = min(biv.bv_len, tag_todo);
1853					char *tag_addr;
1854					BUG_ON(PageHighMem(biv.bv_page));
1855					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1856					if (likely(dio->op == REQ_OP_WRITE))
1857						memcpy(tag_ptr, tag_addr, tag_now);
1858					else
1859						memcpy(tag_addr, tag_ptr, tag_now);
1860					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1861					tag_ptr += tag_now;
1862					tag_todo -= tag_now;
1863				} while (unlikely(tag_todo)); else {
1864					if (likely(dio->op == REQ_OP_WRITE))
1865						memset(tag_ptr, 0, tag_todo);
1866				}
1867			}
1868
1869			if (likely(dio->op == REQ_OP_WRITE)) {
1870				struct journal_sector *js;
1871				unsigned s;
1872
1873				js = access_journal_data(ic, journal_section, journal_entry);
1874				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1875
1876				s = 0;
1877				do {
1878					je->last_bytes[s] = js[s].commit_id;
1879				} while (++s < ic->sectors_per_block);
1880
1881				if (ic->internal_hash) {
1882					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1883					if (unlikely(digest_size > ic->tag_size)) {
1884						char checksums_onstack[HASH_MAX_DIGESTSIZE];
1885						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1886						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1887					} else
1888						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1889				}
1890
1891				journal_entry_set_sector(je, logical_sector);
1892			}
1893			logical_sector += ic->sectors_per_block;
1894
1895			journal_entry++;
1896			if (unlikely(journal_entry == ic->journal_section_entries)) {
1897				journal_entry = 0;
1898				journal_section++;
1899				wraparound_section(ic, &journal_section);
1900			}
1901
1902			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1903		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1904
1905		if (unlikely(dio->op == REQ_OP_READ))
1906			flush_dcache_page(bv.bv_page);
1907		kunmap_atomic(mem);
1908	} while (n_sectors);
1909
1910	if (likely(dio->op == REQ_OP_WRITE)) {
1911		smp_mb();
1912		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1913			wake_up(&ic->copy_to_journal_wait);
1914		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1915			queue_work(ic->commit_wq, &ic->commit_work);
1916		} else {
1917			schedule_autocommit(ic);
1918		}
1919	} else {
1920		remove_range(ic, &dio->range);
1921	}
1922
1923	if (unlikely(bio->bi_iter.bi_size)) {
1924		sector_t area, offset;
1925
1926		dio->range.logical_sector = logical_sector;
1927		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1928		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1929		return true;
1930	}
1931
1932	return false;
1933}
1934
1935static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1936{
1937	struct dm_integrity_c *ic = dio->ic;
1938	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1939	unsigned journal_section, journal_entry;
1940	unsigned journal_read_pos;
1941	struct completion read_comp;
1942	bool discard_retried = false;
1943	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
1944	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
1945		need_sync_io = true;
1946
1947	if (need_sync_io && from_map) {
1948		INIT_WORK(&dio->work, integrity_bio_wait);
1949		queue_work(ic->offload_wq, &dio->work);
1950		return;
1951	}
1952
1953lock_retry:
1954	spin_lock_irq(&ic->endio_wait.lock);
1955retry:
1956	if (unlikely(dm_integrity_failed(ic))) {
1957		spin_unlock_irq(&ic->endio_wait.lock);
1958		do_endio(ic, bio);
1959		return;
1960	}
1961	dio->range.n_sectors = bio_sectors(bio);
1962	journal_read_pos = NOT_FOUND;
1963	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
1964		if (dio->op == REQ_OP_WRITE) {
1965			unsigned next_entry, i, pos;
1966			unsigned ws, we, range_sectors;
1967
1968			dio->range.n_sectors = min(dio->range.n_sectors,
1969						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
1970			if (unlikely(!dio->range.n_sectors)) {
1971				if (from_map)
1972					goto offload_to_thread;
1973				sleep_on_endio_wait(ic);
1974				goto retry;
1975			}
1976			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1977			ic->free_sectors -= range_sectors;
1978			journal_section = ic->free_section;
1979			journal_entry = ic->free_section_entry;
1980
1981			next_entry = ic->free_section_entry + range_sectors;
1982			ic->free_section_entry = next_entry % ic->journal_section_entries;
1983			ic->free_section += next_entry / ic->journal_section_entries;
1984			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1985			wraparound_section(ic, &ic->free_section);
1986
1987			pos = journal_section * ic->journal_section_entries + journal_entry;
1988			ws = journal_section;
1989			we = journal_entry;
1990			i = 0;
1991			do {
1992				struct journal_entry *je;
1993
1994				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1995				pos++;
1996				if (unlikely(pos >= ic->journal_entries))
1997					pos = 0;
1998
1999				je = access_journal_entry(ic, ws, we);
2000				BUG_ON(!journal_entry_is_unused(je));
2001				journal_entry_set_inprogress(je);
2002				we++;
2003				if (unlikely(we == ic->journal_section_entries)) {
2004					we = 0;
2005					ws++;
2006					wraparound_section(ic, &ws);
2007				}
2008			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2009
2010			spin_unlock_irq(&ic->endio_wait.lock);
2011			goto journal_read_write;
2012		} else {
2013			sector_t next_sector;
2014			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2015			if (likely(journal_read_pos == NOT_FOUND)) {
2016				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2017					dio->range.n_sectors = next_sector - dio->range.logical_sector;
2018			} else {
2019				unsigned i;
2020				unsigned jp = journal_read_pos + 1;
2021				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2022					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2023						break;
2024				}
2025				dio->range.n_sectors = i;
2026			}
2027		}
2028	}
2029	if (unlikely(!add_new_range(ic, &dio->range, true))) {
2030		/*
2031		 * We must not sleep in the request routine because it could
2032		 * stall bios on current->bio_list.
2033		 * So, we offload the bio to a workqueue if we have to sleep.
2034		 */
2035		if (from_map) {
2036offload_to_thread:
2037			spin_unlock_irq(&ic->endio_wait.lock);
2038			INIT_WORK(&dio->work, integrity_bio_wait);
2039			queue_work(ic->wait_wq, &dio->work);
2040			return;
2041		}
2042		if (journal_read_pos != NOT_FOUND)
2043			dio->range.n_sectors = ic->sectors_per_block;
2044		wait_and_add_new_range(ic, &dio->range);
2045		/*
2046		 * wait_and_add_new_range drops the spinlock, so the journal
2047		 * may have been changed arbitrarily. We need to recheck.
2048		 * To simplify the code, we restrict I/O size to just one block.
2049		 */
2050		if (journal_read_pos != NOT_FOUND) {
2051			sector_t next_sector;
2052			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2053			if (unlikely(new_pos != journal_read_pos)) {
2054				remove_range_unlocked(ic, &dio->range);
2055				goto retry;
2056			}
2057		}
2058	}
2059	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2060		sector_t next_sector;
2061		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2062		if (unlikely(new_pos != NOT_FOUND) ||
2063		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2064			remove_range_unlocked(ic, &dio->range);
2065			spin_unlock_irq(&ic->endio_wait.lock);
2066			queue_work(ic->commit_wq, &ic->commit_work);
2067			flush_workqueue(ic->commit_wq);
2068			queue_work(ic->writer_wq, &ic->writer_work);
2069			flush_workqueue(ic->writer_wq);
2070			discard_retried = true;
2071			goto lock_retry;
2072		}
2073	}
2074	spin_unlock_irq(&ic->endio_wait.lock);
2075
2076	if (unlikely(journal_read_pos != NOT_FOUND)) {
2077		journal_section = journal_read_pos / ic->journal_section_entries;
2078		journal_entry = journal_read_pos % ic->journal_section_entries;
2079		goto journal_read_write;
2080	}
2081
2082	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2083		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2084				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2085			struct bitmap_block_status *bbs;
2086
2087			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2088			spin_lock(&bbs->bio_queue_lock);
2089			bio_list_add(&bbs->bio_queue, bio);
2090			spin_unlock(&bbs->bio_queue_lock);
2091			queue_work(ic->writer_wq, &bbs->work);
2092			return;
2093		}
2094	}
2095
2096	dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2097
2098	if (need_sync_io) {
2099		init_completion(&read_comp);
2100		dio->completion = &read_comp;
2101	} else
2102		dio->completion = NULL;
2103
2104	dm_bio_record(&dio->bio_details, bio);
2105	bio_set_dev(bio, ic->dev->bdev);
2106	bio->bi_integrity = NULL;
2107	bio->bi_opf &= ~REQ_INTEGRITY;
2108	bio->bi_end_io = integrity_end_io;
2109	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2110
2111	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2112		integrity_metadata(&dio->work);
2113		dm_integrity_flush_buffers(ic);
2114
2115		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2116		dio->completion = NULL;
2117
2118		submit_bio_noacct(bio);
2119
2120		return;
2121	}
2122
2123	submit_bio_noacct(bio);
2124
2125	if (need_sync_io) {
2126		wait_for_completion_io(&read_comp);
2127		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2128		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2129			goto skip_check;
2130		if (ic->mode == 'B') {
2131			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2132					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2133				goto skip_check;
2134		}
2135
2136		if (likely(!bio->bi_status))
2137			integrity_metadata(&dio->work);
2138		else
2139skip_check:
2140			dec_in_flight(dio);
2141
2142	} else {
2143		INIT_WORK(&dio->work, integrity_metadata);
2144		queue_work(ic->metadata_wq, &dio->work);
2145	}
2146
2147	return;
2148
2149journal_read_write:
2150	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2151		goto lock_retry;
2152
2153	do_endio_flush(ic, dio);
2154}
2155
2156
2157static void integrity_bio_wait(struct work_struct *w)
2158{
2159	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2160
2161	dm_integrity_map_continue(dio, false);
2162}
2163
2164static void pad_uncommitted(struct dm_integrity_c *ic)
2165{
2166	if (ic->free_section_entry) {
2167		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2168		ic->free_section_entry = 0;
2169		ic->free_section++;
2170		wraparound_section(ic, &ic->free_section);
2171		ic->n_uncommitted_sections++;
2172	}
2173	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2174		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
2175		    ic->journal_section_entries + ic->free_sectors)) {
2176		DMCRIT("journal_sections %u, journal_section_entries %u, "
2177		       "n_uncommitted_sections %u, n_committed_sections %u, "
2178		       "journal_section_entries %u, free_sectors %u",
2179		       ic->journal_sections, ic->journal_section_entries,
2180		       ic->n_uncommitted_sections, ic->n_committed_sections,
2181		       ic->journal_section_entries, ic->free_sectors);
2182	}
2183}
2184
2185static void integrity_commit(struct work_struct *w)
2186{
2187	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2188	unsigned commit_start, commit_sections;
2189	unsigned i, j, n;
2190	struct bio *flushes;
2191
2192	del_timer(&ic->autocommit_timer);
2193
2194	spin_lock_irq(&ic->endio_wait.lock);
2195	flushes = bio_list_get(&ic->flush_bio_list);
2196	if (unlikely(ic->mode != 'J')) {
2197		spin_unlock_irq(&ic->endio_wait.lock);
2198		dm_integrity_flush_buffers(ic);
2199		goto release_flush_bios;
2200	}
2201
2202	pad_uncommitted(ic);
2203	commit_start = ic->uncommitted_section;
2204	commit_sections = ic->n_uncommitted_sections;
2205	spin_unlock_irq(&ic->endio_wait.lock);
2206
2207	if (!commit_sections)
2208		goto release_flush_bios;
2209
 
 
2210	i = commit_start;
2211	for (n = 0; n < commit_sections; n++) {
2212		for (j = 0; j < ic->journal_section_entries; j++) {
2213			struct journal_entry *je;
2214			je = access_journal_entry(ic, i, j);
2215			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2216		}
2217		for (j = 0; j < ic->journal_section_sectors; j++) {
2218			struct journal_sector *js;
2219			js = access_journal(ic, i, j);
2220			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2221		}
2222		i++;
2223		if (unlikely(i >= ic->journal_sections))
2224			ic->commit_seq = next_commit_seq(ic->commit_seq);
2225		wraparound_section(ic, &i);
2226	}
2227	smp_rmb();
2228
2229	write_journal(ic, commit_start, commit_sections);
2230
2231	spin_lock_irq(&ic->endio_wait.lock);
2232	ic->uncommitted_section += commit_sections;
2233	wraparound_section(ic, &ic->uncommitted_section);
2234	ic->n_uncommitted_sections -= commit_sections;
2235	ic->n_committed_sections += commit_sections;
2236	spin_unlock_irq(&ic->endio_wait.lock);
2237
2238	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2239		queue_work(ic->writer_wq, &ic->writer_work);
2240
2241release_flush_bios:
2242	while (flushes) {
2243		struct bio *next = flushes->bi_next;
2244		flushes->bi_next = NULL;
2245		do_endio(ic, flushes);
2246		flushes = next;
2247	}
2248}
2249
2250static void complete_copy_from_journal(unsigned long error, void *context)
2251{
2252	struct journal_io *io = context;
2253	struct journal_completion *comp = io->comp;
2254	struct dm_integrity_c *ic = comp->ic;
2255	remove_range(ic, &io->range);
2256	mempool_free(io, &ic->journal_io_mempool);
2257	if (unlikely(error != 0))
2258		dm_integrity_io_error(ic, "copying from journal", -EIO);
2259	complete_journal_op(comp);
2260}
2261
2262static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2263			       struct journal_entry *je)
2264{
2265	unsigned s = 0;
2266	do {
2267		js->commit_id = je->last_bytes[s];
2268		js++;
2269	} while (++s < ic->sectors_per_block);
2270}
2271
2272static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2273			     unsigned write_sections, bool from_replay)
2274{
2275	unsigned i, j, n;
2276	struct journal_completion comp;
2277	struct blk_plug plug;
2278
2279	blk_start_plug(&plug);
2280
2281	comp.ic = ic;
2282	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2283	init_completion(&comp.comp);
2284
2285	i = write_start;
2286	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2287#ifndef INTERNAL_VERIFY
2288		if (unlikely(from_replay))
2289#endif
2290			rw_section_mac(ic, i, false);
2291		for (j = 0; j < ic->journal_section_entries; j++) {
2292			struct journal_entry *je = access_journal_entry(ic, i, j);
2293			sector_t sec, area, offset;
2294			unsigned k, l, next_loop;
2295			sector_t metadata_block;
2296			unsigned metadata_offset;
2297			struct journal_io *io;
2298
2299			if (journal_entry_is_unused(je))
2300				continue;
2301			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2302			sec = journal_entry_get_sector(je);
2303			if (unlikely(from_replay)) {
2304				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2305					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2306					sec &= ~(sector_t)(ic->sectors_per_block - 1);
2307				}
 
 
 
 
2308			}
2309			if (unlikely(sec >= ic->provided_data_sectors))
2310				continue;
2311			get_area_and_offset(ic, sec, &area, &offset);
2312			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2313			for (k = j + 1; k < ic->journal_section_entries; k++) {
2314				struct journal_entry *je2 = access_journal_entry(ic, i, k);
2315				sector_t sec2, area2, offset2;
2316				if (journal_entry_is_unused(je2))
2317					break;
2318				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2319				sec2 = journal_entry_get_sector(je2);
2320				if (unlikely(sec2 >= ic->provided_data_sectors))
2321					break;
2322				get_area_and_offset(ic, sec2, &area2, &offset2);
2323				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2324					break;
2325				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2326			}
2327			next_loop = k - 1;
2328
2329			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2330			io->comp = &comp;
2331			io->range.logical_sector = sec;
2332			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2333
2334			spin_lock_irq(&ic->endio_wait.lock);
2335			add_new_range_and_wait(ic, &io->range);
2336
2337			if (likely(!from_replay)) {
2338				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2339
2340				/* don't write if there is newer committed sector */
2341				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2342					struct journal_entry *je2 = access_journal_entry(ic, i, j);
2343
2344					journal_entry_set_unused(je2);
2345					remove_journal_node(ic, &section_node[j]);
2346					j++;
2347					sec += ic->sectors_per_block;
2348					offset += ic->sectors_per_block;
2349				}
2350				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2351					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2352
2353					journal_entry_set_unused(je2);
2354					remove_journal_node(ic, &section_node[k - 1]);
2355					k--;
2356				}
2357				if (j == k) {
2358					remove_range_unlocked(ic, &io->range);
2359					spin_unlock_irq(&ic->endio_wait.lock);
2360					mempool_free(io, &ic->journal_io_mempool);
2361					goto skip_io;
2362				}
2363				for (l = j; l < k; l++) {
2364					remove_journal_node(ic, &section_node[l]);
2365				}
2366			}
2367			spin_unlock_irq(&ic->endio_wait.lock);
2368
2369			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2370			for (l = j; l < k; l++) {
2371				int r;
2372				struct journal_entry *je2 = access_journal_entry(ic, i, l);
2373
2374				if (
2375#ifndef INTERNAL_VERIFY
2376				    unlikely(from_replay) &&
2377#endif
2378				    ic->internal_hash) {
2379					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2380
2381					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2382								  (char *)access_journal_data(ic, i, l), test_tag);
2383					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2384						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
 
 
2385				}
2386
2387				journal_entry_set_unused(je2);
2388				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2389							ic->tag_size, TAG_WRITE);
2390				if (unlikely(r)) {
2391					dm_integrity_io_error(ic, "reading tags", r);
2392				}
2393			}
2394
2395			atomic_inc(&comp.in_flight);
2396			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2397					  (k - j) << ic->sb->log2_sectors_per_block,
2398					  get_data_sector(ic, area, offset),
2399					  complete_copy_from_journal, io);
2400skip_io:
2401			j = next_loop;
2402		}
2403	}
2404
2405	dm_bufio_write_dirty_buffers_async(ic->bufio);
2406
2407	blk_finish_plug(&plug);
2408
2409	complete_journal_op(&comp);
2410	wait_for_completion_io(&comp.comp);
2411
2412	dm_integrity_flush_buffers(ic);
2413}
2414
2415static void integrity_writer(struct work_struct *w)
2416{
2417	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2418	unsigned write_start, write_sections;
2419
2420	unsigned prev_free_sectors;
2421
2422	/* the following test is not needed, but it tests the replay code */
2423	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2424		return;
2425
2426	spin_lock_irq(&ic->endio_wait.lock);
2427	write_start = ic->committed_section;
2428	write_sections = ic->n_committed_sections;
2429	spin_unlock_irq(&ic->endio_wait.lock);
2430
2431	if (!write_sections)
2432		return;
2433
2434	do_journal_write(ic, write_start, write_sections, false);
2435
2436	spin_lock_irq(&ic->endio_wait.lock);
2437
2438	ic->committed_section += write_sections;
2439	wraparound_section(ic, &ic->committed_section);
2440	ic->n_committed_sections -= write_sections;
2441
2442	prev_free_sectors = ic->free_sectors;
2443	ic->free_sectors += write_sections * ic->journal_section_entries;
2444	if (unlikely(!prev_free_sectors))
2445		wake_up_locked(&ic->endio_wait);
2446
2447	spin_unlock_irq(&ic->endio_wait.lock);
2448}
2449
2450static void recalc_write_super(struct dm_integrity_c *ic)
2451{
2452	int r;
2453
2454	dm_integrity_flush_buffers(ic);
2455	if (dm_integrity_failed(ic))
2456		return;
2457
2458	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2459	if (unlikely(r))
2460		dm_integrity_io_error(ic, "writing superblock", r);
2461}
2462
2463static void integrity_recalc(struct work_struct *w)
2464{
2465	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2466	struct dm_integrity_range range;
2467	struct dm_io_request io_req;
2468	struct dm_io_region io_loc;
2469	sector_t area, offset;
2470	sector_t metadata_block;
2471	unsigned metadata_offset;
2472	sector_t logical_sector, n_sectors;
2473	__u8 *t;
2474	unsigned i;
2475	int r;
2476	unsigned super_counter = 0;
2477
2478	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2479
2480	spin_lock_irq(&ic->endio_wait.lock);
2481
2482next_chunk:
2483
2484	if (unlikely(dm_post_suspending(ic->ti)))
2485		goto unlock_ret;
2486
2487	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2488	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2489		if (ic->mode == 'B') {
2490			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2491			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2492			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2493		}
2494		goto unlock_ret;
2495	}
2496
2497	get_area_and_offset(ic, range.logical_sector, &area, &offset);
2498	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2499	if (!ic->meta_dev)
2500		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2501
2502	add_new_range_and_wait(ic, &range);
2503	spin_unlock_irq(&ic->endio_wait.lock);
2504	logical_sector = range.logical_sector;
2505	n_sectors = range.n_sectors;
2506
2507	if (ic->mode == 'B') {
2508		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2509			goto advance_and_next;
2510		}
2511		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2512				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2513			logical_sector += ic->sectors_per_block;
2514			n_sectors -= ic->sectors_per_block;
2515			cond_resched();
2516		}
2517		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2518				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2519			n_sectors -= ic->sectors_per_block;
2520			cond_resched();
2521		}
2522		get_area_and_offset(ic, logical_sector, &area, &offset);
2523	}
2524
2525	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2526
2527	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2528		recalc_write_super(ic);
2529		if (ic->mode == 'B') {
2530			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2531		}
2532		super_counter = 0;
2533	}
2534
2535	if (unlikely(dm_integrity_failed(ic)))
2536		goto err;
2537
2538	io_req.bi_op = REQ_OP_READ;
2539	io_req.bi_op_flags = 0;
2540	io_req.mem.type = DM_IO_VMA;
2541	io_req.mem.ptr.addr = ic->recalc_buffer;
2542	io_req.notify.fn = NULL;
2543	io_req.client = ic->io;
2544	io_loc.bdev = ic->dev->bdev;
2545	io_loc.sector = get_data_sector(ic, area, offset);
2546	io_loc.count = n_sectors;
2547
2548	r = dm_io(&io_req, 1, &io_loc, NULL);
2549	if (unlikely(r)) {
2550		dm_integrity_io_error(ic, "reading data", r);
2551		goto err;
2552	}
2553
2554	t = ic->recalc_tags;
2555	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2556		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2557		t += ic->tag_size;
2558	}
2559
2560	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2561
2562	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2563	if (unlikely(r)) {
2564		dm_integrity_io_error(ic, "writing tags", r);
2565		goto err;
2566	}
2567
2568	if (ic->mode == 'B') {
2569		sector_t start, end;
2570		start = (range.logical_sector >>
2571			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2572			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2573		end = ((range.logical_sector + range.n_sectors) >>
2574		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2575			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2576		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2577	}
2578
2579advance_and_next:
2580	cond_resched();
2581
2582	spin_lock_irq(&ic->endio_wait.lock);
2583	remove_range_unlocked(ic, &range);
2584	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2585	goto next_chunk;
2586
2587err:
2588	remove_range(ic, &range);
2589	return;
2590
2591unlock_ret:
2592	spin_unlock_irq(&ic->endio_wait.lock);
2593
2594	recalc_write_super(ic);
2595}
2596
2597static void bitmap_block_work(struct work_struct *w)
2598{
2599	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2600	struct dm_integrity_c *ic = bbs->ic;
2601	struct bio *bio;
2602	struct bio_list bio_queue;
2603	struct bio_list waiting;
2604
2605	bio_list_init(&waiting);
2606
2607	spin_lock(&bbs->bio_queue_lock);
2608	bio_queue = bbs->bio_queue;
2609	bio_list_init(&bbs->bio_queue);
2610	spin_unlock(&bbs->bio_queue_lock);
2611
2612	while ((bio = bio_list_pop(&bio_queue))) {
2613		struct dm_integrity_io *dio;
2614
2615		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2616
2617		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2618				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2619			remove_range(ic, &dio->range);
2620			INIT_WORK(&dio->work, integrity_bio_wait);
2621			queue_work(ic->offload_wq, &dio->work);
2622		} else {
2623			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2624					dio->range.n_sectors, BITMAP_OP_SET);
2625			bio_list_add(&waiting, bio);
2626		}
2627	}
2628
2629	if (bio_list_empty(&waiting))
2630		return;
2631
2632	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2633			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2634			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2635
2636	while ((bio = bio_list_pop(&waiting))) {
2637		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2638
2639		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2640				dio->range.n_sectors, BITMAP_OP_SET);
2641
2642		remove_range(ic, &dio->range);
2643		INIT_WORK(&dio->work, integrity_bio_wait);
2644		queue_work(ic->offload_wq, &dio->work);
2645	}
2646
2647	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2648}
2649
2650static void bitmap_flush_work(struct work_struct *work)
2651{
2652	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2653	struct dm_integrity_range range;
2654	unsigned long limit;
2655	struct bio *bio;
2656
2657	dm_integrity_flush_buffers(ic);
2658
2659	range.logical_sector = 0;
2660	range.n_sectors = ic->provided_data_sectors;
2661
2662	spin_lock_irq(&ic->endio_wait.lock);
2663	add_new_range_and_wait(ic, &range);
2664	spin_unlock_irq(&ic->endio_wait.lock);
2665
2666	dm_integrity_flush_buffers(ic);
2667	if (ic->meta_dev)
2668		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
2669
2670	limit = ic->provided_data_sectors;
2671	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2672		limit = le64_to_cpu(ic->sb->recalc_sector)
2673			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2674			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2675	}
2676	/*DEBUG_print("zeroing journal\n");*/
2677	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2678	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2679
2680	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2681			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2682
2683	spin_lock_irq(&ic->endio_wait.lock);
2684	remove_range_unlocked(ic, &range);
2685	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2686		bio_endio(bio);
2687		spin_unlock_irq(&ic->endio_wait.lock);
2688		spin_lock_irq(&ic->endio_wait.lock);
2689	}
2690	spin_unlock_irq(&ic->endio_wait.lock);
2691}
2692
2693
2694static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2695			 unsigned n_sections, unsigned char commit_seq)
2696{
2697	unsigned i, j, n;
2698
2699	if (!n_sections)
2700		return;
2701
2702	for (n = 0; n < n_sections; n++) {
2703		i = start_section + n;
2704		wraparound_section(ic, &i);
2705		for (j = 0; j < ic->journal_section_sectors; j++) {
2706			struct journal_sector *js = access_journal(ic, i, j);
2707			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
 
2708			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2709		}
2710		for (j = 0; j < ic->journal_section_entries; j++) {
2711			struct journal_entry *je = access_journal_entry(ic, i, j);
2712			journal_entry_set_unused(je);
2713		}
2714	}
2715
2716	write_journal(ic, start_section, n_sections);
2717}
2718
2719static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2720{
2721	unsigned char k;
2722	for (k = 0; k < N_COMMIT_IDS; k++) {
2723		if (dm_integrity_commit_id(ic, i, j, k) == id)
2724			return k;
2725	}
2726	dm_integrity_io_error(ic, "journal commit id", -EIO);
2727	return -EIO;
2728}
2729
2730static void replay_journal(struct dm_integrity_c *ic)
2731{
2732	unsigned i, j;
2733	bool used_commit_ids[N_COMMIT_IDS];
2734	unsigned max_commit_id_sections[N_COMMIT_IDS];
2735	unsigned write_start, write_sections;
2736	unsigned continue_section;
2737	bool journal_empty;
2738	unsigned char unused, last_used, want_commit_seq;
2739
2740	if (ic->mode == 'R')
2741		return;
2742
2743	if (ic->journal_uptodate)
2744		return;
2745
2746	last_used = 0;
2747	write_start = 0;
2748
2749	if (!ic->just_formatted) {
2750		DEBUG_print("reading journal\n");
2751		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2752		if (ic->journal_io)
2753			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2754		if (ic->journal_io) {
2755			struct journal_completion crypt_comp;
2756			crypt_comp.ic = ic;
2757			init_completion(&crypt_comp.comp);
2758			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2759			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2760			wait_for_completion(&crypt_comp.comp);
2761		}
2762		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2763	}
2764
2765	if (dm_integrity_failed(ic))
2766		goto clear_journal;
2767
2768	journal_empty = true;
2769	memset(used_commit_ids, 0, sizeof used_commit_ids);
2770	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2771	for (i = 0; i < ic->journal_sections; i++) {
2772		for (j = 0; j < ic->journal_section_sectors; j++) {
2773			int k;
2774			struct journal_sector *js = access_journal(ic, i, j);
2775			k = find_commit_seq(ic, i, j, js->commit_id);
2776			if (k < 0)
2777				goto clear_journal;
2778			used_commit_ids[k] = true;
2779			max_commit_id_sections[k] = i;
2780		}
2781		if (journal_empty) {
2782			for (j = 0; j < ic->journal_section_entries; j++) {
2783				struct journal_entry *je = access_journal_entry(ic, i, j);
2784				if (!journal_entry_is_unused(je)) {
2785					journal_empty = false;
2786					break;
2787				}
2788			}
2789		}
2790	}
2791
2792	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2793		unused = N_COMMIT_IDS - 1;
2794		while (unused && !used_commit_ids[unused - 1])
2795			unused--;
2796	} else {
2797		for (unused = 0; unused < N_COMMIT_IDS; unused++)
2798			if (!used_commit_ids[unused])
2799				break;
2800		if (unused == N_COMMIT_IDS) {
2801			dm_integrity_io_error(ic, "journal commit ids", -EIO);
2802			goto clear_journal;
2803		}
2804	}
2805	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2806		    unused, used_commit_ids[0], used_commit_ids[1],
2807		    used_commit_ids[2], used_commit_ids[3]);
2808
2809	last_used = prev_commit_seq(unused);
2810	want_commit_seq = prev_commit_seq(last_used);
2811
2812	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2813		journal_empty = true;
2814
2815	write_start = max_commit_id_sections[last_used] + 1;
2816	if (unlikely(write_start >= ic->journal_sections))
2817		want_commit_seq = next_commit_seq(want_commit_seq);
2818	wraparound_section(ic, &write_start);
2819
2820	i = write_start;
2821	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2822		for (j = 0; j < ic->journal_section_sectors; j++) {
2823			struct journal_sector *js = access_journal(ic, i, j);
2824
2825			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2826				/*
2827				 * This could be caused by crash during writing.
2828				 * We won't replay the inconsistent part of the
2829				 * journal.
2830				 */
2831				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2832					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2833				goto brk;
2834			}
2835		}
2836		i++;
2837		if (unlikely(i >= ic->journal_sections))
2838			want_commit_seq = next_commit_seq(want_commit_seq);
2839		wraparound_section(ic, &i);
2840	}
2841brk:
2842
2843	if (!journal_empty) {
2844		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2845			    write_sections, write_start, want_commit_seq);
2846		do_journal_write(ic, write_start, write_sections, true);
2847	}
2848
2849	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2850		continue_section = write_start;
2851		ic->commit_seq = want_commit_seq;
2852		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2853	} else {
2854		unsigned s;
2855		unsigned char erase_seq;
2856clear_journal:
2857		DEBUG_print("clearing journal\n");
2858
2859		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2860		s = write_start;
2861		init_journal(ic, s, 1, erase_seq);
2862		s++;
2863		wraparound_section(ic, &s);
2864		if (ic->journal_sections >= 2) {
2865			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2866			s += ic->journal_sections - 2;
2867			wraparound_section(ic, &s);
2868			init_journal(ic, s, 1, erase_seq);
2869		}
2870
2871		continue_section = 0;
2872		ic->commit_seq = next_commit_seq(erase_seq);
2873	}
2874
2875	ic->committed_section = continue_section;
2876	ic->n_committed_sections = 0;
2877
2878	ic->uncommitted_section = continue_section;
2879	ic->n_uncommitted_sections = 0;
2880
2881	ic->free_section = continue_section;
2882	ic->free_section_entry = 0;
2883	ic->free_sectors = ic->journal_entries;
2884
2885	ic->journal_tree_root = RB_ROOT;
2886	for (i = 0; i < ic->journal_entries; i++)
2887		init_journal_node(&ic->journal_tree[i]);
2888}
2889
2890static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
2891{
2892	DEBUG_print("dm_integrity_enter_synchronous_mode\n");
2893
2894	if (ic->mode == 'B') {
2895		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
2896		ic->synchronous_mode = 1;
2897
2898		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2899		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2900		flush_workqueue(ic->commit_wq);
2901	}
2902}
2903
2904static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
2905{
2906	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
2907
2908	DEBUG_print("dm_integrity_reboot\n");
2909
2910	dm_integrity_enter_synchronous_mode(ic);
2911
2912	return NOTIFY_DONE;
2913}
2914
2915static void dm_integrity_postsuspend(struct dm_target *ti)
2916{
2917	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2918	int r;
2919
2920	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
2921
2922	del_timer_sync(&ic->autocommit_timer);
2923
2924	if (ic->recalc_wq)
2925		drain_workqueue(ic->recalc_wq);
2926
2927	if (ic->mode == 'B')
2928		cancel_delayed_work_sync(&ic->bitmap_flush_work);
2929
2930	queue_work(ic->commit_wq, &ic->commit_work);
2931	drain_workqueue(ic->commit_wq);
2932
2933	if (ic->mode == 'J') {
2934		if (ic->meta_dev)
2935			queue_work(ic->writer_wq, &ic->writer_work);
2936		drain_workqueue(ic->writer_wq);
2937		dm_integrity_flush_buffers(ic);
 
 
 
 
 
 
 
 
2938	}
2939
2940	if (ic->mode == 'B') {
2941		dm_integrity_flush_buffers(ic);
2942#if 1
2943		/* set to 0 to test bitmap replay code */
2944		init_journal(ic, 0, ic->journal_sections, 0);
2945		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
2946		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2947		if (unlikely(r))
2948			dm_integrity_io_error(ic, "writing superblock", r);
2949#endif
2950	}
2951
2952	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2953
2954	ic->journal_uptodate = true;
2955}
2956
2957static void dm_integrity_resume(struct dm_target *ti)
2958{
2959	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2960	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
2961	int r;
2962
2963	DEBUG_print("resume\n");
2964
 
 
2965	if (ic->provided_data_sectors != old_provided_data_sectors) {
2966		if (ic->provided_data_sectors > old_provided_data_sectors &&
2967		    ic->mode == 'B' &&
2968		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
2969			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2970					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2971			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
2972					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
2973			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2974					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2975		}
2976
2977		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2978		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
2979		if (unlikely(r))
2980			dm_integrity_io_error(ic, "writing superblock", r);
2981	}
2982
2983	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
2984		DEBUG_print("resume dirty_bitmap\n");
2985		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
2986				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2987		if (ic->mode == 'B') {
2988			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
 
2989				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
2990				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
2991				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
2992						     BITMAP_OP_TEST_ALL_CLEAR)) {
2993					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
2994					ic->sb->recalc_sector = cpu_to_le64(0);
2995				}
2996			} else {
2997				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
2998					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
2999				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3000				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3001				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3002				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3003				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3004						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3005				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3006				ic->sb->recalc_sector = cpu_to_le64(0);
3007			}
3008		} else {
3009			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3010			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
 
3011				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3012				ic->sb->recalc_sector = cpu_to_le64(0);
3013			}
3014			init_journal(ic, 0, ic->journal_sections, 0);
3015			replay_journal(ic);
3016			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3017		}
3018		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3019		if (unlikely(r))
3020			dm_integrity_io_error(ic, "writing superblock", r);
3021	} else {
3022		replay_journal(ic);
 
 
 
 
3023		if (ic->mode == 'B') {
3024			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3025			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3026			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3027			if (unlikely(r))
3028				dm_integrity_io_error(ic, "writing superblock", r);
3029
3030			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3031			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3032			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3033			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3034			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3035				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3036						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3037				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3038						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3039				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3040						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3041			}
3042			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3043					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3044		}
3045	}
3046
3047	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3048	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3049		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3050		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3051		if (recalc_pos < ic->provided_data_sectors) {
3052			queue_work(ic->recalc_wq, &ic->recalc_work);
3053		} else if (recalc_pos > ic->provided_data_sectors) {
3054			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3055			recalc_write_super(ic);
3056		}
3057	}
3058
3059	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3060	ic->reboot_notifier.next = NULL;
3061	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
3062	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3063
3064#if 0
3065	/* set to 1 to stress test synchronous mode */
3066	dm_integrity_enter_synchronous_mode(ic);
3067#endif
3068}
3069
3070static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3071				unsigned status_flags, char *result, unsigned maxlen)
3072{
3073	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3074	unsigned arg_count;
3075	size_t sz = 0;
3076
3077	switch (type) {
3078	case STATUSTYPE_INFO:
3079		DMEMIT("%llu %llu",
3080			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
3081			ic->provided_data_sectors);
3082		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3083			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3084		else
3085			DMEMIT(" -");
3086		break;
3087
3088	case STATUSTYPE_TABLE: {
3089		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3090		watermark_percentage += ic->journal_entries / 2;
3091		do_div(watermark_percentage, ic->journal_entries);
3092		arg_count = 3;
3093		arg_count += !!ic->meta_dev;
3094		arg_count += ic->sectors_per_block != 1;
3095		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
 
3096		arg_count += ic->discard;
3097		arg_count += ic->mode == 'J';
3098		arg_count += ic->mode == 'J';
3099		arg_count += ic->mode == 'B';
3100		arg_count += ic->mode == 'B';
3101		arg_count += !!ic->internal_hash_alg.alg_string;
3102		arg_count += !!ic->journal_crypt_alg.alg_string;
3103		arg_count += !!ic->journal_mac_alg.alg_string;
3104		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
 
 
3105		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3106		       ic->tag_size, ic->mode, arg_count);
3107		if (ic->meta_dev)
3108			DMEMIT(" meta_device:%s", ic->meta_dev->name);
3109		if (ic->sectors_per_block != 1)
3110			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3111		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3112			DMEMIT(" recalculate");
 
 
3113		if (ic->discard)
3114			DMEMIT(" allow_discards");
3115		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3116		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3117		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3118		if (ic->mode == 'J') {
3119			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3120			DMEMIT(" commit_time:%u", ic->autocommit_msec);
3121		}
3122		if (ic->mode == 'B') {
3123			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3124			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3125		}
3126		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3127			DMEMIT(" fix_padding");
 
 
 
 
3128
3129#define EMIT_ALG(a, n)							\
3130		do {							\
3131			if (ic->a.alg_string) {				\
3132				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
3133				if (ic->a.key_string)			\
3134					DMEMIT(":%s", ic->a.key_string);\
3135			}						\
3136		} while (0)
3137		EMIT_ALG(internal_hash_alg, "internal_hash");
3138		EMIT_ALG(journal_crypt_alg, "journal_crypt");
3139		EMIT_ALG(journal_mac_alg, "journal_mac");
3140		break;
3141	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3142	}
3143}
3144
3145static int dm_integrity_iterate_devices(struct dm_target *ti,
3146					iterate_devices_callout_fn fn, void *data)
3147{
3148	struct dm_integrity_c *ic = ti->private;
3149
3150	if (!ic->meta_dev)
3151		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3152	else
3153		return fn(ti, ic->dev, 0, ti->len, data);
3154}
3155
3156static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3157{
3158	struct dm_integrity_c *ic = ti->private;
3159
3160	if (ic->sectors_per_block > 1) {
3161		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3162		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3163		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
 
3164	}
3165}
3166
3167static void calculate_journal_section_size(struct dm_integrity_c *ic)
3168{
3169	unsigned sector_space = JOURNAL_SECTOR_DATA;
3170
3171	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3172	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3173					 JOURNAL_ENTRY_ROUNDUP);
3174
3175	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3176		sector_space -= JOURNAL_MAC_PER_SECTOR;
3177	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3178	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3179	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3180	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3181}
3182
3183static int calculate_device_limits(struct dm_integrity_c *ic)
3184{
3185	__u64 initial_sectors;
3186
3187	calculate_journal_section_size(ic);
3188	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3189	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3190		return -EINVAL;
3191	ic->initial_sectors = initial_sectors;
3192
3193	if (!ic->meta_dev) {
3194		sector_t last_sector, last_area, last_offset;
3195
3196		/* we have to maintain excessive padding for compatibility with existing volumes */
3197		__u64 metadata_run_padding =
3198			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3199			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3200			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3201
3202		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3203					    metadata_run_padding) >> SECTOR_SHIFT;
3204		if (!(ic->metadata_run & (ic->metadata_run - 1)))
3205			ic->log2_metadata_run = __ffs(ic->metadata_run);
3206		else
3207			ic->log2_metadata_run = -1;
3208
3209		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3210		last_sector = get_data_sector(ic, last_area, last_offset);
3211		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3212			return -EINVAL;
3213	} else {
3214		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3215		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3216				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3217		meta_size <<= ic->log2_buffer_sectors;
3218		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3219		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
3220			return -EINVAL;
3221		ic->metadata_run = 1;
3222		ic->log2_metadata_run = 0;
3223	}
3224
3225	return 0;
3226}
3227
3228static void get_provided_data_sectors(struct dm_integrity_c *ic)
3229{
3230	if (!ic->meta_dev) {
3231		int test_bit;
3232		ic->provided_data_sectors = 0;
3233		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3234			__u64 prev_data_sectors = ic->provided_data_sectors;
3235
3236			ic->provided_data_sectors |= (sector_t)1 << test_bit;
3237			if (calculate_device_limits(ic))
3238				ic->provided_data_sectors = prev_data_sectors;
3239		}
3240	} else {
3241		ic->provided_data_sectors = ic->data_device_sectors;
3242		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3243	}
3244}
3245
3246static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3247{
3248	unsigned journal_sections;
3249	int test_bit;
3250
3251	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3252	memcpy(ic->sb->magic, SB_MAGIC, 8);
3253	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3254	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3255	if (ic->journal_mac_alg.alg_string)
3256		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3257
3258	calculate_journal_section_size(ic);
3259	journal_sections = journal_sectors / ic->journal_section_sectors;
3260	if (!journal_sections)
3261		journal_sections = 1;
3262
 
 
 
 
 
3263	if (!ic->meta_dev) {
3264		if (ic->fix_padding)
3265			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3266		ic->sb->journal_sections = cpu_to_le32(journal_sections);
3267		if (!interleave_sectors)
3268			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3269		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3270		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3271		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3272
3273		get_provided_data_sectors(ic);
3274		if (!ic->provided_data_sectors)
3275			return -EINVAL;
3276	} else {
3277		ic->sb->log2_interleave_sectors = 0;
3278
3279		get_provided_data_sectors(ic);
3280		if (!ic->provided_data_sectors)
3281			return -EINVAL;
3282
3283try_smaller_buffer:
3284		ic->sb->journal_sections = cpu_to_le32(0);
3285		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3286			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3287			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3288			if (test_journal_sections > journal_sections)
3289				continue;
3290			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3291			if (calculate_device_limits(ic))
3292				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3293
3294		}
3295		if (!le32_to_cpu(ic->sb->journal_sections)) {
3296			if (ic->log2_buffer_sectors > 3) {
3297				ic->log2_buffer_sectors--;
3298				goto try_smaller_buffer;
3299			}
3300			return -EINVAL;
3301		}
3302	}
3303
3304	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3305
3306	sb_set_version(ic);
3307
3308	return 0;
3309}
3310
3311static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3312{
3313	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3314	struct blk_integrity bi;
3315
3316	memset(&bi, 0, sizeof(bi));
3317	bi.profile = &dm_integrity_profile;
3318	bi.tuple_size = ic->tag_size;
3319	bi.tag_size = bi.tuple_size;
3320	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3321
3322	blk_integrity_register(disk, &bi);
3323	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3324}
3325
3326static void dm_integrity_free_page_list(struct page_list *pl)
3327{
3328	unsigned i;
3329
3330	if (!pl)
3331		return;
3332	for (i = 0; pl[i].page; i++)
3333		__free_page(pl[i].page);
3334	kvfree(pl);
3335}
3336
3337static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3338{
3339	struct page_list *pl;
3340	unsigned i;
3341
3342	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3343	if (!pl)
3344		return NULL;
3345
3346	for (i = 0; i < n_pages; i++) {
3347		pl[i].page = alloc_page(GFP_KERNEL);
3348		if (!pl[i].page) {
3349			dm_integrity_free_page_list(pl);
3350			return NULL;
3351		}
3352		if (i)
3353			pl[i - 1].next = &pl[i];
3354	}
3355	pl[i].page = NULL;
3356	pl[i].next = NULL;
3357
3358	return pl;
3359}
3360
3361static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3362{
3363	unsigned i;
3364	for (i = 0; i < ic->journal_sections; i++)
3365		kvfree(sl[i]);
3366	kvfree(sl);
3367}
3368
3369static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3370								   struct page_list *pl)
3371{
3372	struct scatterlist **sl;
3373	unsigned i;
3374
3375	sl = kvmalloc_array(ic->journal_sections,
3376			    sizeof(struct scatterlist *),
3377			    GFP_KERNEL | __GFP_ZERO);
3378	if (!sl)
3379		return NULL;
3380
3381	for (i = 0; i < ic->journal_sections; i++) {
3382		struct scatterlist *s;
3383		unsigned start_index, start_offset;
3384		unsigned end_index, end_offset;
3385		unsigned n_pages;
3386		unsigned idx;
3387
3388		page_list_location(ic, i, 0, &start_index, &start_offset);
3389		page_list_location(ic, i, ic->journal_section_sectors - 1,
3390				   &end_index, &end_offset);
3391
3392		n_pages = (end_index - start_index + 1);
3393
3394		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3395				   GFP_KERNEL);
3396		if (!s) {
3397			dm_integrity_free_journal_scatterlist(ic, sl);
3398			return NULL;
3399		}
3400
3401		sg_init_table(s, n_pages);
3402		for (idx = start_index; idx <= end_index; idx++) {
3403			char *va = lowmem_page_address(pl[idx].page);
3404			unsigned start = 0, end = PAGE_SIZE;
3405			if (idx == start_index)
3406				start = start_offset;
3407			if (idx == end_index)
3408				end = end_offset + (1 << SECTOR_SHIFT);
3409			sg_set_buf(&s[idx - start_index], va + start, end - start);
3410		}
3411
3412		sl[i] = s;
3413	}
3414
3415	return sl;
3416}
3417
3418static void free_alg(struct alg_spec *a)
3419{
3420	kfree_sensitive(a->alg_string);
3421	kfree_sensitive(a->key);
3422	memset(a, 0, sizeof *a);
3423}
3424
3425static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3426{
3427	char *k;
3428
3429	free_alg(a);
3430
3431	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3432	if (!a->alg_string)
3433		goto nomem;
3434
3435	k = strchr(a->alg_string, ':');
3436	if (k) {
3437		*k = 0;
3438		a->key_string = k + 1;
3439		if (strlen(a->key_string) & 1)
3440			goto inval;
3441
3442		a->key_size = strlen(a->key_string) / 2;
3443		a->key = kmalloc(a->key_size, GFP_KERNEL);
3444		if (!a->key)
3445			goto nomem;
3446		if (hex2bin(a->key, a->key_string, a->key_size))
3447			goto inval;
3448	}
3449
3450	return 0;
3451inval:
3452	*error = error_inval;
3453	return -EINVAL;
3454nomem:
3455	*error = "Out of memory for an argument";
3456	return -ENOMEM;
3457}
3458
3459static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3460		   char *error_alg, char *error_key)
3461{
3462	int r;
3463
3464	if (a->alg_string) {
3465		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
3466		if (IS_ERR(*hash)) {
3467			*error = error_alg;
3468			r = PTR_ERR(*hash);
3469			*hash = NULL;
3470			return r;
3471		}
3472
3473		if (a->key) {
3474			r = crypto_shash_setkey(*hash, a->key, a->key_size);
3475			if (r) {
3476				*error = error_key;
3477				return r;
3478			}
3479		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3480			*error = error_key;
3481			return -ENOKEY;
3482		}
3483	}
3484
3485	return 0;
3486}
3487
3488static int create_journal(struct dm_integrity_c *ic, char **error)
3489{
3490	int r = 0;
3491	unsigned i;
3492	__u64 journal_pages, journal_desc_size, journal_tree_size;
3493	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3494	struct skcipher_request *req = NULL;
3495
3496	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3497	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3498	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3499	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3500
3501	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3502				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3503	journal_desc_size = journal_pages * sizeof(struct page_list);
3504	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3505		*error = "Journal doesn't fit into memory";
3506		r = -ENOMEM;
3507		goto bad;
3508	}
3509	ic->journal_pages = journal_pages;
3510
3511	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3512	if (!ic->journal) {
3513		*error = "Could not allocate memory for journal";
3514		r = -ENOMEM;
3515		goto bad;
3516	}
3517	if (ic->journal_crypt_alg.alg_string) {
3518		unsigned ivsize, blocksize;
3519		struct journal_completion comp;
3520
3521		comp.ic = ic;
3522		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
3523		if (IS_ERR(ic->journal_crypt)) {
3524			*error = "Invalid journal cipher";
3525			r = PTR_ERR(ic->journal_crypt);
3526			ic->journal_crypt = NULL;
3527			goto bad;
3528		}
3529		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3530		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3531
3532		if (ic->journal_crypt_alg.key) {
3533			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3534						   ic->journal_crypt_alg.key_size);
3535			if (r) {
3536				*error = "Error setting encryption key";
3537				goto bad;
3538			}
3539		}
3540		DEBUG_print("cipher %s, block size %u iv size %u\n",
3541			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3542
3543		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3544		if (!ic->journal_io) {
3545			*error = "Could not allocate memory for journal io";
3546			r = -ENOMEM;
3547			goto bad;
3548		}
3549
3550		if (blocksize == 1) {
3551			struct scatterlist *sg;
3552
3553			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3554			if (!req) {
3555				*error = "Could not allocate crypt request";
3556				r = -ENOMEM;
3557				goto bad;
3558			}
3559
3560			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3561			if (!crypt_iv) {
3562				*error = "Could not allocate iv";
3563				r = -ENOMEM;
3564				goto bad;
3565			}
3566
3567			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3568			if (!ic->journal_xor) {
3569				*error = "Could not allocate memory for journal xor";
3570				r = -ENOMEM;
3571				goto bad;
3572			}
3573
3574			sg = kvmalloc_array(ic->journal_pages + 1,
3575					    sizeof(struct scatterlist),
3576					    GFP_KERNEL);
3577			if (!sg) {
3578				*error = "Unable to allocate sg list";
3579				r = -ENOMEM;
3580				goto bad;
3581			}
3582			sg_init_table(sg, ic->journal_pages + 1);
3583			for (i = 0; i < ic->journal_pages; i++) {
3584				char *va = lowmem_page_address(ic->journal_xor[i].page);
3585				clear_page(va);
3586				sg_set_buf(&sg[i], va, PAGE_SIZE);
3587			}
3588			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3589
3590			skcipher_request_set_crypt(req, sg, sg,
3591						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3592			init_completion(&comp.comp);
3593			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3594			if (do_crypt(true, req, &comp))
3595				wait_for_completion(&comp.comp);
3596			kvfree(sg);
3597			r = dm_integrity_failed(ic);
3598			if (r) {
3599				*error = "Unable to encrypt journal";
3600				goto bad;
3601			}
3602			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3603
3604			crypto_free_skcipher(ic->journal_crypt);
3605			ic->journal_crypt = NULL;
3606		} else {
3607			unsigned crypt_len = roundup(ivsize, blocksize);
3608
3609			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3610			if (!req) {
3611				*error = "Could not allocate crypt request";
3612				r = -ENOMEM;
3613				goto bad;
3614			}
3615
3616			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3617			if (!crypt_iv) {
3618				*error = "Could not allocate iv";
3619				r = -ENOMEM;
3620				goto bad;
3621			}
3622
3623			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3624			if (!crypt_data) {
3625				*error = "Unable to allocate crypt data";
3626				r = -ENOMEM;
3627				goto bad;
3628			}
3629
3630			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3631			if (!ic->journal_scatterlist) {
3632				*error = "Unable to allocate sg list";
3633				r = -ENOMEM;
3634				goto bad;
3635			}
3636			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3637			if (!ic->journal_io_scatterlist) {
3638				*error = "Unable to allocate sg list";
3639				r = -ENOMEM;
3640				goto bad;
3641			}
3642			ic->sk_requests = kvmalloc_array(ic->journal_sections,
3643							 sizeof(struct skcipher_request *),
3644							 GFP_KERNEL | __GFP_ZERO);
3645			if (!ic->sk_requests) {
3646				*error = "Unable to allocate sk requests";
3647				r = -ENOMEM;
3648				goto bad;
3649			}
3650			for (i = 0; i < ic->journal_sections; i++) {
3651				struct scatterlist sg;
3652				struct skcipher_request *section_req;
3653				__u32 section_le = cpu_to_le32(i);
3654
3655				memset(crypt_iv, 0x00, ivsize);
3656				memset(crypt_data, 0x00, crypt_len);
3657				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3658
3659				sg_init_one(&sg, crypt_data, crypt_len);
3660				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3661				init_completion(&comp.comp);
3662				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3663				if (do_crypt(true, req, &comp))
3664					wait_for_completion(&comp.comp);
3665
3666				r = dm_integrity_failed(ic);
3667				if (r) {
3668					*error = "Unable to generate iv";
3669					goto bad;
3670				}
3671
3672				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3673				if (!section_req) {
3674					*error = "Unable to allocate crypt request";
3675					r = -ENOMEM;
3676					goto bad;
3677				}
3678				section_req->iv = kmalloc_array(ivsize, 2,
3679								GFP_KERNEL);
3680				if (!section_req->iv) {
3681					skcipher_request_free(section_req);
3682					*error = "Unable to allocate iv";
3683					r = -ENOMEM;
3684					goto bad;
3685				}
3686				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3687				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3688				ic->sk_requests[i] = section_req;
3689				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3690			}
3691		}
3692	}
3693
3694	for (i = 0; i < N_COMMIT_IDS; i++) {
3695		unsigned j;
3696retest_commit_id:
3697		for (j = 0; j < i; j++) {
3698			if (ic->commit_ids[j] == ic->commit_ids[i]) {
3699				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3700				goto retest_commit_id;
3701			}
3702		}
3703		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3704	}
3705
3706	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3707	if (journal_tree_size > ULONG_MAX) {
3708		*error = "Journal doesn't fit into memory";
3709		r = -ENOMEM;
3710		goto bad;
3711	}
3712	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3713	if (!ic->journal_tree) {
3714		*error = "Could not allocate memory for journal tree";
3715		r = -ENOMEM;
3716	}
3717bad:
3718	kfree(crypt_data);
3719	kfree(crypt_iv);
3720	skcipher_request_free(req);
3721
3722	return r;
3723}
3724
3725/*
3726 * Construct a integrity mapping
3727 *
3728 * Arguments:
3729 *	device
3730 *	offset from the start of the device
3731 *	tag size
3732 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3733 *	number of optional arguments
3734 *	optional arguments:
3735 *		journal_sectors
3736 *		interleave_sectors
3737 *		buffer_sectors
3738 *		journal_watermark
3739 *		commit_time
3740 *		meta_device
3741 *		block_size
3742 *		sectors_per_bit
3743 *		bitmap_flush_interval
3744 *		internal_hash
3745 *		journal_crypt
3746 *		journal_mac
3747 *		recalculate
3748 */
3749static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3750{
3751	struct dm_integrity_c *ic;
3752	char dummy;
3753	int r;
3754	unsigned extra_args;
3755	struct dm_arg_set as;
3756	static const struct dm_arg _args[] = {
3757		{0, 9, "Invalid number of feature args"},
3758	};
3759	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3760	bool should_write_sb;
3761	__u64 threshold;
3762	unsigned long long start;
3763	__s8 log2_sectors_per_bitmap_bit = -1;
3764	__s8 log2_blocks_per_bitmap_bit;
3765	__u64 bits_in_journal;
3766	__u64 n_bitmap_bits;
3767
3768#define DIRECT_ARGUMENTS	4
3769
3770	if (argc <= DIRECT_ARGUMENTS) {
3771		ti->error = "Invalid argument count";
3772		return -EINVAL;
3773	}
3774
3775	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3776	if (!ic) {
3777		ti->error = "Cannot allocate integrity context";
3778		return -ENOMEM;
3779	}
3780	ti->private = ic;
3781	ti->per_io_data_size = sizeof(struct dm_integrity_io);
3782	ic->ti = ti;
3783
3784	ic->in_progress = RB_ROOT;
3785	INIT_LIST_HEAD(&ic->wait_list);
3786	init_waitqueue_head(&ic->endio_wait);
3787	bio_list_init(&ic->flush_bio_list);
3788	init_waitqueue_head(&ic->copy_to_journal_wait);
3789	init_completion(&ic->crypto_backoff);
3790	atomic64_set(&ic->number_of_mismatches, 0);
3791	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3792
3793	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3794	if (r) {
3795		ti->error = "Device lookup failed";
3796		goto bad;
3797	}
3798
3799	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3800		ti->error = "Invalid starting offset";
3801		r = -EINVAL;
3802		goto bad;
3803	}
3804	ic->start = start;
3805
3806	if (strcmp(argv[2], "-")) {
3807		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3808			ti->error = "Invalid tag size";
3809			r = -EINVAL;
3810			goto bad;
3811		}
3812	}
3813
3814	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3815	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3816		ic->mode = argv[3][0];
3817	} else {
3818		ti->error = "Invalid mode (expecting J, B, D, R)";
3819		r = -EINVAL;
3820		goto bad;
3821	}
3822
3823	journal_sectors = 0;
3824	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3825	buffer_sectors = DEFAULT_BUFFER_SECTORS;
3826	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3827	sync_msec = DEFAULT_SYNC_MSEC;
3828	ic->sectors_per_block = 1;
3829
3830	as.argc = argc - DIRECT_ARGUMENTS;
3831	as.argv = argv + DIRECT_ARGUMENTS;
3832	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
3833	if (r)
3834		goto bad;
3835
3836	while (extra_args--) {
3837		const char *opt_string;
3838		unsigned val;
3839		unsigned long long llval;
3840		opt_string = dm_shift_arg(&as);
3841		if (!opt_string) {
3842			r = -EINVAL;
3843			ti->error = "Not enough feature arguments";
3844			goto bad;
3845		}
3846		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
3847			journal_sectors = val ? val : 1;
3848		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
3849			interleave_sectors = val;
3850		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
3851			buffer_sectors = val;
3852		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
3853			journal_watermark = val;
3854		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3855			sync_msec = val;
3856		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3857			if (ic->meta_dev) {
3858				dm_put_device(ti, ic->meta_dev);
3859				ic->meta_dev = NULL;
3860			}
3861			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
3862					  dm_table_get_mode(ti->table), &ic->meta_dev);
3863			if (r) {
3864				ti->error = "Device lookup failed";
3865				goto bad;
3866			}
3867		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
3868			if (val < 1 << SECTOR_SHIFT ||
3869			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
3870			    (val & (val -1))) {
3871				r = -EINVAL;
3872				ti->error = "Invalid block_size argument";
3873				goto bad;
3874			}
3875			ic->sectors_per_block = val >> SECTOR_SHIFT;
3876		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
3877			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
3878		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
3879			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
3880				r = -EINVAL;
3881				ti->error = "Invalid bitmap_flush_interval argument";
 
3882			}
3883			ic->bitmap_flush_interval = msecs_to_jiffies(val);
3884		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3885			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3886					    "Invalid internal_hash argument");
3887			if (r)
3888				goto bad;
3889		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3890			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3891					    "Invalid journal_crypt argument");
3892			if (r)
3893				goto bad;
3894		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3895			r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
3896					    "Invalid journal_mac argument");
3897			if (r)
3898				goto bad;
3899		} else if (!strcmp(opt_string, "recalculate")) {
3900			ic->recalculate_flag = true;
 
 
 
3901		} else if (!strcmp(opt_string, "allow_discards")) {
3902			ic->discard = true;
3903		} else if (!strcmp(opt_string, "fix_padding")) {
3904			ic->fix_padding = true;
 
 
 
 
3905		} else {
3906			r = -EINVAL;
3907			ti->error = "Invalid argument";
3908			goto bad;
3909		}
3910	}
3911
3912	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
3913	if (!ic->meta_dev)
3914		ic->meta_device_sectors = ic->data_device_sectors;
3915	else
3916		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
3917
3918	if (!journal_sectors) {
3919		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
3920				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
3921	}
3922
3923	if (!buffer_sectors)
3924		buffer_sectors = 1;
3925	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
3926
3927	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
3928		    "Invalid internal hash", "Error setting internal hash key");
3929	if (r)
3930		goto bad;
3931
3932	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
3933		    "Invalid journal mac", "Error setting journal mac key");
3934	if (r)
3935		goto bad;
3936
3937	if (!ic->tag_size) {
3938		if (!ic->internal_hash) {
3939			ti->error = "Unknown tag size";
3940			r = -EINVAL;
3941			goto bad;
3942		}
3943		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
3944	}
3945	if (ic->tag_size > MAX_TAG_SIZE) {
3946		ti->error = "Too big tag size";
3947		r = -EINVAL;
3948		goto bad;
3949	}
3950	if (!(ic->tag_size & (ic->tag_size - 1)))
3951		ic->log2_tag_size = __ffs(ic->tag_size);
3952	else
3953		ic->log2_tag_size = -1;
3954
3955	if (ic->mode == 'B' && !ic->internal_hash) {
3956		r = -EINVAL;
3957		ti->error = "Bitmap mode can be only used with internal hash";
3958		goto bad;
3959	}
3960
3961	if (ic->discard && !ic->internal_hash) {
3962		r = -EINVAL;
3963		ti->error = "Discard can be only used with internal hash";
3964		goto bad;
3965	}
3966
3967	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
3968	ic->autocommit_msec = sync_msec;
3969	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
3970
3971	ic->io = dm_io_client_create();
3972	if (IS_ERR(ic->io)) {
3973		r = PTR_ERR(ic->io);
3974		ic->io = NULL;
3975		ti->error = "Cannot allocate dm io";
3976		goto bad;
3977	}
3978
3979	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
3980	if (r) {
3981		ti->error = "Cannot allocate mempool";
3982		goto bad;
3983	}
3984
3985	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
3986					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
3987	if (!ic->metadata_wq) {
3988		ti->error = "Cannot allocate workqueue";
3989		r = -ENOMEM;
3990		goto bad;
3991	}
3992
3993	/*
3994	 * If this workqueue were percpu, it would cause bio reordering
3995	 * and reduced performance.
3996	 */
3997	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3998	if (!ic->wait_wq) {
3999		ti->error = "Cannot allocate workqueue";
4000		r = -ENOMEM;
4001		goto bad;
4002	}
4003
4004	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4005					  METADATA_WORKQUEUE_MAX_ACTIVE);
4006	if (!ic->offload_wq) {
4007		ti->error = "Cannot allocate workqueue";
4008		r = -ENOMEM;
4009		goto bad;
4010	}
4011
4012	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4013	if (!ic->commit_wq) {
4014		ti->error = "Cannot allocate workqueue";
4015		r = -ENOMEM;
4016		goto bad;
4017	}
4018	INIT_WORK(&ic->commit_work, integrity_commit);
4019
4020	if (ic->mode == 'J' || ic->mode == 'B') {
4021		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4022		if (!ic->writer_wq) {
4023			ti->error = "Cannot allocate workqueue";
4024			r = -ENOMEM;
4025			goto bad;
4026		}
4027		INIT_WORK(&ic->writer_work, integrity_writer);
4028	}
4029
4030	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4031	if (!ic->sb) {
4032		r = -ENOMEM;
4033		ti->error = "Cannot allocate superblock area";
4034		goto bad;
4035	}
4036
4037	r = sync_rw_sb(ic, REQ_OP_READ, 0);
4038	if (r) {
4039		ti->error = "Error reading superblock";
4040		goto bad;
4041	}
4042	should_write_sb = false;
4043	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4044		if (ic->mode != 'R') {
4045			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4046				r = -EINVAL;
4047				ti->error = "The device is not initialized";
4048				goto bad;
4049			}
4050		}
4051
4052		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4053		if (r) {
4054			ti->error = "Could not initialize superblock";
4055			goto bad;
4056		}
4057		if (ic->mode != 'R')
4058			should_write_sb = true;
4059	}
4060
4061	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
4062		r = -EINVAL;
4063		ti->error = "Unknown version";
4064		goto bad;
4065	}
4066	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4067		r = -EINVAL;
4068		ti->error = "Tag size doesn't match the information in superblock";
4069		goto bad;
4070	}
4071	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4072		r = -EINVAL;
4073		ti->error = "Block size doesn't match the information in superblock";
4074		goto bad;
4075	}
4076	if (!le32_to_cpu(ic->sb->journal_sections)) {
4077		r = -EINVAL;
4078		ti->error = "Corrupted superblock, journal_sections is 0";
4079		goto bad;
4080	}
4081	/* make sure that ti->max_io_len doesn't overflow */
4082	if (!ic->meta_dev) {
4083		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4084		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4085			r = -EINVAL;
4086			ti->error = "Invalid interleave_sectors in the superblock";
4087			goto bad;
4088		}
4089	} else {
4090		if (ic->sb->log2_interleave_sectors) {
4091			r = -EINVAL;
4092			ti->error = "Invalid interleave_sectors in the superblock";
4093			goto bad;
4094		}
4095	}
4096	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4097		r = -EINVAL;
4098		ti->error = "Journal mac mismatch";
4099		goto bad;
4100	}
4101
4102	get_provided_data_sectors(ic);
4103	if (!ic->provided_data_sectors) {
4104		r = -EINVAL;
4105		ti->error = "The device is too small";
4106		goto bad;
4107	}
4108
4109try_smaller_buffer:
4110	r = calculate_device_limits(ic);
4111	if (r) {
4112		if (ic->meta_dev) {
4113			if (ic->log2_buffer_sectors > 3) {
4114				ic->log2_buffer_sectors--;
4115				goto try_smaller_buffer;
4116			}
4117		}
4118		ti->error = "The device is too small";
4119		goto bad;
4120	}
4121
4122	if (log2_sectors_per_bitmap_bit < 0)
4123		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4124	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4125		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4126
4127	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4128	if (bits_in_journal > UINT_MAX)
4129		bits_in_journal = UINT_MAX;
4130	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4131		log2_sectors_per_bitmap_bit++;
4132
4133	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4134	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4135	if (should_write_sb) {
4136		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4137	}
4138	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4139				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4140	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4141
4142	if (!ic->meta_dev)
4143		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4144
4145	if (ti->len > ic->provided_data_sectors) {
4146		r = -EINVAL;
4147		ti->error = "Not enough provided sectors for requested mapping size";
4148		goto bad;
4149	}
4150
4151
4152	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4153	threshold += 50;
4154	do_div(threshold, 100);
4155	ic->free_sectors_threshold = threshold;
4156
4157	DEBUG_print("initialized:\n");
4158	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4159	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
4160	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4161	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
4162	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
4163	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4164	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
4165	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4166	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
4167	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
4168	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
4169	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
4170	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4171	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4172	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
4173
4174	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4175		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4176		ic->sb->recalc_sector = cpu_to_le64(0);
4177	}
4178
4179	if (ic->internal_hash) {
 
4180		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4181		if (!ic->recalc_wq ) {
4182			ti->error = "Cannot allocate workqueue";
4183			r = -ENOMEM;
4184			goto bad;
4185		}
4186		INIT_WORK(&ic->recalc_work, integrity_recalc);
4187		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4188		if (!ic->recalc_buffer) {
4189			ti->error = "Cannot allocate buffer for recalculating";
4190			r = -ENOMEM;
4191			goto bad;
4192		}
4193		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4194						 ic->tag_size, GFP_KERNEL);
 
 
4195		if (!ic->recalc_tags) {
4196			ti->error = "Cannot allocate tags for recalculating";
4197			r = -ENOMEM;
4198			goto bad;
4199		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4200	}
4201
4202	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4203			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4204	if (IS_ERR(ic->bufio)) {
4205		r = PTR_ERR(ic->bufio);
4206		ti->error = "Cannot initialize dm-bufio";
4207		ic->bufio = NULL;
4208		goto bad;
4209	}
4210	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4211
4212	if (ic->mode != 'R') {
4213		r = create_journal(ic, &ti->error);
4214		if (r)
4215			goto bad;
4216
4217	}
4218
4219	if (ic->mode == 'B') {
4220		unsigned i;
4221		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4222
4223		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4224		if (!ic->recalc_bitmap) {
4225			r = -ENOMEM;
4226			goto bad;
4227		}
4228		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4229		if (!ic->may_write_bitmap) {
4230			r = -ENOMEM;
4231			goto bad;
4232		}
4233		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4234		if (!ic->bbs) {
4235			r = -ENOMEM;
4236			goto bad;
4237		}
4238		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4239		for (i = 0; i < ic->n_bitmap_blocks; i++) {
4240			struct bitmap_block_status *bbs = &ic->bbs[i];
4241			unsigned sector, pl_index, pl_offset;
4242
4243			INIT_WORK(&bbs->work, bitmap_block_work);
4244			bbs->ic = ic;
4245			bbs->idx = i;
4246			bio_list_init(&bbs->bio_queue);
4247			spin_lock_init(&bbs->bio_queue_lock);
4248
4249			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4250			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4251			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4252
4253			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4254		}
4255	}
4256
4257	if (should_write_sb) {
4258		int r;
4259
4260		init_journal(ic, 0, ic->journal_sections, 0);
4261		r = dm_integrity_failed(ic);
4262		if (unlikely(r)) {
4263			ti->error = "Error initializing journal";
4264			goto bad;
4265		}
4266		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4267		if (r) {
4268			ti->error = "Error initializing superblock";
4269			goto bad;
4270		}
4271		ic->just_formatted = true;
4272	}
4273
4274	if (!ic->meta_dev) {
4275		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4276		if (r)
4277			goto bad;
4278	}
4279	if (ic->mode == 'B') {
4280		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4281		if (!max_io_len)
4282			max_io_len = 1U << 31;
4283		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4284		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4285			r = dm_set_target_max_io_len(ti, max_io_len);
4286			if (r)
4287				goto bad;
4288		}
4289	}
4290
4291	if (!ic->internal_hash)
4292		dm_integrity_set(ti, ic);
4293
4294	ti->num_flush_bios = 1;
4295	ti->flush_supported = true;
4296	if (ic->discard)
4297		ti->num_discard_bios = 1;
4298
 
4299	return 0;
4300
4301bad:
 
4302	dm_integrity_dtr(ti);
4303	return r;
4304}
4305
4306static void dm_integrity_dtr(struct dm_target *ti)
4307{
4308	struct dm_integrity_c *ic = ti->private;
4309
4310	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4311	BUG_ON(!list_empty(&ic->wait_list));
4312
 
 
4313	if (ic->metadata_wq)
4314		destroy_workqueue(ic->metadata_wq);
4315	if (ic->wait_wq)
4316		destroy_workqueue(ic->wait_wq);
4317	if (ic->offload_wq)
4318		destroy_workqueue(ic->offload_wq);
4319	if (ic->commit_wq)
4320		destroy_workqueue(ic->commit_wq);
4321	if (ic->writer_wq)
4322		destroy_workqueue(ic->writer_wq);
4323	if (ic->recalc_wq)
4324		destroy_workqueue(ic->recalc_wq);
4325	vfree(ic->recalc_buffer);
4326	kvfree(ic->recalc_tags);
4327	kvfree(ic->bbs);
4328	if (ic->bufio)
4329		dm_bufio_client_destroy(ic->bufio);
4330	mempool_exit(&ic->journal_io_mempool);
4331	if (ic->io)
4332		dm_io_client_destroy(ic->io);
4333	if (ic->dev)
4334		dm_put_device(ti, ic->dev);
4335	if (ic->meta_dev)
4336		dm_put_device(ti, ic->meta_dev);
4337	dm_integrity_free_page_list(ic->journal);
4338	dm_integrity_free_page_list(ic->journal_io);
4339	dm_integrity_free_page_list(ic->journal_xor);
4340	dm_integrity_free_page_list(ic->recalc_bitmap);
4341	dm_integrity_free_page_list(ic->may_write_bitmap);
4342	if (ic->journal_scatterlist)
4343		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4344	if (ic->journal_io_scatterlist)
4345		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4346	if (ic->sk_requests) {
4347		unsigned i;
4348
4349		for (i = 0; i < ic->journal_sections; i++) {
4350			struct skcipher_request *req = ic->sk_requests[i];
4351			if (req) {
4352				kfree_sensitive(req->iv);
4353				skcipher_request_free(req);
4354			}
4355		}
4356		kvfree(ic->sk_requests);
4357	}
4358	kvfree(ic->journal_tree);
4359	if (ic->sb)
4360		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4361
4362	if (ic->internal_hash)
4363		crypto_free_shash(ic->internal_hash);
4364	free_alg(&ic->internal_hash_alg);
4365
4366	if (ic->journal_crypt)
4367		crypto_free_skcipher(ic->journal_crypt);
4368	free_alg(&ic->journal_crypt_alg);
4369
4370	if (ic->journal_mac)
4371		crypto_free_shash(ic->journal_mac);
4372	free_alg(&ic->journal_mac_alg);
4373
4374	kfree(ic);
 
4375}
4376
4377static struct target_type integrity_target = {
4378	.name			= "integrity",
4379	.version		= {1, 6, 0},
4380	.module			= THIS_MODULE,
4381	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4382	.ctr			= dm_integrity_ctr,
4383	.dtr			= dm_integrity_dtr,
4384	.map			= dm_integrity_map,
4385	.postsuspend		= dm_integrity_postsuspend,
4386	.resume			= dm_integrity_resume,
4387	.status			= dm_integrity_status,
4388	.iterate_devices	= dm_integrity_iterate_devices,
4389	.io_hints		= dm_integrity_io_hints,
4390};
4391
4392static int __init dm_integrity_init(void)
4393{
4394	int r;
4395
4396	journal_io_cache = kmem_cache_create("integrity_journal_io",
4397					     sizeof(struct journal_io), 0, 0, NULL);
4398	if (!journal_io_cache) {
4399		DMERR("can't allocate journal io cache");
4400		return -ENOMEM;
4401	}
4402
4403	r = dm_register_target(&integrity_target);
4404
4405	if (r < 0)
4406		DMERR("register failed %d", r);
4407
4408	return r;
4409}
4410
4411static void __exit dm_integrity_exit(void)
4412{
4413	dm_unregister_target(&integrity_target);
4414	kmem_cache_destroy(journal_io_cache);
4415}
4416
4417module_init(dm_integrity_init);
4418module_exit(dm_integrity_exit);
4419
4420MODULE_AUTHOR("Milan Broz");
4421MODULE_AUTHOR("Mikulas Patocka");
4422MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4423MODULE_LICENSE("GPL");