1/*
2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
3 * Copyright (C) 2016-2017 Milan Broz
4 * Copyright (C) 2016-2017 Mikulas Patocka
5 *
6 * This file is released under the GPL.
7 */
8
9#include "dm-bio-record.h"
10
11#include <linux/compiler.h>
12#include <linux/module.h>
13#include <linux/device-mapper.h>
14#include <linux/dm-io.h>
15#include <linux/vmalloc.h>
16#include <linux/sort.h>
17#include <linux/rbtree.h>
18#include <linux/delay.h>
19#include <linux/random.h>
20#include <linux/reboot.h>
21#include <crypto/hash.h>
22#include <crypto/skcipher.h>
23#include <linux/async_tx.h>
24#include <linux/dm-bufio.h>
25
26#define DM_MSG_PREFIX "integrity"
27
28#define DEFAULT_INTERLEAVE_SECTORS 32768
29#define DEFAULT_JOURNAL_SIZE_FACTOR 7
30#define DEFAULT_SECTORS_PER_BITMAP_BIT 32768
31#define DEFAULT_BUFFER_SECTORS 128
32#define DEFAULT_JOURNAL_WATERMARK 50
33#define DEFAULT_SYNC_MSEC 10000
34#define DEFAULT_MAX_JOURNAL_SECTORS 131072
35#define MIN_LOG2_INTERLEAVE_SECTORS 3
36#define MAX_LOG2_INTERLEAVE_SECTORS 31
37#define METADATA_WORKQUEUE_MAX_ACTIVE 16
38#define RECALC_SECTORS 8192
39#define RECALC_WRITE_SUPER 16
40#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
41#define BITMAP_FLUSH_INTERVAL (10 * HZ)
42#define DISCARD_FILLER 0xf6
43
44/*
45 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
46 * so it should not be enabled in the official kernel
47 */
48//#define DEBUG_PRINT
49//#define INTERNAL_VERIFY
50
51/*
52 * On disk structures
53 */
54
55#define SB_MAGIC "integrt"
56#define SB_VERSION_1 1
57#define SB_VERSION_2 2
58#define SB_VERSION_3 3
59#define SB_VERSION_4 4
60#define SB_SECTORS 8
61#define MAX_SECTORS_PER_BLOCK 8
62
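/*
 * On-disk superblock; it occupies the first SB_SECTORS sectors at
 * ic->start of the metadata device (or of the data device when no
 * separate metadata device is used). Multi-byte fields are stored
 * little-endian.
 */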
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
#define SB_FLAG_FIXED_PADDING 0x8

#define JOURNAL_ENTRY_ROUNDUP 8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

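/*
 * One journal entry describes one data block: the target sector number,
 * then the last 8 bytes of each of the block's sectors (in the journal
 * those bytes are displaced by the per-sector commit_id), and finally
 * the integrity tag, reached via journal_entry_tag() below.
 */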
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS 8

#define N_COMMIT_IDS 4

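/*
 * Journal sections are stamped with one of N_COMMIT_IDS rotating commit
 * ids; prev_commit_seq()/next_commit_seq() step through the generations
 * modulo N_COMMIT_IDS, which lets replay decide where writing stopped.
 */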
static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool fix_padding;
	bool discard;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_opf op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;

};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL 32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...) do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed a layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name = "DM-DIF-EXT-TAG",
	.generate_fn = NULL,
	.verify_fn = NULL,
	.prepare_fn = dm_integrity_prepare,
	.complete_fn = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

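/*
 * Example for dm_integrity_commit_id() above: section i = 2, sector j = 5
 * yields commit_ids[seq] ^ 0x0000000200000005, so a journal piece that
 * ends up at the wrong (section, sector) fails to match its commit id.
 */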
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n) \
do { \
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
	(n) >>= (ic)->sb->log2_sectors_per_block; \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

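/*
 * Layout with interleaved metadata (no separate metadata device): after
 * ic->start, the initial sectors (superblock and journal) are followed by
 * alternating metadata runs and data areas of 2^log2_interleave_sectors
 * sectors each, so the data of area n sits behind n + 1 metadata runs:
 *
 *	[sb | journal] [meta 0] [data 0] [meta 1] [data 1] ...
 */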
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE)
		sb_set_version(ic);

	return dm_io(&io_req, 1, &io_loc, NULL);
}

#define BITMAP_OP_TEST_ALL_SET 0
#define BITMAP_OP_TEST_ALL_CLEAR 1
#define BITMAP_OP_SET 2
#define BITMAP_OP_CLEAR 3

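/*
 * Apply one BITMAP_OP_* operation to the bits covering sectors
 * [sector, sector + n_sectors). One bit represents
 * 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors.
 * The TEST_ALL_* modes return false on the first bit that does not
 * match; SET and CLEAR always return true. Whole longs are processed
 * at once when the range allows it.
 */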
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
			sector,
			n_sectors,
			ic->sb->log2_sectors_per_block,
			ic->log2_blocks_per_bitmap_bit,
			mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);
		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

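/*
 * Translate a journal (section, offset) position to a page index and
 * byte offset in the journal page list. E.g. with 4 KiB pages, eight
 * 512-byte sectors fit per page, so absolute journal sector 9 maps to
 * pl_index 1 and pl_offset 512.
 */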
static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

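/*
 * Journal entries are striped across the JOURNAL_BLOCK_SECTORS metadata
 * sectors of a section: entry n lives in sector n % JOURNAL_BLOCK_SECTORS
 * at slot n / JOURNAL_BLOCK_SECTORS.
 */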
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

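/*
 * When the journal cipher has no IV, the journal is encrypted by XORing
 * it with a pregenerated keystream kept in ic->journal_xor; async_xor()
 * offloads the XOR page by page and complete_journal_op() is signalled
 * once per submitted page.
 */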
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

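/*
 * Half-open interval test: [s1, s1 + n1) and [s2, s2 + n2) overlap
 * iff s1 < s2 + n2 && s1 + n1 > s2.
 */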
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND (-1U)

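/*
 * Find the journal node for the given sector, or NOT_FOUND. Equal keys
 * are inserted to the right in add_journal_node(), so the rightmost
 * match returned here is the most recently added node for that sector;
 * *next_sector is set to the smallest key greater than sector.
 */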
static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ 0
#define TAG_WRITE 1
#define TAG_CMP 2

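/*
 * Read, write or compare total_size bytes of tag data starting at
 * *metadata_block / *metadata_offset, advancing both positions.
 * Returns 0 on success and a negative errno on I/O failure. For TAG_CMP,
 * a positive return value is the number of tag bytes that remained when
 * a mismatch was detected; callers use it to locate the failing sector.
 */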
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
	unsigned hash_offset = 0;
	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
					}
			} else {
				unsigned i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size))) {
			hash_offset = (hash_offset + to_copy) % ic->tag_size;
		}

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;
		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

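/*
 * Compute the internal-hash tag of one block: the digest of the
 * little-endian sector number concatenated with the block data,
 * zero-padded when the digest is shorter than ic->tag_size.
 */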
1494static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1495 const char *data, char *result)
1496{
1497 __u64 sector_le = cpu_to_le64(sector);
1498 SHASH_DESC_ON_STACK(req, ic->internal_hash);
1499 int r;
1500 unsigned digest_size;
1501
1502 req->tfm = ic->internal_hash;
1503
1504 r = crypto_shash_init(req);
1505 if (unlikely(r < 0)) {
1506 dm_integrity_io_error(ic, "crypto_shash_init", r);
1507 goto failed;
1508 }
1509
1510 r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le);
1511 if (unlikely(r < 0)) {
1512 dm_integrity_io_error(ic, "crypto_shash_update", r);
1513 goto failed;
1514 }
1515
1516 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1517 if (unlikely(r < 0)) {
1518 dm_integrity_io_error(ic, "crypto_shash_update", r);
1519 goto failed;
1520 }
1521
1522 r = crypto_shash_final(req, result);
1523 if (unlikely(r < 0)) {
1524 dm_integrity_io_error(ic, "crypto_shash_final", r);
1525 goto failed;
1526 }
1527
1528 digest_size = crypto_shash_digestsize(ic->internal_hash);
1529 if (unlikely(digest_size < ic->tag_size))
1530 memset(result + digest_size, 0, ic->tag_size - digest_size);
1531
1532 return;
1533
1534failed:
1535 /* this shouldn't happen anyway, the hash functions have no reason to fail */
1536 get_random_bytes(result, ic->tag_size);
1537}
1538
1539static void integrity_metadata(struct work_struct *w)
1540{
1541 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1542 struct dm_integrity_c *ic = dio->ic;
1543
1544 int r;
1545
1546 if (ic->internal_hash) {
1547 struct bvec_iter iter;
1548 struct bio_vec bv;
1549 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1550 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1551 char *checksums;
1552 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1553 char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1554 sector_t sector;
1555 unsigned sectors_to_process;
1556
1557 if (unlikely(ic->mode == 'R'))
1558 goto skip_io;
1559
1560 if (likely(dio->op != REQ_OP_DISCARD))
1561 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1562 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1563 else
1564 checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1565 if (!checksums) {
1566 checksums = checksums_onstack;
1567 if (WARN_ON(extra_space &&
1568 digest_size > sizeof(checksums_onstack))) {
1569 r = -EINVAL;
1570 goto error;
1571 }
1572 }
1573
1574 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1575 sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1576 unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1577 unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1578 unsigned max_blocks = max_size / ic->tag_size;
1579 memset(checksums, DISCARD_FILLER, max_size);
1580
1581 while (bi_size) {
1582 unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1583 this_step_blocks = min(this_step_blocks, max_blocks);
1584 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1585 this_step_blocks * ic->tag_size, TAG_WRITE);
1586 if (unlikely(r)) {
1587 if (likely(checksums != checksums_onstack))
1588 kfree(checksums);
1589 goto error;
1590 }
1591
1592 /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
1593 printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
1594 printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
1595 BUG();
1596 }*/
1597 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1598 bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1599 }
1600
1601 if (likely(checksums != checksums_onstack))
1602 kfree(checksums);
1603 goto skip_io;
1604 }
1605
1606 sector = dio->range.logical_sector;
1607 sectors_to_process = dio->range.n_sectors;
1608
1609 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1610 unsigned pos;
1611 char *mem, *checksums_ptr;
1612
1613again:
1614 mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1615 pos = 0;
1616 checksums_ptr = checksums;
1617 do {
1618 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1619 checksums_ptr += ic->tag_size;
1620 sectors_to_process -= ic->sectors_per_block;
1621 pos += ic->sectors_per_block << SECTOR_SHIFT;
1622 sector += ic->sectors_per_block;
1623 } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1624 kunmap_atomic(mem);
1625
1626 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1627 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1628 if (unlikely(r)) {
1629 if (r > 0) {
1630 char b[BDEVNAME_SIZE];
1631 DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
1632 (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1633 r = -EILSEQ;
1634 atomic64_inc(&ic->number_of_mismatches);
1635 }
1636 if (likely(checksums != checksums_onstack))
1637 kfree(checksums);
1638 goto error;
1639 }
1640
1641 if (!sectors_to_process)
1642 break;
1643
1644 if (unlikely(pos < bv.bv_len)) {
1645 bv.bv_offset += pos;
1646 bv.bv_len -= pos;
1647 goto again;
1648 }
1649 }
1650
1651 if (likely(checksums != checksums_onstack))
1652 kfree(checksums);
1653 } else {
1654 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1655
1656 if (bip) {
1657 struct bio_vec biv;
1658 struct bvec_iter iter;
1659 unsigned data_to_process = dio->range.n_sectors;
1660 sector_to_block(ic, data_to_process);
1661 data_to_process *= ic->tag_size;
1662
1663 bip_for_each_vec(biv, bip, iter) {
1664 unsigned char *tag;
1665 unsigned this_len;
1666
1667 BUG_ON(PageHighMem(biv.bv_page));
1668 tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1669 this_len = min(biv.bv_len, data_to_process);
1670 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1671 this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1672 if (unlikely(r))
1673 goto error;
1674 data_to_process -= this_len;
1675 if (!data_to_process)
1676 break;
1677 }
1678 }
1679 }
1680skip_io:
1681 dec_in_flight(dio);
1682 return;
1683error:
1684 dio->bi_status = errno_to_blk_status(r);
1685 dec_in_flight(dio);
1686}
1687
1688static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1689{
1690 struct dm_integrity_c *ic = ti->private;
1691 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1692 struct bio_integrity_payload *bip;
1693
1694 sector_t area, offset;
1695
1696 dio->ic = ic;
1697 dio->bi_status = 0;
1698 dio->op = bio_op(bio);
1699
1700 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1701 if (ti->max_io_len) {
1702 sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1703 unsigned log2_max_io_len = __fls(ti->max_io_len);
1704 sector_t start_boundary = sec >> log2_max_io_len;
1705 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1706 if (start_boundary < end_boundary) {
1707 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1708 dm_accept_partial_bio(bio, len);
1709 }
1710 }
1711 }
1712
1713 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1714 submit_flush_bio(ic, dio);
1715 return DM_MAPIO_SUBMITTED;
1716 }
1717
1718 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1719 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1720 if (unlikely(dio->fua)) {
1721 /*
1722 * Don't pass down the FUA flag because we have to flush
1723 * disk cache anyway.
1724 */
1725 bio->bi_opf &= ~REQ_FUA;
1726 }
1727 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1728 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1729 dio->range.logical_sector, bio_sectors(bio),
1730 ic->provided_data_sectors);
1731 return DM_MAPIO_KILL;
1732 }
1733 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1734 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1735 ic->sectors_per_block,
1736 dio->range.logical_sector, bio_sectors(bio));
1737 return DM_MAPIO_KILL;
1738 }
1739
1740 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1741 struct bvec_iter iter;
1742 struct bio_vec bv;
1743 bio_for_each_segment(bv, bio, iter) {
1744 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1745 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1746 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1747 return DM_MAPIO_KILL;
1748 }
1749 }
1750 }
1751
1752 bip = bio_integrity(bio);
1753 if (!ic->internal_hash) {
1754 if (bip) {
1755 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1756 if (ic->log2_tag_size >= 0)
1757 wanted_tag_size <<= ic->log2_tag_size;
1758 else
1759 wanted_tag_size *= ic->tag_size;
1760 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1761 DMERR("Invalid integrity data size %u, expected %u",
1762 bip->bip_iter.bi_size, wanted_tag_size);
1763 return DM_MAPIO_KILL;
1764 }
1765 }
1766 } else {
1767 if (unlikely(bip != NULL)) {
1768 DMERR("Unexpected integrity data when using internal hash");
1769 return DM_MAPIO_KILL;
1770 }
1771 }
1772
1773 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1774 return DM_MAPIO_KILL;
1775
1776 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1777 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1778 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1779
1780 dm_integrity_map_continue(dio, true);
1781 return DM_MAPIO_SUBMITTED;
1782}
1783
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->op == REQ_OP_WRITE))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(dio->op == REQ_OP_READ)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->op == REQ_OP_WRITE))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->op == REQ_OP_WRITE))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->op == REQ_OP_WRITE)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(dio->op == REQ_OP_READ))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->op == REQ_OP_WRITE)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

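/*
 * Continue servicing a bio after ->map(). Writes in journal mode allocate
 * journal entries under the endio_wait lock; reads first look the sectors
 * up in the journal tree so that uncommitted data is served from the
 * journal. The function never sleeps when called from the request routine
 * (from_map == true); in that case it offloads to a workqueue instead.
 */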
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool discard_retried = false;
	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
		need_sync_io = true;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
		if (dio->op == REQ_OP_WRITE) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		if (journal_read_pos != NOT_FOUND)
			dio->range.n_sectors = ic->sectors_per_block;
		wait_and_add_new_range(ic, &dio->range);
		/*
		 * wait_and_add_new_range drops the spinlock, so the journal
		 * may have been changed arbitrarily. We need to recheck.
		 * To simplify the code, we restrict I/O size to just one block.
		 */
		if (journal_read_pos != NOT_FOUND) {
			sector_t next_sector;
			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (unlikely(new_pos != journal_read_pos)) {
				remove_range_unlocked(ic, &dio->range);
				goto retry;
			}
		}
	}
	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
		sector_t next_sector;
		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
		if (unlikely(new_pos != NOT_FOUND) ||
		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
			remove_range_unlocked(ic, &dio->range);
			spin_unlock_irq(&ic->endio_wait.lock);
			queue_work(ic->commit_wq, &ic->commit_work);
			flush_workqueue(ic->commit_wq);
			queue_work(ic->writer_wq, &ic->writer_work);
			flush_workqueue(ic->writer_wq);
			discard_retried = true;
			goto lock_retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dm_bio_record(&dio->bio_details, bio);
	bio_set_dev(bio, ic->dev->bdev);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;

	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
		integrity_metadata(&dio->work);
		dm_integrity_flush_buffers(ic);

		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
		dio->completion = NULL;

		submit_bio_noacct(bio);

		return;
	}

	submit_bio_noacct(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);

	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}


static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

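/*
 * If the current free section is partially filled, skip the rest of it so
 * that the next commit starts on a section boundary; the skipped entries
 * are accounted as used. The WARN_ON below is a consistency check of the
 * journal space bookkeeping.
 */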
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
		    ic->journal_section_entries + ic->free_sectors)) {
		DMCRIT("journal_sections %u, journal_section_entries %u, "
		       "n_uncommitted_sections %u, n_committed_sections %u, "
		       "journal_section_entries %u, free_sectors %u",
		       ic->journal_sections, ic->journal_section_entries,
		       ic->n_uncommitted_sections, ic->n_committed_sections,
		       ic->journal_section_entries, ic->free_sectors);
	}
}

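/*
 * Commit work: stamp the uncommitted journal sections with the current
 * commit id and write them to stable storage, then release any queued
 * flush bios. Once committed, the writer work is kicked if the journal
 * is running low on free sectors.
 */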
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}

static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}

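/*
 * Write journal sections back to their final location on the data device.
 * Consecutive journal entries that map to adjacent device sectors are
 * coalesced into a single copy operation. During normal operation
 * (from_replay == false), entries superseded by a newer committed entry
 * are skipped.
 */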
static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			if (unlikely(sec >= ic->provided_data_sectors))
				continue;
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				if (unlikely(sec2 >= ic->provided_data_sectors))
					break;
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			add_new_range_and_wait(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					dm_integrity_io_error(ic, "writing tags", r);
				}
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}

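/*
 * Writer work: flush committed journal sections to the data device and
 * return the freed journal space to the allocator, waking up writers
 * that were blocked on a full journal.
 */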
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}

static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic);
	if (dm_integrity_failed(ic))
		return;

	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}

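/*
 * Background recalculation: walk the device from sb->recalc_sector, read
 * each chunk of data, compute the integrity tags and write them to the
 * metadata area. Progress is persisted in the superblock every
 * RECALC_WRITE_SUPER chunks so an interrupted recalculation can resume.
 */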
static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned metadata_offset;
	sector_t logical_sector, n_sectors;
	__u8 *t;
	unsigned i;
	int r;
	unsigned super_counter = 0;

	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(dm_post_suspending(ic->ti)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
		if (ic->mode == 'B') {
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		}
		goto unlock_ret;
	}

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);

	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);
	logical_sector = range.logical_sector;
	n_sectors = range.n_sectors;

	if (ic->mode == 'B') {
		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
			goto advance_and_next;
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			logical_sector += ic->sectors_per_block;
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		get_area_and_offset(ic, logical_sector, &area, &offset);
	}

	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		if (ic->mode == 'B') {
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
		}
		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_op = REQ_OP_READ;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = ic->recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = ic->recalc_tags;
	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	if (ic->mode == 'B') {
		sector_t start, end;
		start = (range.logical_sector >>
			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		end = ((range.logical_sector + range.n_sectors) >>
		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
	}

advance_and_next:
	cond_resched();

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	return;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);
}

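/*
 * Bitmap-mode write path: bios whose region is not yet marked in the
 * may_write bitmap are queued here. The work sets the bits in the
 * on-disk bitmap block, flushes it with FUA, and only then lets the
 * queued bios proceed.
 */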
static void bitmap_block_work(struct work_struct *w)
{
	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
	struct dm_integrity_c *ic = bbs->ic;
	struct bio *bio;
	struct bio_list bio_queue;
	struct bio_list waiting;

	bio_list_init(&waiting);

	spin_lock(&bbs->bio_queue_lock);
	bio_queue = bbs->bio_queue;
	bio_list_init(&bbs->bio_queue);
	spin_unlock(&bbs->bio_queue_lock);

	while ((bio = bio_list_pop(&bio_queue))) {
		struct dm_integrity_io *dio;

		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			remove_range(ic, &dio->range);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
		} else {
			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
					dio->range.n_sectors, BITMAP_OP_SET);
			bio_list_add(&waiting, bio);
		}
	}

	if (bio_list_empty(&waiting))
		return;

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);

	while ((bio = bio_list_pop(&waiting))) {
		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				dio->range.n_sectors, BITMAP_OP_SET);

		remove_range(ic, &dio->range);
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
	}

	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
}

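/*
 * Periodic bitmap flush: wait for all in-flight I/O (by locking the whole
 * device range), flush dirty data and metadata, then clear the on-disk
 * bitmap for everything that is known to be consistent.
 */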
static void bitmap_flush_work(struct work_struct *work)
{
	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
	struct dm_integrity_range range;
	unsigned long limit;
	struct bio *bio;

	dm_integrity_flush_buffers(ic);

	range.logical_sector = 0;
	range.n_sectors = ic->provided_data_sectors;

	spin_lock_irq(&ic->endio_wait.lock);
	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);

	dm_integrity_flush_buffers(ic);
	if (ic->meta_dev)
		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);

	limit = ic->provided_data_sectors;
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		limit = le64_to_cpu(ic->sb->recalc_sector)
			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	}
	/*DEBUG_print("zeroing journal\n");*/
	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);

	rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
		bio_endio(bio);
		spin_unlock_irq(&ic->endio_wait.lock);
		spin_lock_irq(&ic->endio_wait.lock);
	}
	spin_unlock_irq(&ic->endio_wait.lock);
}


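/*
 * Initialize (erase) a range of journal sections: zero the data, mark all
 * entries unused and stamp each sector with the commit id derived from
 * commit_seq, then write the sections out.
 */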
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}

static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -EIO;
}

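/*
 * Journal replay on activation. Each journal sector carries a commit id
 * derived from its (section, sector) position and one of N_COMMIT_IDS
 * rotating sequence numbers; the sequence advances each time the journal
 * wraps. Replay scans which sequence numbers are present to locate the
 * end of the last complete commit, replays the consistent part of the
 * journal, and clears the journal if the on-disk state is unusable.
 */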
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;
			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}

static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
	DEBUG_print("dm_integrity_enter_synchronous_mode\n");

	if (ic->mode == 'B') {
		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
		ic->synchronous_mode = 1;

		cancel_delayed_work_sync(&ic->bitmap_flush_work);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		flush_workqueue(ic->commit_wq);
	}
}

static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);

	DEBUG_print("dm_integrity_reboot\n");

	dm_integrity_enter_synchronous_mode(ic);

	return NOTIFY_DONE;
}

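/*
 * Post-suspend: drain all outstanding work so the device reaches a
 * quiescent, consistent state. In bitmap mode the journal area is
 * reinitialized and the dirty-bitmap flag is cleared, so a clean shutdown
 * does not trigger recalculation on the next activation.
 */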
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	int r;

	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));

	del_timer_sync(&ic->autocommit_timer);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		if (ic->meta_dev)
			queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	if (ic->mode == 'B') {
		dm_integrity_flush_buffers(ic);
#if 1
		/* set to 0 to test bitmap replay code */
		init_journal(ic, 0, ic->journal_sections, 0);
		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
#endif
	}

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}

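/*
 * Resume: bring the on-disk metadata up to date with the current device
 * size, replay the journal or the dirty bitmap as appropriate for the
 * mode, and restart recalculation if it was interrupted.
 */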
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	int r;

	DEBUG_print("resume\n");

	if (ic->provided_data_sectors != old_provided_data_sectors) {
		if (ic->provided_data_sectors > old_provided_data_sectors &&
		    ic->mode == 'B' &&
		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
			rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}

		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
		DEBUG_print("resume dirty_bitmap\n");
		rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		if (ic->mode == 'B') {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
						     BITMAP_OP_TEST_ALL_CLEAR)) {
					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
					ic->sb->recalc_sector = cpu_to_le64(0);
				}
			} else {
				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
		} else {
			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
			init_journal(ic, 0, ic->journal_sections, 0);
			replay_journal(ic);
			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	} else {
		replay_journal(ic);
		if (ic->mode == 'B') {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
			r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
			if (unlikely(r))
				dm_integrity_io_error(ic, "writing superblock", r);

			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
			}
			rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}
	}

	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}

	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
	ic->reboot_notifier.next = NULL;
	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));

#if 0
	/* set to 1 to stress test synchronous mode */
	dm_integrity_enter_synchronous_mode(ic);
#endif
}

static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
		       (unsigned long long)atomic64_read(&ic->number_of_mismatches),
		       ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->discard;
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'B';
		arg_count += ic->mode == 'B';
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		if (ic->discard)
			DMEMIT(" allow_discards");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}
		if (ic->mode == 'B') {
			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
		}
		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
			DMEMIT(" fix_padding");

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}

static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}

static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}

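/*
 * Derive the journal geometry from the superblock. A journal section
 * consists of JOURNAL_BLOCK_SECTORS entry sectors followed by the data
 * sectors they describe. Worked example (illustrative numbers, not
 * mandated anywhere): with tag_size 4 and one 512-byte sector per block,
 * an entry takes roundup(16 + 4, 8) = 24 bytes, so a 504-byte journal
 * sector holds 21 entries, a section holds 21 * 8 = 168 entries and
 * occupies 168 + 8 = 176 sectors.
 */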
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}

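/*
 * Check that the journal, metadata and data fit on the device(s) and
 * compute the metadata layout. With a separate metadata device the
 * metadata is a single linear run; in interleaved mode each data area is
 * preceded by a metadata run whose size is padded for compatibility with
 * existing volumes.
 */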
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		/* we have to maintain excessive padding for compatibility with existing volumes */
		__u64 metadata_run_padding =
			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);

		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					    metadata_run_padding) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
			>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}

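/*
 * Compute the usable data size. In interleaved mode the largest
 * provided_data_sectors that still passes calculate_device_limits() is
 * found by trying to set one bit at a time, from the highest downward.
 */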
static void get_provided_data_sectors(struct dm_integrity_c *ic)
{
	if (!ic->meta_dev) {
		int test_bit;
		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
	} else {
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
	}
}

static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (!ic->meta_dev) {
		if (ic->fix_padding)
			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);

		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	sb_set_version(ic);

	return 0;
}

static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}

static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}

static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}

static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
								   struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

static void free_alg(struct alg_spec *a)
{
	kfree_sensitive(a->alg_string);
	kfree_sensitive(a->key);
	memset(a, 0, sizeof *a);
}

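/*
 * Parse an "argument:algorithm[:key]" table argument, e.g.
 * internal_hash:crc32c or journal_mac:hmac(sha256):<hex key>. The
 * optional key is given as a hex string and decoded into binary.
 */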
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}

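/*
 * Allocate the in-memory journal and, if journal encryption is
 * configured, set up either the xor-per-page scheme (for ciphers with
 * block size 1) or per-sector IV encryption with scatterlists.
 */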
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

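		/*
		 * Two strategies follow (a summary of the code below, not new
		 * behaviour): for stream ciphers (blocksize == 1) the whole
		 * keystream is precomputed once into ic->journal_xor by
		 * encrypting zeroed pages, after which the tfm can be freed
		 * and the journal is protected by XOR-ing with that keystream;
		 * for block ciphers, a per-section IV is derived by encrypting
		 * the little-endian section number, and one reusable request
		 * per section is kept in ic->sk_requests.
		 */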
		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

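	/*
	 * Explanatory note: in the stream-cipher path above, the seed commit
	 * IDs were encrypted in place together with the keystream pages (the
	 * extra scatterlist entry covering ic->commit_ids), so they are now
	 * pseudorandom and may collide. The loop below nudges any duplicate
	 * upward until all N_COMMIT_IDS values are distinct; duplicate IDs
	 * would make journal generations indistinguishable during replay.
	 */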
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 *		allow_discards
 *		fix_padding
 */
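
/*
 * Illustrative table line (hypothetical device and sizes, following the
 * argument list above): a journaled mapping over /dev/sdb with an internal
 * crc32c checksum could be created with
 *	dmsetup create dm-int --table \
 *		"0 <provided_data_sectors> integrity /dev/sdb 0 4 J 1 internal_hash:crc32c"
 * where the tag size of 4 bytes matches the crc32c digest size.
 */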
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;
		unsigned long long llval;
		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
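			/*
			 * e.g. block_size:4096 (the maximum, since
			 * MAX_SECTORS_PER_BLOCK is 8) gives
			 * sectors_per_block == 8; values that are not a
			 * power of two between 512 and 4096 were rejected
			 * above.
			 */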
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}
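	/*
	 * Worked example of the default above (illustrative): for a 1 TiB
	 * data device (2^31 sectors of 512 bytes), the shift by
	 * DEFAULT_JOURNAL_SIZE_FACTOR (7) would give 2^24 sectors, so the
	 * journal is clamped to DEFAULT_MAX_JOURNAL_SECTORS (131072 sectors,
	 * i.e. 64 MiB).
	 */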

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
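	/*
	 * e.g. the default buffer_sectors of 128 yields
	 * log2_buffer_sectors == 7, i.e. 64 KiB metadata buffers; the min()
	 * caps the buffer size at 2^31 bytes.
	 */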

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Tag size is too big";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;
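	/*
	 * e.g. a 4-byte crc32c tag is a power of two, so log2_tag_size == 2
	 * and tag offsets can be computed with shifts; a 20-byte sha1 tag is
	 * not, so log2_tag_size == -1 and the slower multiply/divide path is
	 * used instead.
	 */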

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can only be used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can only be used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}

	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb) {
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	}
	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
			 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
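	/*
	 * Worked example (illustrative): with the default of 32768 sectors
	 * per bitmap bit (16 MiB of data per bit at 512-byte sectors) and
	 * 512-byte blocks, log2_blocks_per_bitmap_bit == 15. A single
	 * 4096-byte bitmap block holds 4096 * 8 == 32768 bits, so one bitmap
	 * block describes up to 512 GiB of data, and n_bitmap_blocks is the
	 * ceiling division computed above.
	 */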

	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;
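	/*
	 * Arithmetic example (illustrative): with 10000 journal entries and
	 * the default journal_watermark of 50 (percent), threshold becomes
	 * (10000 * 50 + 50) / 100 == 5000, so a commit is triggered once
	 * free journal space drops below half; the "+ 50" rounds the
	 * division to the nearest integer.
	 */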

	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);

	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (ic->mode == 'B') {
		unsigned i;
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}

	if (should_write_sb) {
		int r;

		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}
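	/*
	 * Example of the bitmap-mode limit above (illustrative): with
	 * sectors_per_block == 1 and log2_blocks_per_bitmap_bit == 15,
	 * max_io_len is (1 << 15) * 32768 == 2^30 sectors, i.e. the span of
	 * data described by one bitmap block, so splitting at this boundary
	 * keeps each bio within a single bitmap block.
	 */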

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 6, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");
1/*
2 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
3 * Copyright (C) 2016-2017 Milan Broz
4 * Copyright (C) 2016-2017 Mikulas Patocka
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/compiler.h>
10#include <linux/module.h>
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/vmalloc.h>
14#include <linux/sort.h>
15#include <linux/rbtree.h>
16#include <linux/delay.h>
17#include <linux/random.h>
18#include <crypto/hash.h>
19#include <crypto/skcipher.h>
20#include <linux/async_tx.h>
21#include <linux/dm-bufio.h>
22
23#define DM_MSG_PREFIX "integrity"
24
25#define DEFAULT_INTERLEAVE_SECTORS 32768
26#define DEFAULT_JOURNAL_SIZE_FACTOR 7
27#define DEFAULT_BUFFER_SECTORS 128
28#define DEFAULT_JOURNAL_WATERMARK 50
29#define DEFAULT_SYNC_MSEC 10000
30#define DEFAULT_MAX_JOURNAL_SECTORS 131072
31#define MIN_LOG2_INTERLEAVE_SECTORS 3
32#define MAX_LOG2_INTERLEAVE_SECTORS 31
33#define METADATA_WORKQUEUE_MAX_ACTIVE 16
34
35/*
36 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
37 * so it should not be enabled in the official kernel
38 */
39//#define DEBUG_PRINT
40//#define INTERNAL_VERIFY
41
42/*
43 * On disk structures
44 */
45
46#define SB_MAGIC "integrt"
47#define SB_VERSION 1
48#define SB_SECTORS 8
49#define MAX_SECTORS_PER_BLOCK 8
50
51struct superblock {
52 __u8 magic[8];
53 __u8 version;
54 __u8 log2_interleave_sectors;
55 __u16 integrity_tag_size;
56 __u32 journal_sections;
57 __u64 provided_data_sectors; /* userspace uses this value */
58 __u32 flags;
59 __u8 log2_sectors_per_block;
60};
61
62#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
63
64#define JOURNAL_ENTRY_ROUNDUP 8
65
66typedef __u64 commit_id_t;
67#define JOURNAL_MAC_PER_SECTOR 8
68
69struct journal_entry {
70 union {
71 struct {
72 __u32 sector_lo;
73 __u32 sector_hi;
74 } s;
75 __u64 sector;
76 } u;
77 commit_id_t last_bytes[0];
78 /* __u8 tag[0]; */
79};
80
81#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
82
83#if BITS_PER_LONG == 64
84#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
85#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
86#elif defined(CONFIG_LBDAF)
87#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
88#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
89#else
90#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
91#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
92#endif
93#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
94#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
95#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
96#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
97
98#define JOURNAL_BLOCK_SECTORS 8
99#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
100#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
101
102struct journal_sector {
103 __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
104 __u8 mac[JOURNAL_MAC_PER_SECTOR];
105 commit_id_t commit_id;
106};
107
108#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
109
110#define METADATA_PADDING_SECTORS 8
111
112#define N_COMMIT_IDS 4
113
114static unsigned char prev_commit_seq(unsigned char seq)
115{
116 return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
117}
118
119static unsigned char next_commit_seq(unsigned char seq)
120{
121 return (seq + 1) % N_COMMIT_IDS;
122}
123
124/*
125 * In-memory structures
126 */
127
128struct journal_node {
129 struct rb_node node;
130 sector_t sector;
131};
132
133struct alg_spec {
134 char *alg_string;
135 char *key_string;
136 __u8 *key;
137 unsigned key_size;
138};
139
140struct dm_integrity_c {
141 struct dm_dev *dev;
142 unsigned tag_size;
143 __s8 log2_tag_size;
144 sector_t start;
145 mempool_t *journal_io_mempool;
146 struct dm_io_client *io;
147 struct dm_bufio_client *bufio;
148 struct workqueue_struct *metadata_wq;
149 struct superblock *sb;
150 unsigned journal_pages;
151 struct page_list *journal;
152 struct page_list *journal_io;
153 struct page_list *journal_xor;
154
155 struct crypto_skcipher *journal_crypt;
156 struct scatterlist **journal_scatterlist;
157 struct scatterlist **journal_io_scatterlist;
158 struct skcipher_request **sk_requests;
159
160 struct crypto_shash *journal_mac;
161
162 struct journal_node *journal_tree;
163 struct rb_root journal_tree_root;
164
165 sector_t provided_data_sectors;
166
167 unsigned short journal_entry_size;
168 unsigned char journal_entries_per_sector;
169 unsigned char journal_section_entries;
170 unsigned short journal_section_sectors;
171 unsigned journal_sections;
172 unsigned journal_entries;
173 sector_t device_sectors;
174 unsigned initial_sectors;
175 unsigned metadata_run;
176 __s8 log2_metadata_run;
177 __u8 log2_buffer_sectors;
178 __u8 sectors_per_block;
179
180 unsigned char mode;
181 bool suspending;
182
183 int failed;
184
185 struct crypto_shash *internal_hash;
186
187 /* these variables are locked with endio_wait.lock */
188 struct rb_root in_progress;
189 wait_queue_head_t endio_wait;
190 struct workqueue_struct *wait_wq;
191
192 unsigned char commit_seq;
193 commit_id_t commit_ids[N_COMMIT_IDS];
194
195 unsigned committed_section;
196 unsigned n_committed_sections;
197
198 unsigned uncommitted_section;
199 unsigned n_uncommitted_sections;
200
201 unsigned free_section;
202 unsigned char free_section_entry;
203 unsigned free_sectors;
204
205 unsigned free_sectors_threshold;
206
207 struct workqueue_struct *commit_wq;
208 struct work_struct commit_work;
209
210 struct workqueue_struct *writer_wq;
211 struct work_struct writer_work;
212
213 struct bio_list flush_bio_list;
214
215 unsigned long autocommit_jiffies;
216 struct timer_list autocommit_timer;
217 unsigned autocommit_msec;
218
219 wait_queue_head_t copy_to_journal_wait;
220
221 struct completion crypto_backoff;
222
223 bool journal_uptodate;
224 bool just_formatted;
225
226 struct alg_spec internal_hash_alg;
227 struct alg_spec journal_crypt_alg;
228 struct alg_spec journal_mac_alg;
229
230 atomic64_t number_of_mismatches;
231};
232
233struct dm_integrity_range {
234 sector_t logical_sector;
235 unsigned n_sectors;
236 struct rb_node node;
237};
238
239struct dm_integrity_io {
240 struct work_struct work;
241
242 struct dm_integrity_c *ic;
243 bool write;
244 bool fua;
245
246 struct dm_integrity_range range;
247
248 sector_t metadata_block;
249 unsigned metadata_offset;
250
251 atomic_t in_flight;
252 blk_status_t bi_status;
253
254 struct completion *completion;
255
256 struct gendisk *orig_bi_disk;
257 u8 orig_bi_partno;
258 bio_end_io_t *orig_bi_end_io;
259 struct bio_integrity_payload *orig_bi_integrity;
260 struct bvec_iter orig_bi_iter;
261};
262
263struct journal_completion {
264 struct dm_integrity_c *ic;
265 atomic_t in_flight;
266 struct completion comp;
267};
268
269struct journal_io {
270 struct dm_integrity_range range;
271 struct journal_completion *comp;
272};
273
274static struct kmem_cache *journal_io_cache;
275
276#define JOURNAL_IO_MEMPOOL 32
277
278#ifdef DEBUG_PRINT
279#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
280static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
281{
282 va_list args;
283 va_start(args, msg);
284 vprintk(msg, args);
285 va_end(args);
286 if (len)
287 pr_cont(":");
288 while (len) {
289 pr_cont(" %02x", *bytes);
290 bytes++;
291 len--;
292 }
293 pr_cont("\n");
294}
295#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
296#else
297#define DEBUG_print(x, ...) do { } while (0)
298#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
299#endif
300
301/*
302 * DM Integrity profile, protection is performed layer above (dm-crypt)
303 */
304static const struct blk_integrity_profile dm_integrity_profile = {
305 .name = "DM-DIF-EXT-TAG",
306 .generate_fn = NULL,
307 .verify_fn = NULL,
308};
309
310static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
311static void integrity_bio_wait(struct work_struct *w);
312static void dm_integrity_dtr(struct dm_target *ti);
313
314static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
315{
316 if (err == -EILSEQ)
317 atomic64_inc(&ic->number_of_mismatches);
318 if (!cmpxchg(&ic->failed, 0, err))
319 DMERR("Error on %s: %d", msg, err);
320}
321
322static int dm_integrity_failed(struct dm_integrity_c *ic)
323{
324 return READ_ONCE(ic->failed);
325}
326
327static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
328 unsigned j, unsigned char seq)
329{
330 /*
331 * Xor the number with section and sector, so that if a piece of
332 * journal is written at wrong place, it is detected.
333 */
334 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
335}
336
337static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
338 sector_t *area, sector_t *offset)
339{
340 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
341
342 *area = data_sector >> log2_interleave_sectors;
343 *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
344}
345
346#define sector_to_block(ic, n) \
347do { \
348 BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
349 (n) >>= (ic)->sb->log2_sectors_per_block; \
350} while (0)
351
352static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
353 sector_t offset, unsigned *metadata_offset)
354{
355 __u64 ms;
356 unsigned mo;
357
358 ms = area << ic->sb->log2_interleave_sectors;
359 if (likely(ic->log2_metadata_run >= 0))
360 ms += area << ic->log2_metadata_run;
361 else
362 ms += area * ic->metadata_run;
363 ms >>= ic->log2_buffer_sectors;
364
365 sector_to_block(ic, offset);
366
367 if (likely(ic->log2_tag_size >= 0)) {
368 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
369 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
370 } else {
371 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
372 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
373 }
374 *metadata_offset = mo;
375 return ms;
376}
377
378static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
379{
380 sector_t result;
381
382 result = area << ic->sb->log2_interleave_sectors;
383 if (likely(ic->log2_metadata_run >= 0))
384 result += (area + 1) << ic->log2_metadata_run;
385 else
386 result += (area + 1) * ic->metadata_run;
387
388 result += (sector_t)ic->initial_sectors + offset;
389 return result;
390}
391
392static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
393{
394 if (unlikely(*sec_ptr >= ic->journal_sections))
395 *sec_ptr -= ic->journal_sections;
396}
397
398static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
399{
400 struct dm_io_request io_req;
401 struct dm_io_region io_loc;
402
403 io_req.bi_op = op;
404 io_req.bi_op_flags = op_flags;
405 io_req.mem.type = DM_IO_KMEM;
406 io_req.mem.ptr.addr = ic->sb;
407 io_req.notify.fn = NULL;
408 io_req.client = ic->io;
409 io_loc.bdev = ic->dev->bdev;
410 io_loc.sector = ic->start;
411 io_loc.count = SB_SECTORS;
412
413 return dm_io(&io_req, 1, &io_loc, NULL);
414}
415
416static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
417 bool e, const char *function)
418{
419#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
420 unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
421
422 if (unlikely(section >= ic->journal_sections) ||
423 unlikely(offset >= limit)) {
424 printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
425 function, section, offset, ic->journal_sections, limit);
426 BUG();
427 }
428#endif
429}
430
431static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
432 unsigned *pl_index, unsigned *pl_offset)
433{
434 unsigned sector;
435
436 access_journal_check(ic, section, offset, false, "page_list_location");
437
438 sector = section * ic->journal_section_sectors + offset;
439
440 *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
441 *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
442}
443
444static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
445 unsigned section, unsigned offset, unsigned *n_sectors)
446{
447 unsigned pl_index, pl_offset;
448 char *va;
449
450 page_list_location(ic, section, offset, &pl_index, &pl_offset);
451
452 if (n_sectors)
453 *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;
454
455 va = lowmem_page_address(pl[pl_index].page);
456
457 return (struct journal_sector *)(va + pl_offset);
458}
459
460static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
461{
462 return access_page_list(ic, ic->journal, section, offset, NULL);
463}
464
465static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
466{
467 unsigned rel_sector, offset;
468 struct journal_sector *js;
469
470 access_journal_check(ic, section, n, true, "access_journal_entry");
471
472 rel_sector = n % JOURNAL_BLOCK_SECTORS;
473 offset = n / JOURNAL_BLOCK_SECTORS;
474
475 js = access_journal(ic, section, rel_sector);
476 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
477}
478
479static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
480{
481 n <<= ic->sb->log2_sectors_per_block;
482
483 n += JOURNAL_BLOCK_SECTORS;
484
485 access_journal_check(ic, section, n, false, "access_journal_data");
486
487 return access_journal(ic, section, n);
488}
489
490static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
491{
492 SHASH_DESC_ON_STACK(desc, ic->journal_mac);
493 int r;
494 unsigned j, size;
495
496 desc->tfm = ic->journal_mac;
497 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
498
499 r = crypto_shash_init(desc);
500 if (unlikely(r)) {
501 dm_integrity_io_error(ic, "crypto_shash_init", r);
502 goto err;
503 }
504
505 for (j = 0; j < ic->journal_section_entries; j++) {
506 struct journal_entry *je = access_journal_entry(ic, section, j);
507 r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
508 if (unlikely(r)) {
509 dm_integrity_io_error(ic, "crypto_shash_update", r);
510 goto err;
511 }
512 }
513
514 size = crypto_shash_digestsize(ic->journal_mac);
515
516 if (likely(size <= JOURNAL_MAC_SIZE)) {
517 r = crypto_shash_final(desc, result);
518 if (unlikely(r)) {
519 dm_integrity_io_error(ic, "crypto_shash_final", r);
520 goto err;
521 }
522 memset(result + size, 0, JOURNAL_MAC_SIZE - size);
523 } else {
524 __u8 digest[size];
525 r = crypto_shash_final(desc, digest);
526 if (unlikely(r)) {
527 dm_integrity_io_error(ic, "crypto_shash_final", r);
528 goto err;
529 }
530 memcpy(result, digest, JOURNAL_MAC_SIZE);
531 }
532
533 return;
534err:
535 memset(result, 0, JOURNAL_MAC_SIZE);
536}
537
538static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
539{
540 __u8 result[JOURNAL_MAC_SIZE];
541 unsigned j;
542
543 if (!ic->journal_mac)
544 return;
545
546 section_mac(ic, section, result);
547
548 for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
549 struct journal_sector *js = access_journal(ic, section, j);
550
551 if (likely(wr))
552 memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
553 else {
554 if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
555 dm_integrity_io_error(ic, "journal mac", -EILSEQ);
556 }
557 }
558}
559
560static void complete_journal_op(void *context)
561{
562 struct journal_completion *comp = context;
563 BUG_ON(!atomic_read(&comp->in_flight));
564 if (likely(atomic_dec_and_test(&comp->in_flight)))
565 complete(&comp->comp);
566}
567
568static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
569 unsigned n_sections, struct journal_completion *comp)
570{
571 struct async_submit_ctl submit;
572 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
573 unsigned pl_index, pl_offset, section_index;
574 struct page_list *source_pl, *target_pl;
575
576 if (likely(encrypt)) {
577 source_pl = ic->journal;
578 target_pl = ic->journal_io;
579 } else {
580 source_pl = ic->journal_io;
581 target_pl = ic->journal;
582 }
583
584 page_list_location(ic, section, 0, &pl_index, &pl_offset);
585
586 atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);
587
588 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
589
590 section_index = pl_index;
591
592 do {
593 size_t this_step;
594 struct page *src_pages[2];
595 struct page *dst_page;
596
597 while (unlikely(pl_index == section_index)) {
598 unsigned dummy;
599 if (likely(encrypt))
600 rw_section_mac(ic, section, true);
601 section++;
602 n_sections--;
603 if (!n_sections)
604 break;
605 page_list_location(ic, section, 0, §ion_index, &dummy);
606 }
607
608 this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
609 dst_page = target_pl[pl_index].page;
610 src_pages[0] = source_pl[pl_index].page;
611 src_pages[1] = ic->journal_xor[pl_index].page;
612
613 async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
614
615 pl_index++;
616 pl_offset = 0;
617 n_bytes -= this_step;
618 } while (n_bytes);
619
620 BUG_ON(n_sections);
621
622 async_tx_issue_pending_all();
623}
624
625static void complete_journal_encrypt(struct crypto_async_request *req, int err)
626{
627 struct journal_completion *comp = req->data;
628 if (unlikely(err)) {
629 if (likely(err == -EINPROGRESS)) {
630 complete(&comp->ic->crypto_backoff);
631 return;
632 }
633 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
634 }
635 complete_journal_op(comp);
636}
637
638static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
639{
640 int r;
641 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
642 complete_journal_encrypt, comp);
643 if (likely(encrypt))
644 r = crypto_skcipher_encrypt(req);
645 else
646 r = crypto_skcipher_decrypt(req);
647 if (likely(!r))
648 return false;
649 if (likely(r == -EINPROGRESS))
650 return true;
651 if (likely(r == -EBUSY)) {
652 wait_for_completion(&comp->ic->crypto_backoff);
653 reinit_completion(&comp->ic->crypto_backoff);
654 return true;
655 }
656 dm_integrity_io_error(comp->ic, "encrypt", r);
657 return false;
658}
659
660static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
661 unsigned n_sections, struct journal_completion *comp)
662{
663 struct scatterlist **source_sg;
664 struct scatterlist **target_sg;
665
666 atomic_add(2, &comp->in_flight);
667
668 if (likely(encrypt)) {
669 source_sg = ic->journal_scatterlist;
670 target_sg = ic->journal_io_scatterlist;
671 } else {
672 source_sg = ic->journal_io_scatterlist;
673 target_sg = ic->journal_scatterlist;
674 }
675
676 do {
677 struct skcipher_request *req;
678 unsigned ivsize;
679 char *iv;
680
681 if (likely(encrypt))
682 rw_section_mac(ic, section, true);
683
684 req = ic->sk_requests[section];
685 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
686 iv = req->iv;
687
688 memcpy(iv, iv + ivsize, ivsize);
689
690 req->src = source_sg[section];
691 req->dst = target_sg[section];
692
693 if (unlikely(do_crypt(encrypt, req, comp)))
694 atomic_inc(&comp->in_flight);
695
696 section++;
697 n_sections--;
698 } while (n_sections);
699
700 atomic_dec(&comp->in_flight);
701 complete_journal_op(comp);
702}
703
704static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
705 unsigned n_sections, struct journal_completion *comp)
706{
707 if (ic->journal_xor)
708 return xor_journal(ic, encrypt, section, n_sections, comp);
709 else
710 return crypt_journal(ic, encrypt, section, n_sections, comp);
711}
712
713static void complete_journal_io(unsigned long error, void *context)
714{
715 struct journal_completion *comp = context;
716 if (unlikely(error != 0))
717 dm_integrity_io_error(comp->ic, "writing journal", -EIO);
718 complete_journal_op(comp);
719}
720
721static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
722 unsigned n_sections, struct journal_completion *comp)
723{
724 struct dm_io_request io_req;
725 struct dm_io_region io_loc;
726 unsigned sector, n_sectors, pl_index, pl_offset;
727 int r;
728
729 if (unlikely(dm_integrity_failed(ic))) {
730 if (comp)
731 complete_journal_io(-1UL, comp);
732 return;
733 }
734
735 sector = section * ic->journal_section_sectors;
736 n_sectors = n_sections * ic->journal_section_sectors;
737
738 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
739 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
740
741 io_req.bi_op = op;
742 io_req.bi_op_flags = op_flags;
743 io_req.mem.type = DM_IO_PAGE_LIST;
744 if (ic->journal_io)
745 io_req.mem.ptr.pl = &ic->journal_io[pl_index];
746 else
747 io_req.mem.ptr.pl = &ic->journal[pl_index];
748 io_req.mem.offset = pl_offset;
749 if (likely(comp != NULL)) {
750 io_req.notify.fn = complete_journal_io;
751 io_req.notify.context = comp;
752 } else {
753 io_req.notify.fn = NULL;
754 }
755 io_req.client = ic->io;
756 io_loc.bdev = ic->dev->bdev;
757 io_loc.sector = ic->start + SB_SECTORS + sector;
758 io_loc.count = n_sectors;
759
760 r = dm_io(&io_req, 1, &io_loc, NULL);
761 if (unlikely(r)) {
762 dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
763 if (comp) {
764 WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
765 complete_journal_io(-1UL, comp);
766 }
767 }
768}
769
770static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
771{
772 struct journal_completion io_comp;
773 struct journal_completion crypt_comp_1;
774 struct journal_completion crypt_comp_2;
775 unsigned i;
776
777 io_comp.ic = ic;
778 init_completion(&io_comp.comp);
779
780 if (commit_start + commit_sections <= ic->journal_sections) {
781 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
782 if (ic->journal_io) {
783 crypt_comp_1.ic = ic;
784 init_completion(&crypt_comp_1.comp);
785 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
786 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
787 wait_for_completion_io(&crypt_comp_1.comp);
788 } else {
789 for (i = 0; i < commit_sections; i++)
790 rw_section_mac(ic, commit_start + i, true);
791 }
792 rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
793 commit_sections, &io_comp);
794 } else {
795 unsigned to_end;
796 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
797 to_end = ic->journal_sections - commit_start;
798 if (ic->journal_io) {
799 crypt_comp_1.ic = ic;
800 init_completion(&crypt_comp_1.comp);
801 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
802 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
803 if (try_wait_for_completion(&crypt_comp_1.comp)) {
804 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
805 reinit_completion(&crypt_comp_1.comp);
806 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
807 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
808 wait_for_completion_io(&crypt_comp_1.comp);
809 } else {
810 crypt_comp_2.ic = ic;
811 init_completion(&crypt_comp_2.comp);
812 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
813 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
814 wait_for_completion_io(&crypt_comp_1.comp);
815 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
816 wait_for_completion_io(&crypt_comp_2.comp);
817 }
818 } else {
819 for (i = 0; i < to_end; i++)
820 rw_section_mac(ic, commit_start + i, true);
821 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
822 for (i = 0; i < commit_sections - to_end; i++)
823 rw_section_mac(ic, i, true);
824 }
825 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
826 }
827
828 wait_for_completion_io(&io_comp.comp);
829}
830
831static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
832 unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
833{
834 struct dm_io_request io_req;
835 struct dm_io_region io_loc;
836 int r;
837 unsigned sector, pl_index, pl_offset;
838
839 BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
840
841 if (unlikely(dm_integrity_failed(ic))) {
842 fn(-1UL, data);
843 return;
844 }
845
846 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
847
848 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
849 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
850
851 io_req.bi_op = REQ_OP_WRITE;
852 io_req.bi_op_flags = 0;
853 io_req.mem.type = DM_IO_PAGE_LIST;
854 io_req.mem.ptr.pl = &ic->journal[pl_index];
855 io_req.mem.offset = pl_offset;
856 io_req.notify.fn = fn;
857 io_req.notify.context = data;
858 io_req.client = ic->io;
859 io_loc.bdev = ic->dev->bdev;
860 io_loc.sector = ic->start + target;
861 io_loc.count = n_sectors;
862
863 r = dm_io(&io_req, 1, &io_loc, NULL);
864 if (unlikely(r)) {
865 WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
866 fn(-1UL, data);
867 }
868}
869
870static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
871{
872 struct rb_node **n = &ic->in_progress.rb_node;
873 struct rb_node *parent;
874
875 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
876
877 parent = NULL;
878
879 while (*n) {
880 struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
881
882 parent = *n;
883 if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
884 n = &range->node.rb_left;
885 } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
886 n = &range->node.rb_right;
887 } else {
888 return false;
889 }
890 }
891
892 rb_link_node(&new_range->node, parent, n);
893 rb_insert_color(&new_range->node, &ic->in_progress);
894
895 return true;
896}
897
898static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
899{
900 rb_erase(&range->node, &ic->in_progress);
901 wake_up_locked(&ic->endio_wait);
902}
903
904static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
905{
906 unsigned long flags;
907
908 spin_lock_irqsave(&ic->endio_wait.lock, flags);
909 remove_range_unlocked(ic, range);
910 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
911}
912
913static void init_journal_node(struct journal_node *node)
914{
915 RB_CLEAR_NODE(&node->node);
916 node->sector = (sector_t)-1;
917}
918
919static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
920{
921 struct rb_node **link;
922 struct rb_node *parent;
923
924 node->sector = sector;
925 BUG_ON(!RB_EMPTY_NODE(&node->node));
926
927 link = &ic->journal_tree_root.rb_node;
928 parent = NULL;
929
930 while (*link) {
931 struct journal_node *j;
932 parent = *link;
933 j = container_of(parent, struct journal_node, node);
934 if (sector < j->sector)
935 link = &j->node.rb_left;
936 else
937 link = &j->node.rb_right;
938 }
939
940 rb_link_node(&node->node, parent, link);
941 rb_insert_color(&node->node, &ic->journal_tree_root);
942}
943
944static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
945{
946 BUG_ON(RB_EMPTY_NODE(&node->node));
947 rb_erase(&node->node, &ic->journal_tree_root);
948 init_journal_node(node);
949}
950
951#define NOT_FOUND (-1U)
952
953static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
954{
955 struct rb_node *n = ic->journal_tree_root.rb_node;
956 unsigned found = NOT_FOUND;
957 *next_sector = (sector_t)-1;
958 while (n) {
959 struct journal_node *j = container_of(n, struct journal_node, node);
960 if (sector == j->sector) {
961 found = j - ic->journal_tree;
962 }
963 if (sector < j->sector) {
964 *next_sector = j->sector;
965 n = j->node.rb_left;
966 } else {
967 n = j->node.rb_right;
968 }
969 }
970
971 return found;
972}
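/*
 * Editorial note (not in the original source): duplicate sectors are
 * inserted to the right in add_journal_node(), so the rightmost match found
 * above is the newest journal entry for the sector.  *next_sector receives
 * the smallest journalled sector greater than the one searched for; the
 * read path in dm_integrity_map_continue() uses it to clamp a bio so that
 * it does not cross a journalled block.
 */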
973
974static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
975{
976 struct journal_node *node, *next_node;
977 struct rb_node *next;
978
979 if (unlikely(pos >= ic->journal_entries))
980 return false;
981 node = &ic->journal_tree[pos];
982 if (unlikely(RB_EMPTY_NODE(&node->node)))
983 return false;
984 if (unlikely(node->sector != sector))
985 return false;
986
987 next = rb_next(&node->node);
988 if (unlikely(!next))
989 return true;
990
991 next_node = container_of(next, struct journal_node, node);
992 return next_node->sector != sector;
993}
994
995static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
996{
997 struct rb_node *next;
998 struct journal_node *next_node;
999 unsigned next_section;
1000
1001 BUG_ON(RB_EMPTY_NODE(&node->node));
1002
1003 next = rb_next(&node->node);
1004 if (unlikely(!next))
1005 return false;
1006
1007 next_node = container_of(next, struct journal_node, node);
1008
1009 if (next_node->sector != node->sector)
1010 return false;
1011
1012 next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
1013 if (next_section >= ic->committed_section &&
1014 next_section < ic->committed_section + ic->n_committed_sections)
1015 return true;
1016 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1017 return true;
1018
1019 return false;
1020}
1021
1022#define TAG_READ 0
1023#define TAG_WRITE 1
1024#define TAG_CMP 2
1025
1026static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1027 unsigned *metadata_offset, unsigned total_size, int op)
1028{
1029 do {
1030 unsigned char *data, *dp;
1031 struct dm_buffer *b;
1032 unsigned to_copy;
1033 int r;
1034
1035 r = dm_integrity_failed(ic);
1036 if (unlikely(r))
1037 return r;
1038
1039 data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1040 if (unlikely(IS_ERR(data)))
1041 return PTR_ERR(data);
1042
1043 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1044 dp = data + *metadata_offset;
1045 if (op == TAG_READ) {
1046 memcpy(tag, dp, to_copy);
1047 } else if (op == TAG_WRITE) {
1048 memcpy(dp, tag, to_copy);
1049 dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1050 } else {
1051 /* e.g.: op == TAG_CMP */
1052 if (unlikely(memcmp(dp, tag, to_copy))) {
1053 unsigned i;
1054
1055 for (i = 0; i < to_copy; i++) {
1056 if (dp[i] != tag[i])
1057 break;
1058 total_size--;
1059 }
1060 dm_bufio_release(b);
1061 return total_size;
1062 }
1063 }
1064 dm_bufio_release(b);
1065
1066 tag += to_copy;
1067 *metadata_offset += to_copy;
1068 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1069 (*metadata_block)++;
1070 *metadata_offset = 0;
1071 }
1072 total_size -= to_copy;
1073 } while (unlikely(total_size));
1074
1075 return 0;
1076}
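/*
 * Editorial note (not in the original source): for TAG_CMP the function
 * returns 0 when all tags match; on a mismatch it returns the number of
 * tag bytes remaining from the first differing byte onwards.  The caller
 * in integrity_metadata() converts that count back into the failing
 * sector number:
 *
 *	sector - ((r + ic->tag_size - 1) / ic->tag_size)
 */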
1077
1078static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
1079{
1080 int r;
1081 r = dm_bufio_write_dirty_buffers(ic->bufio);
1082 if (unlikely(r))
1083 dm_integrity_io_error(ic, "writing tags", r);
1084}
1085
1086static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1087{
1088 DECLARE_WAITQUEUE(wait, current);
1089 __add_wait_queue(&ic->endio_wait, &wait);
1090 __set_current_state(TASK_UNINTERRUPTIBLE);
1091 spin_unlock_irq(&ic->endio_wait.lock);
1092 io_schedule();
1093 spin_lock_irq(&ic->endio_wait.lock);
1094 __remove_wait_queue(&ic->endio_wait, &wait);
1095}
1096
1097static void autocommit_fn(struct timer_list *t)
1098{
1099 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1100
1101 if (likely(!dm_integrity_failed(ic)))
1102 queue_work(ic->commit_wq, &ic->commit_work);
1103}
1104
1105static void schedule_autocommit(struct dm_integrity_c *ic)
1106{
1107 if (!timer_pending(&ic->autocommit_timer))
1108 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1109}
1110
1111static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1112{
1113 struct bio *bio;
1114 unsigned long flags;
1115
1116 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1117 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1118 bio_list_add(&ic->flush_bio_list, bio);
1119 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1120
1121 queue_work(ic->commit_wq, &ic->commit_work);
1122}
1123
1124static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1125{
1126 int r = dm_integrity_failed(ic);
1127 if (unlikely(r) && !bio->bi_status)
1128 bio->bi_status = errno_to_blk_status(r);
1129 bio_endio(bio);
1130}
1131
1132static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1133{
1134 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1135
1136 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1137 submit_flush_bio(ic, dio);
1138 else
1139 do_endio(ic, bio);
1140}
1141
1142static void dec_in_flight(struct dm_integrity_io *dio)
1143{
1144 if (atomic_dec_and_test(&dio->in_flight)) {
1145 struct dm_integrity_c *ic = dio->ic;
1146 struct bio *bio;
1147
1148 remove_range(ic, &dio->range);
1149
1150 if (unlikely(dio->write))
1151 schedule_autocommit(ic);
1152
1153 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1154
1155 if (unlikely(dio->bi_status) && !bio->bi_status)
1156 bio->bi_status = dio->bi_status;
1157 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1158 dio->range.logical_sector += dio->range.n_sectors;
1159 bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1160 INIT_WORK(&dio->work, integrity_bio_wait);
1161 queue_work(ic->wait_wq, &dio->work);
1162 return;
1163 }
1164 do_endio_flush(ic, dio);
1165 }
1166}
1167
1168static void integrity_end_io(struct bio *bio)
1169{
1170 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1171
1172 bio->bi_iter = dio->orig_bi_iter;
1173 bio->bi_disk = dio->orig_bi_disk;
1174 bio->bi_partno = dio->orig_bi_partno;
1175 if (dio->orig_bi_integrity) {
1176 bio->bi_integrity = dio->orig_bi_integrity;
1177 bio->bi_opf |= REQ_INTEGRITY;
1178 }
1179 bio->bi_end_io = dio->orig_bi_end_io;
1180
1181 if (dio->completion)
1182 complete(dio->completion);
1183
1184 dec_in_flight(dio);
1185}
1186
1187static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1188 const char *data, char *result)
1189{
1190 __u64 sector_le = cpu_to_le64(sector);
1191 SHASH_DESC_ON_STACK(req, ic->internal_hash);
1192 int r;
1193 unsigned digest_size;
1194
1195 req->tfm = ic->internal_hash;
1196 req->flags = 0;
1197
1198 r = crypto_shash_init(req);
1199 if (unlikely(r < 0)) {
1200 dm_integrity_io_error(ic, "crypto_shash_init", r);
1201 goto failed;
1202 }
1203
1204	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1205 if (unlikely(r < 0)) {
1206 dm_integrity_io_error(ic, "crypto_shash_update", r);
1207 goto failed;
1208 }
1209
1210 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1211 if (unlikely(r < 0)) {
1212 dm_integrity_io_error(ic, "crypto_shash_update", r);
1213 goto failed;
1214 }
1215
1216 r = crypto_shash_final(req, result);
1217 if (unlikely(r < 0)) {
1218 dm_integrity_io_error(ic, "crypto_shash_final", r);
1219 goto failed;
1220 }
1221
1222 digest_size = crypto_shash_digestsize(ic->internal_hash);
1223 if (unlikely(digest_size < ic->tag_size))
1224 memset(result + digest_size, 0, ic->tag_size - digest_size);
1225
1226 return;
1227
1228failed:
1229	/* this shouldn't happen; the hash functions have no reason to fail */
1230 get_random_bytes(result, ic->tag_size);
1231}
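/*
 * Editorial note (not in the original source): the tag computed above is
 *
 *	tag = H(le64(sector) || data)
 *
 * truncated to ic->tag_size bytes, or zero-padded when the digest is
 * shorter than the tag; mixing in the sector number makes a block that is
 * copied to a different location fail verification.
 */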
1232
1233static void integrity_metadata(struct work_struct *w)
1234{
1235 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1236 struct dm_integrity_c *ic = dio->ic;
1237
1238 int r;
1239
1240 if (ic->internal_hash) {
1241 struct bvec_iter iter;
1242 struct bio_vec bv;
1243 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1244 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1245 char *checksums;
1246 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1247 char checksums_onstack[ic->tag_size + extra_space];
1248 unsigned sectors_to_process = dio->range.n_sectors;
1249 sector_t sector = dio->range.logical_sector;
1250
1251 if (unlikely(ic->mode == 'R'))
1252 goto skip_io;
1253
1254 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1255 GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1256 if (!checksums)
1257 checksums = checksums_onstack;
1258
1259 __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
1260 unsigned pos;
1261 char *mem, *checksums_ptr;
1262
1263again:
1264 mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
1265 pos = 0;
1266 checksums_ptr = checksums;
1267 do {
1268 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1269 checksums_ptr += ic->tag_size;
1270 sectors_to_process -= ic->sectors_per_block;
1271 pos += ic->sectors_per_block << SECTOR_SHIFT;
1272 sector += ic->sectors_per_block;
1273 } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1274 kunmap_atomic(mem);
1275
1276 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1277 checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
1278 if (unlikely(r)) {
1279 if (r > 0) {
1280 DMERR("Checksum failed at sector 0x%llx",
1281 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
1282 r = -EILSEQ;
1283 atomic64_inc(&ic->number_of_mismatches);
1284 }
1285 if (likely(checksums != checksums_onstack))
1286 kfree(checksums);
1287 goto error;
1288 }
1289
1290 if (!sectors_to_process)
1291 break;
1292
1293 if (unlikely(pos < bv.bv_len)) {
1294 bv.bv_offset += pos;
1295 bv.bv_len -= pos;
1296 goto again;
1297 }
1298 }
1299
1300 if (likely(checksums != checksums_onstack))
1301 kfree(checksums);
1302 } else {
1303 struct bio_integrity_payload *bip = dio->orig_bi_integrity;
1304
1305 if (bip) {
1306 struct bio_vec biv;
1307 struct bvec_iter iter;
1308 unsigned data_to_process = dio->range.n_sectors;
1309 sector_to_block(ic, data_to_process);
1310 data_to_process *= ic->tag_size;
1311
1312 bip_for_each_vec(biv, bip, iter) {
1313 unsigned char *tag;
1314 unsigned this_len;
1315
1316 BUG_ON(PageHighMem(biv.bv_page));
1317 tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1318 this_len = min(biv.bv_len, data_to_process);
1319 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1320 this_len, !dio->write ? TAG_READ : TAG_WRITE);
1321 if (unlikely(r))
1322 goto error;
1323 data_to_process -= this_len;
1324 if (!data_to_process)
1325 break;
1326 }
1327 }
1328 }
1329skip_io:
1330 dec_in_flight(dio);
1331 return;
1332error:
1333 dio->bi_status = errno_to_blk_status(r);
1334 dec_in_flight(dio);
1335}
1336
1337static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1338{
1339 struct dm_integrity_c *ic = ti->private;
1340 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1341 struct bio_integrity_payload *bip;
1342
1343 sector_t area, offset;
1344
1345 dio->ic = ic;
1346 dio->bi_status = 0;
1347
1348 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1349 submit_flush_bio(ic, dio);
1350 return DM_MAPIO_SUBMITTED;
1351 }
1352
1353 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1354 dio->write = bio_op(bio) == REQ_OP_WRITE;
1355 dio->fua = dio->write && bio->bi_opf & REQ_FUA;
1356 if (unlikely(dio->fua)) {
1357 /*
1358		 * Don't pass down the FUA flag because we have to flush
1359		 * the disk cache anyway.
1360 */
1361 bio->bi_opf &= ~REQ_FUA;
1362 }
1363 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1364 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1365 (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
1366 (unsigned long long)ic->provided_data_sectors);
1367 return DM_MAPIO_KILL;
1368 }
1369 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1370 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1371 ic->sectors_per_block,
1372 (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
1373 return DM_MAPIO_KILL;
1374 }
1375
1376 if (ic->sectors_per_block > 1) {
1377 struct bvec_iter iter;
1378 struct bio_vec bv;
1379 bio_for_each_segment(bv, bio, iter) {
1380 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1381 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1382 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1383 return DM_MAPIO_KILL;
1384 }
1385 }
1386 }
1387
1388 bip = bio_integrity(bio);
1389 if (!ic->internal_hash) {
1390 if (bip) {
1391 unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1392 if (ic->log2_tag_size >= 0)
1393 wanted_tag_size <<= ic->log2_tag_size;
1394 else
1395 wanted_tag_size *= ic->tag_size;
1396 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1397 DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
1398 return DM_MAPIO_KILL;
1399 }
1400 }
1401 } else {
1402 if (unlikely(bip != NULL)) {
1403 DMERR("Unexpected integrity data when using internal hash");
1404 return DM_MAPIO_KILL;
1405 }
1406 }
1407
1408 if (unlikely(ic->mode == 'R') && unlikely(dio->write))
1409 return DM_MAPIO_KILL;
1410
1411 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1412 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1413 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1414
1415 dm_integrity_map_continue(dio, true);
1416 return DM_MAPIO_SUBMITTED;
1417}
1418
1419static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1420 unsigned journal_section, unsigned journal_entry)
1421{
1422 struct dm_integrity_c *ic = dio->ic;
1423 sector_t logical_sector;
1424 unsigned n_sectors;
1425
1426 logical_sector = dio->range.logical_sector;
1427 n_sectors = dio->range.n_sectors;
1428 do {
1429 struct bio_vec bv = bio_iovec(bio);
1430 char *mem;
1431
1432 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1433 bv.bv_len = n_sectors << SECTOR_SHIFT;
1434 n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1435 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1436retry_kmap:
1437 mem = kmap_atomic(bv.bv_page);
1438 if (likely(dio->write))
1439 flush_dcache_page(bv.bv_page);
1440
1441 do {
1442 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1443
1444 if (unlikely(!dio->write)) {
1445 struct journal_sector *js;
1446 char *mem_ptr;
1447 unsigned s;
1448
1449 if (unlikely(journal_entry_is_inprogress(je))) {
1450 flush_dcache_page(bv.bv_page);
1451 kunmap_atomic(mem);
1452
1453 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1454 goto retry_kmap;
1455 }
1456 smp_rmb();
1457 BUG_ON(journal_entry_get_sector(je) != logical_sector);
1458 js = access_journal_data(ic, journal_section, journal_entry);
1459 mem_ptr = mem + bv.bv_offset;
1460 s = 0;
1461 do {
1462 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1463 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1464 js++;
1465 mem_ptr += 1 << SECTOR_SHIFT;
1466 } while (++s < ic->sectors_per_block);
1467#ifdef INTERNAL_VERIFY
1468 if (ic->internal_hash) {
1469 char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
1470
1471 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1472 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1473 DMERR("Checksum failed when reading from journal, at sector 0x%llx",
1474 (unsigned long long)logical_sector);
1475 }
1476 }
1477#endif
1478 }
1479
1480 if (!ic->internal_hash) {
1481 struct bio_integrity_payload *bip = bio_integrity(bio);
1482 unsigned tag_todo = ic->tag_size;
1483 char *tag_ptr = journal_entry_tag(ic, je);
1484
1485 if (bip) do {
1486 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
1487 unsigned tag_now = min(biv.bv_len, tag_todo);
1488 char *tag_addr;
1489 BUG_ON(PageHighMem(biv.bv_page));
1490 tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
1491 if (likely(dio->write))
1492 memcpy(tag_ptr, tag_addr, tag_now);
1493 else
1494 memcpy(tag_addr, tag_ptr, tag_now);
1495 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
1496 tag_ptr += tag_now;
1497 tag_todo -= tag_now;
1498 } while (unlikely(tag_todo)); else {
1499 if (likely(dio->write))
1500 memset(tag_ptr, 0, tag_todo);
1501 }
1502 }
1503
1504 if (likely(dio->write)) {
1505 struct journal_sector *js;
1506 unsigned s;
1507
1508 js = access_journal_data(ic, journal_section, journal_entry);
1509 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
1510
1511 s = 0;
1512 do {
1513 je->last_bytes[s] = js[s].commit_id;
1514 } while (++s < ic->sectors_per_block);
1515
1516 if (ic->internal_hash) {
1517 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1518 if (unlikely(digest_size > ic->tag_size)) {
1519 char checksums_onstack[digest_size];
1520 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
1521 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
1522 } else
1523 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
1524 }
1525
1526 journal_entry_set_sector(je, logical_sector);
1527 }
1528 logical_sector += ic->sectors_per_block;
1529
1530 journal_entry++;
1531 if (unlikely(journal_entry == ic->journal_section_entries)) {
1532 journal_entry = 0;
1533 journal_section++;
1534 wraparound_section(ic, &journal_section);
1535 }
1536
1537 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
1538 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
1539
1540 if (unlikely(!dio->write))
1541 flush_dcache_page(bv.bv_page);
1542 kunmap_atomic(mem);
1543 } while (n_sectors);
1544
1545 if (likely(dio->write)) {
1546 smp_mb();
1547 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
1548 wake_up(&ic->copy_to_journal_wait);
1549 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
1550 queue_work(ic->commit_wq, &ic->commit_work);
1551 } else {
1552 schedule_autocommit(ic);
1553 }
1554 } else {
1555 remove_range(ic, &dio->range);
1556 }
1557
1558 if (unlikely(bio->bi_iter.bi_size)) {
1559 sector_t area, offset;
1560
1561 dio->range.logical_sector = logical_sector;
1562 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1563 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1564 return true;
1565 }
1566
1567 return false;
1568}
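/*
 * Editorial note (not in the original source): a "true" return means the
 * bio was only partially satisfied from the journal entries reserved so
 * far; dm_integrity_map_continue() then retakes endio_wait.lock and loops
 * back to allocate or look up journal entries for the remainder.
 */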
1569
1570static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
1571{
1572 struct dm_integrity_c *ic = dio->ic;
1573 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1574 unsigned journal_section, journal_entry;
1575 unsigned journal_read_pos;
1576 struct completion read_comp;
1577 bool need_sync_io = ic->internal_hash && !dio->write;
1578
1579 if (need_sync_io && from_map) {
1580 INIT_WORK(&dio->work, integrity_bio_wait);
1581 queue_work(ic->metadata_wq, &dio->work);
1582 return;
1583 }
1584
1585lock_retry:
1586 spin_lock_irq(&ic->endio_wait.lock);
1587retry:
1588 if (unlikely(dm_integrity_failed(ic))) {
1589 spin_unlock_irq(&ic->endio_wait.lock);
1590 do_endio(ic, bio);
1591 return;
1592 }
1593 dio->range.n_sectors = bio_sectors(bio);
1594 journal_read_pos = NOT_FOUND;
1595 if (likely(ic->mode == 'J')) {
1596 if (dio->write) {
1597 unsigned next_entry, i, pos;
1598 unsigned ws, we, range_sectors;
1599
1600 dio->range.n_sectors = min(dio->range.n_sectors,
1601 ic->free_sectors << ic->sb->log2_sectors_per_block);
1602 if (unlikely(!dio->range.n_sectors))
1603 goto sleep;
1604 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1605 ic->free_sectors -= range_sectors;
1606 journal_section = ic->free_section;
1607 journal_entry = ic->free_section_entry;
1608
1609 next_entry = ic->free_section_entry + range_sectors;
1610 ic->free_section_entry = next_entry % ic->journal_section_entries;
1611 ic->free_section += next_entry / ic->journal_section_entries;
1612 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
1613 wraparound_section(ic, &ic->free_section);
1614
1615 pos = journal_section * ic->journal_section_entries + journal_entry;
1616 ws = journal_section;
1617 we = journal_entry;
1618 i = 0;
1619 do {
1620 struct journal_entry *je;
1621
1622 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
1623 pos++;
1624 if (unlikely(pos >= ic->journal_entries))
1625 pos = 0;
1626
1627 je = access_journal_entry(ic, ws, we);
1628 BUG_ON(!journal_entry_is_unused(je));
1629 journal_entry_set_inprogress(je);
1630 we++;
1631 if (unlikely(we == ic->journal_section_entries)) {
1632 we = 0;
1633 ws++;
1634 wraparound_section(ic, &ws);
1635 }
1636 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
1637
1638 spin_unlock_irq(&ic->endio_wait.lock);
1639 goto journal_read_write;
1640 } else {
1641 sector_t next_sector;
1642 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1643 if (likely(journal_read_pos == NOT_FOUND)) {
1644 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
1645 dio->range.n_sectors = next_sector - dio->range.logical_sector;
1646 } else {
1647 unsigned i;
1648 unsigned jp = journal_read_pos + 1;
1649 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
1650 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
1651 break;
1652 }
1653 dio->range.n_sectors = i;
1654 }
1655 }
1656 }
1657 if (unlikely(!add_new_range(ic, &dio->range))) {
1658 /*
1659 * We must not sleep in the request routine because it could
1660 * stall bios on current->bio_list.
1661 * So, we offload the bio to a workqueue if we have to sleep.
1662 */
1663sleep:
1664 if (from_map) {
1665 spin_unlock_irq(&ic->endio_wait.lock);
1666 INIT_WORK(&dio->work, integrity_bio_wait);
1667 queue_work(ic->wait_wq, &dio->work);
1668 return;
1669 } else {
1670 sleep_on_endio_wait(ic);
1671 goto retry;
1672 }
1673 }
1674 spin_unlock_irq(&ic->endio_wait.lock);
1675
1676 if (unlikely(journal_read_pos != NOT_FOUND)) {
1677 journal_section = journal_read_pos / ic->journal_section_entries;
1678 journal_entry = journal_read_pos % ic->journal_section_entries;
1679 goto journal_read_write;
1680 }
1681
1682 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
1683
1684 if (need_sync_io) {
1685 init_completion(&read_comp);
1686 dio->completion = &read_comp;
1687 } else
1688 dio->completion = NULL;
1689
1690 dio->orig_bi_iter = bio->bi_iter;
1691
1692 dio->orig_bi_disk = bio->bi_disk;
1693 dio->orig_bi_partno = bio->bi_partno;
1694 bio_set_dev(bio, ic->dev->bdev);
1695
1696 dio->orig_bi_integrity = bio_integrity(bio);
1697 bio->bi_integrity = NULL;
1698 bio->bi_opf &= ~REQ_INTEGRITY;
1699
1700 dio->orig_bi_end_io = bio->bi_end_io;
1701 bio->bi_end_io = integrity_end_io;
1702
1703 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
1704 bio->bi_iter.bi_sector += ic->start;
1705 generic_make_request(bio);
1706
1707 if (need_sync_io) {
1708 wait_for_completion_io(&read_comp);
1709 if (likely(!bio->bi_status))
1710 integrity_metadata(&dio->work);
1711 else
1712 dec_in_flight(dio);
1713
1714 } else {
1715 INIT_WORK(&dio->work, integrity_metadata);
1716 queue_work(ic->metadata_wq, &dio->work);
1717 }
1718
1719 return;
1720
1721journal_read_write:
1722 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
1723 goto lock_retry;
1724
1725 do_endio_flush(ic, dio);
1726}
1727
1728
1729static void integrity_bio_wait(struct work_struct *w)
1730{
1731 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1732
1733 dm_integrity_map_continue(dio, false);
1734}
1735
1736static void pad_uncommitted(struct dm_integrity_c *ic)
1737{
1738 if (ic->free_section_entry) {
1739 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
1740 ic->free_section_entry = 0;
1741 ic->free_section++;
1742 wraparound_section(ic, &ic->free_section);
1743 ic->n_uncommitted_sections++;
1744 }
1745 WARN_ON(ic->journal_sections * ic->journal_section_entries !=
1746 (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
1747}
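/*
 * Editorial note (not in the original source): the WARN_ON above checks
 * the journal accounting invariant - every journal entry is either in an
 * uncommitted section, in a committed section, or counted in free_sectors:
 *
 *	journal_sections * journal_section_entries ==
 *		(n_uncommitted_sections + n_committed_sections) *
 *		journal_section_entries + free_sectors
 */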
1748
1749static void integrity_commit(struct work_struct *w)
1750{
1751 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
1752 unsigned commit_start, commit_sections;
1753 unsigned i, j, n;
1754 struct bio *flushes;
1755
1756 del_timer(&ic->autocommit_timer);
1757
1758 spin_lock_irq(&ic->endio_wait.lock);
1759 flushes = bio_list_get(&ic->flush_bio_list);
1760 if (unlikely(ic->mode != 'J')) {
1761 spin_unlock_irq(&ic->endio_wait.lock);
1762 dm_integrity_flush_buffers(ic);
1763 goto release_flush_bios;
1764 }
1765
1766 pad_uncommitted(ic);
1767 commit_start = ic->uncommitted_section;
1768 commit_sections = ic->n_uncommitted_sections;
1769 spin_unlock_irq(&ic->endio_wait.lock);
1770
1771 if (!commit_sections)
1772 goto release_flush_bios;
1773
1774 i = commit_start;
1775 for (n = 0; n < commit_sections; n++) {
1776 for (j = 0; j < ic->journal_section_entries; j++) {
1777 struct journal_entry *je;
1778 je = access_journal_entry(ic, i, j);
1779 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1780 }
1781 for (j = 0; j < ic->journal_section_sectors; j++) {
1782 struct journal_sector *js;
1783 js = access_journal(ic, i, j);
1784 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
1785 }
1786 i++;
1787 if (unlikely(i >= ic->journal_sections))
1788 ic->commit_seq = next_commit_seq(ic->commit_seq);
1789 wraparound_section(ic, &i);
1790 }
1791 smp_rmb();
1792
1793 write_journal(ic, commit_start, commit_sections);
1794
1795 spin_lock_irq(&ic->endio_wait.lock);
1796 ic->uncommitted_section += commit_sections;
1797 wraparound_section(ic, &ic->uncommitted_section);
1798 ic->n_uncommitted_sections -= commit_sections;
1799 ic->n_committed_sections += commit_sections;
1800 spin_unlock_irq(&ic->endio_wait.lock);
1801
1802 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
1803 queue_work(ic->writer_wq, &ic->writer_work);
1804
1805release_flush_bios:
1806 while (flushes) {
1807 struct bio *next = flushes->bi_next;
1808 flushes->bi_next = NULL;
1809 do_endio(ic, flushes);
1810 flushes = next;
1811 }
1812}
1813
1814static void complete_copy_from_journal(unsigned long error, void *context)
1815{
1816 struct journal_io *io = context;
1817 struct journal_completion *comp = io->comp;
1818 struct dm_integrity_c *ic = comp->ic;
1819 remove_range(ic, &io->range);
1820 mempool_free(io, ic->journal_io_mempool);
1821 if (unlikely(error != 0))
1822 dm_integrity_io_error(ic, "copying from journal", -EIO);
1823 complete_journal_op(comp);
1824}
1825
1826static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
1827 struct journal_entry *je)
1828{
1829 unsigned s = 0;
1830 do {
1831 js->commit_id = je->last_bytes[s];
1832 js++;
1833 } while (++s < ic->sectors_per_block);
1834}
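/*
 * Editorial note (not in the original source): while an entry sits in the
 * journal, the last 8 bytes of each 512-byte data sector are displaced by
 * js->commit_id and preserved in je->last_bytes (see __journal_read_write);
 * this helper puts the original bytes back before the data is copied to
 * its final location.
 */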
1835
1836static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1837 unsigned write_sections, bool from_replay)
1838{
1839 unsigned i, j, n;
1840 struct journal_completion comp;
1841 struct blk_plug plug;
1842
1843 blk_start_plug(&plug);
1844
1845 comp.ic = ic;
1846 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
1847 init_completion(&comp.comp);
1848
1849 i = write_start;
1850 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
1851#ifndef INTERNAL_VERIFY
1852 if (unlikely(from_replay))
1853#endif
1854 rw_section_mac(ic, i, false);
1855 for (j = 0; j < ic->journal_section_entries; j++) {
1856 struct journal_entry *je = access_journal_entry(ic, i, j);
1857 sector_t sec, area, offset;
1858 unsigned k, l, next_loop;
1859 sector_t metadata_block;
1860 unsigned metadata_offset;
1861 struct journal_io *io;
1862
1863 if (journal_entry_is_unused(je))
1864 continue;
1865 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
1866 sec = journal_entry_get_sector(je);
1867 if (unlikely(from_replay)) {
1868 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
1869 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
1870 sec &= ~(sector_t)(ic->sectors_per_block - 1);
1871 }
1872 }
1873 get_area_and_offset(ic, sec, &area, &offset);
1874 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
1875 for (k = j + 1; k < ic->journal_section_entries; k++) {
1876 struct journal_entry *je2 = access_journal_entry(ic, i, k);
1877 sector_t sec2, area2, offset2;
1878 if (journal_entry_is_unused(je2))
1879 break;
1880 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
1881 sec2 = journal_entry_get_sector(je2);
1882 get_area_and_offset(ic, sec2, &area2, &offset2);
1883 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
1884 break;
1885 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
1886 }
1887 next_loop = k - 1;
1888
1889 io = mempool_alloc(ic->journal_io_mempool, GFP_NOIO);
1890			io->comp = &comp;
1891 io->range.logical_sector = sec;
1892 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
1893
1894 spin_lock_irq(&ic->endio_wait.lock);
1895 while (unlikely(!add_new_range(ic, &io->range)))
1896 sleep_on_endio_wait(ic);
1897
1898 if (likely(!from_replay)) {
1899 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
1900
1901				/* don't write if there is a newer committed sector */
1902				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
1903					struct journal_entry *je2 = access_journal_entry(ic, i, j);
1904
1905					journal_entry_set_unused(je2);
1906					remove_journal_node(ic, &section_node[j]);
1907					j++;
1908					sec += ic->sectors_per_block;
1909					offset += ic->sectors_per_block;
1910				}
1911				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
1912					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
1913
1914					journal_entry_set_unused(je2);
1915					remove_journal_node(ic, &section_node[k - 1]);
1916 k--;
1917 }
1918 if (j == k) {
1919 remove_range_unlocked(ic, &io->range);
1920 spin_unlock_irq(&ic->endio_wait.lock);
1921 mempool_free(io, ic->journal_io_mempool);
1922 goto skip_io;
1923 }
1924 for (l = j; l < k; l++) {
1925				remove_journal_node(ic, &section_node[l]);
1926 }
1927 }
1928 spin_unlock_irq(&ic->endio_wait.lock);
1929
1930 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
1931 for (l = j; l < k; l++) {
1932 int r;
1933 struct journal_entry *je2 = access_journal_entry(ic, i, l);
1934
1935 if (
1936#ifndef INTERNAL_VERIFY
1937 unlikely(from_replay) &&
1938#endif
1939 ic->internal_hash) {
1940 char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
1941
1942 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
1943 (char *)access_journal_data(ic, i, l), test_tag);
1944 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
1945 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
1946 }
1947
1948 journal_entry_set_unused(je2);
1949 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
1950 ic->tag_size, TAG_WRITE);
1951 if (unlikely(r)) {
1952					dm_integrity_io_error(ic, "writing tags", r);
1953 }
1954 }
1955
1956 atomic_inc(&comp.in_flight);
1957 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
1958 (k - j) << ic->sb->log2_sectors_per_block,
1959 get_data_sector(ic, area, offset),
1960 complete_copy_from_journal, io);
1961skip_io:
1962 j = next_loop;
1963 }
1964 }
1965
1966 dm_bufio_write_dirty_buffers_async(ic->bufio);
1967
1968 blk_finish_plug(&plug);
1969
1970 complete_journal_op(&comp);
1971 wait_for_completion_io(&comp.comp);
1972
1973 dm_integrity_flush_buffers(ic);
1974}
1975
1976static void integrity_writer(struct work_struct *w)
1977{
1978 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
1979 unsigned write_start, write_sections;
1980
1981 unsigned prev_free_sectors;
1982
1983 /* the following test is not needed, but it tests the replay code */
1984 if (READ_ONCE(ic->suspending))
1985 return;
1986
1987 spin_lock_irq(&ic->endio_wait.lock);
1988 write_start = ic->committed_section;
1989 write_sections = ic->n_committed_sections;
1990 spin_unlock_irq(&ic->endio_wait.lock);
1991
1992 if (!write_sections)
1993 return;
1994
1995 do_journal_write(ic, write_start, write_sections, false);
1996
1997 spin_lock_irq(&ic->endio_wait.lock);
1998
1999 ic->committed_section += write_sections;
2000 wraparound_section(ic, &ic->committed_section);
2001 ic->n_committed_sections -= write_sections;
2002
2003 prev_free_sectors = ic->free_sectors;
2004 ic->free_sectors += write_sections * ic->journal_section_entries;
2005 if (unlikely(!prev_free_sectors))
2006 wake_up_locked(&ic->endio_wait);
2007
2008 spin_unlock_irq(&ic->endio_wait.lock);
2009}
2010
2011static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2012 unsigned n_sections, unsigned char commit_seq)
2013{
2014 unsigned i, j, n;
2015
2016 if (!n_sections)
2017 return;
2018
2019 for (n = 0; n < n_sections; n++) {
2020 i = start_section + n;
2021 wraparound_section(ic, &i);
2022 for (j = 0; j < ic->journal_section_sectors; j++) {
2023 struct journal_sector *js = access_journal(ic, i, j);
2024 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2025 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2026 }
2027 for (j = 0; j < ic->journal_section_entries; j++) {
2028 struct journal_entry *je = access_journal_entry(ic, i, j);
2029 journal_entry_set_unused(je);
2030 }
2031 }
2032
2033 write_journal(ic, start_section, n_sections);
2034}
2035
2036static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2037{
2038 unsigned char k;
2039 for (k = 0; k < N_COMMIT_IDS; k++) {
2040 if (dm_integrity_commit_id(ic, i, j, k) == id)
2041 return k;
2042 }
2043 dm_integrity_io_error(ic, "journal commit id", -EIO);
2044 return -EIO;
2045}
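/*
 * Editorial note (not in the original source): every journal sector is
 * stamped with one of N_COMMIT_IDS rotating commit ids, and the sequence
 * advances each time the journal wraps around; replay_journal() below uses
 * the pattern of ids found on disk to locate the boundary between the
 * oldest and newest data after a crash.
 */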
2046
2047static void replay_journal(struct dm_integrity_c *ic)
2048{
2049 unsigned i, j;
2050 bool used_commit_ids[N_COMMIT_IDS];
2051 unsigned max_commit_id_sections[N_COMMIT_IDS];
2052 unsigned write_start, write_sections;
2053 unsigned continue_section;
2054 bool journal_empty;
2055 unsigned char unused, last_used, want_commit_seq;
2056
2057 if (ic->mode == 'R')
2058 return;
2059
2060 if (ic->journal_uptodate)
2061 return;
2062
2063 last_used = 0;
2064 write_start = 0;
2065
2066 if (!ic->just_formatted) {
2067 DEBUG_print("reading journal\n");
2068 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2069 if (ic->journal_io)
2070 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2071 if (ic->journal_io) {
2072 struct journal_completion crypt_comp;
2073 crypt_comp.ic = ic;
2074 init_completion(&crypt_comp.comp);
2075 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2076 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2077 wait_for_completion(&crypt_comp.comp);
2078 }
2079 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2080 }
2081
2082 if (dm_integrity_failed(ic))
2083 goto clear_journal;
2084
2085 journal_empty = true;
2086 memset(used_commit_ids, 0, sizeof used_commit_ids);
2087 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2088 for (i = 0; i < ic->journal_sections; i++) {
2089 for (j = 0; j < ic->journal_section_sectors; j++) {
2090 int k;
2091 struct journal_sector *js = access_journal(ic, i, j);
2092 k = find_commit_seq(ic, i, j, js->commit_id);
2093 if (k < 0)
2094 goto clear_journal;
2095 used_commit_ids[k] = true;
2096 max_commit_id_sections[k] = i;
2097 }
2098 if (journal_empty) {
2099 for (j = 0; j < ic->journal_section_entries; j++) {
2100 struct journal_entry *je = access_journal_entry(ic, i, j);
2101 if (!journal_entry_is_unused(je)) {
2102 journal_empty = false;
2103 break;
2104 }
2105 }
2106 }
2107 }
2108
2109 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2110 unused = N_COMMIT_IDS - 1;
2111 while (unused && !used_commit_ids[unused - 1])
2112 unused--;
2113 } else {
2114 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2115 if (!used_commit_ids[unused])
2116 break;
2117 if (unused == N_COMMIT_IDS) {
2118 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2119 goto clear_journal;
2120 }
2121 }
2122 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2123 unused, used_commit_ids[0], used_commit_ids[1],
2124 used_commit_ids[2], used_commit_ids[3]);
2125
2126 last_used = prev_commit_seq(unused);
2127 want_commit_seq = prev_commit_seq(last_used);
2128
2129 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2130 journal_empty = true;
2131
2132 write_start = max_commit_id_sections[last_used] + 1;
2133 if (unlikely(write_start >= ic->journal_sections))
2134 want_commit_seq = next_commit_seq(want_commit_seq);
2135 wraparound_section(ic, &write_start);
2136
2137 i = write_start;
2138 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2139 for (j = 0; j < ic->journal_section_sectors; j++) {
2140 struct journal_sector *js = access_journal(ic, i, j);
2141
2142 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2143 /*
2144				 * This could be caused by a crash during writing.
2145 * We won't replay the inconsistent part of the
2146 * journal.
2147 */
2148 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2149 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2150 goto brk;
2151 }
2152 }
2153 i++;
2154 if (unlikely(i >= ic->journal_sections))
2155 want_commit_seq = next_commit_seq(want_commit_seq);
2156 wraparound_section(ic, &i);
2157 }
2158brk:
2159
2160 if (!journal_empty) {
2161 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2162 write_sections, write_start, want_commit_seq);
2163 do_journal_write(ic, write_start, write_sections, true);
2164 }
2165
2166 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
2167 continue_section = write_start;
2168 ic->commit_seq = want_commit_seq;
2169 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
2170 } else {
2171 unsigned s;
2172 unsigned char erase_seq;
2173clear_journal:
2174 DEBUG_print("clearing journal\n");
2175
2176 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
2177 s = write_start;
2178 init_journal(ic, s, 1, erase_seq);
2179 s++;
2180 wraparound_section(ic, &s);
2181 if (ic->journal_sections >= 2) {
2182 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
2183 s += ic->journal_sections - 2;
2184 wraparound_section(ic, &s);
2185 init_journal(ic, s, 1, erase_seq);
2186 }
2187
2188 continue_section = 0;
2189 ic->commit_seq = next_commit_seq(erase_seq);
2190 }
2191
2192 ic->committed_section = continue_section;
2193 ic->n_committed_sections = 0;
2194
2195 ic->uncommitted_section = continue_section;
2196 ic->n_uncommitted_sections = 0;
2197
2198 ic->free_section = continue_section;
2199 ic->free_section_entry = 0;
2200 ic->free_sectors = ic->journal_entries;
2201
2202 ic->journal_tree_root = RB_ROOT;
2203 for (i = 0; i < ic->journal_entries; i++)
2204 init_journal_node(&ic->journal_tree[i]);
2205}
2206
2207static void dm_integrity_postsuspend(struct dm_target *ti)
2208{
2209 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2210
2211 del_timer_sync(&ic->autocommit_timer);
2212
2213 ic->suspending = true;
2214
2215 queue_work(ic->commit_wq, &ic->commit_work);
2216 drain_workqueue(ic->commit_wq);
2217
2218 if (ic->mode == 'J') {
2219 drain_workqueue(ic->writer_wq);
2220 dm_integrity_flush_buffers(ic);
2221 }
2222
2223 ic->suspending = false;
2224
2225 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
2226
2227 ic->journal_uptodate = true;
2228}
2229
2230static void dm_integrity_resume(struct dm_target *ti)
2231{
2232 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2233
2234 replay_journal(ic);
2235}
2236
2237static void dm_integrity_status(struct dm_target *ti, status_type_t type,
2238 unsigned status_flags, char *result, unsigned maxlen)
2239{
2240 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
2241 unsigned arg_count;
2242 size_t sz = 0;
2243
2244 switch (type) {
2245 case STATUSTYPE_INFO:
2246 DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches));
2247 break;
2248
2249 case STATUSTYPE_TABLE: {
2250 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
2251 watermark_percentage += ic->journal_entries / 2;
2252 do_div(watermark_percentage, ic->journal_entries);
2253 arg_count = 5;
2254 arg_count += ic->sectors_per_block != 1;
2255 arg_count += !!ic->internal_hash_alg.alg_string;
2256 arg_count += !!ic->journal_crypt_alg.alg_string;
2257 arg_count += !!ic->journal_mac_alg.alg_string;
2258 DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
2259 ic->tag_size, ic->mode, arg_count);
2260 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
2261 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
2262 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
2263 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
2264 DMEMIT(" commit_time:%u", ic->autocommit_msec);
2265 if (ic->sectors_per_block != 1)
2266 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
2267
2268#define EMIT_ALG(a, n) \
2269 do { \
2270 if (ic->a.alg_string) { \
2271 DMEMIT(" %s:%s", n, ic->a.alg_string); \
2272 if (ic->a.key_string) \
2273 DMEMIT(":%s", ic->a.key_string);\
2274 } \
2275 } while (0)
2276 EMIT_ALG(internal_hash_alg, "internal_hash");
2277 EMIT_ALG(journal_crypt_alg, "journal_crypt");
2278 EMIT_ALG(journal_mac_alg, "journal_mac");
2279 break;
2280 }
2281 }
2282}
2283
2284static int dm_integrity_iterate_devices(struct dm_target *ti,
2285 iterate_devices_callout_fn fn, void *data)
2286{
2287 struct dm_integrity_c *ic = ti->private;
2288
2289 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
2290}
2291
2292static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
2293{
2294 struct dm_integrity_c *ic = ti->private;
2295
2296 if (ic->sectors_per_block > 1) {
2297 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2298 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
2299 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
2300 }
2301}
2302
2303static void calculate_journal_section_size(struct dm_integrity_c *ic)
2304{
2305 unsigned sector_space = JOURNAL_SECTOR_DATA;
2306
2307 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
2308 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
2309 JOURNAL_ENTRY_ROUNDUP);
2310
2311 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
2312 sector_space -= JOURNAL_MAC_PER_SECTOR;
2313 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
2314 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
2315 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
2316 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
2317}
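/*
 * Worked example (editorial, not in the original source), assuming 512-byte
 * blocks (log2_sectors_per_block == 0), a 4-byte tag and no journal MAC
 * (so 504 data bytes per journal sector):
 *
 *	journal_entry_size      = roundup(16 + 4, 8) = 24 bytes
 *	journal_entries_per_sector = 504 / 24        = 21
 *	journal_section_entries = 21 * 8             = 168
 *	journal_section_sectors = (168 << 0) + 8     = 176 sectors
 *
 * i.e. each journal section holds 168 data blocks plus 8 metadata sectors.
 */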
2318
2319static int calculate_device_limits(struct dm_integrity_c *ic)
2320{
2321 __u64 initial_sectors;
2322 sector_t last_sector, last_area, last_offset;
2323
2324 calculate_journal_section_size(ic);
2325 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
2326 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->device_sectors || initial_sectors > UINT_MAX)
2327 return -EINVAL;
2328 ic->initial_sectors = initial_sectors;
2329
2330 ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
2331 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
2332 if (!(ic->metadata_run & (ic->metadata_run - 1)))
2333 ic->log2_metadata_run = __ffs(ic->metadata_run);
2334 else
2335 ic->log2_metadata_run = -1;
2336
2337 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
2338 last_sector = get_data_sector(ic, last_area, last_offset);
2339
2340 if (ic->start + last_sector < last_sector || ic->start + last_sector >= ic->device_sectors)
2341 return -EINVAL;
2342
2343 return 0;
2344}
2345
2346static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
2347{
2348 unsigned journal_sections;
2349 int test_bit;
2350
2351 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
2352 memcpy(ic->sb->magic, SB_MAGIC, 8);
2353 ic->sb->version = SB_VERSION;
2354 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
2355 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
2356 if (ic->journal_mac_alg.alg_string)
2357 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
2358
2359 calculate_journal_section_size(ic);
2360 journal_sections = journal_sectors / ic->journal_section_sectors;
2361 if (!journal_sections)
2362 journal_sections = 1;
2363 ic->sb->journal_sections = cpu_to_le32(journal_sections);
2364
2365 if (!interleave_sectors)
2366 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
2367 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
2368 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2369 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
2370
2371 ic->provided_data_sectors = 0;
2372 for (test_bit = fls64(ic->device_sectors) - 1; test_bit >= 3; test_bit--) {
2373 __u64 prev_data_sectors = ic->provided_data_sectors;
2374
2375 ic->provided_data_sectors |= (sector_t)1 << test_bit;
2376 if (calculate_device_limits(ic))
2377 ic->provided_data_sectors = prev_data_sectors;
2378 }
2379
2380 if (!ic->provided_data_sectors)
2381 return -EINVAL;
2382
2383 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
2384
2385 return 0;
2386}
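/*
 * Editorial note (not in the original source): the loop above performs a
 * greedy bit-by-bit search for the largest provided_data_sectors that still
 * passes calculate_device_limits() - each bit from the top down is set
 * tentatively and kept only if the resulting layout fits on the device.
 */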
2387
2388static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
2389{
2390 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
2391 struct blk_integrity bi;
2392
2393 memset(&bi, 0, sizeof(bi));
2394 bi.profile = &dm_integrity_profile;
2395 bi.tuple_size = ic->tag_size;
2396 bi.tag_size = bi.tuple_size;
2397 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
2398
2399 blk_integrity_register(disk, &bi);
2400 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
2401}
2402
2403static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
2404{
2405 unsigned i;
2406
2407 if (!pl)
2408 return;
2409 for (i = 0; i < ic->journal_pages; i++)
2410 if (pl[i].page)
2411 __free_page(pl[i].page);
2412 kvfree(pl);
2413}
2414
2415static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
2416{
2417 size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
2418 struct page_list *pl;
2419 unsigned i;
2420
2421 pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
2422 if (!pl)
2423 return NULL;
2424
2425 for (i = 0; i < ic->journal_pages; i++) {
2426 pl[i].page = alloc_page(GFP_KERNEL);
2427 if (!pl[i].page) {
2428 dm_integrity_free_page_list(ic, pl);
2429 return NULL;
2430 }
2431 if (i)
2432 pl[i - 1].next = &pl[i];
2433 }
2434
2435 return pl;
2436}
2437
2438static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
2439{
2440 unsigned i;
2441 for (i = 0; i < ic->journal_sections; i++)
2442 kvfree(sl[i]);
2443 kvfree(sl);
2444}
2445
2446static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
2447{
2448 struct scatterlist **sl;
2449 unsigned i;
2450
2451 sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO);
2452 if (!sl)
2453 return NULL;
2454
2455 for (i = 0; i < ic->journal_sections; i++) {
2456 struct scatterlist *s;
2457 unsigned start_index, start_offset;
2458 unsigned end_index, end_offset;
2459 unsigned n_pages;
2460 unsigned idx;
2461
2462 page_list_location(ic, i, 0, &start_index, &start_offset);
2463 page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
2464
2465 n_pages = (end_index - start_index + 1);
2466
2467 s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL);
2468 if (!s) {
2469 dm_integrity_free_journal_scatterlist(ic, sl);
2470 return NULL;
2471 }
2472
2473 sg_init_table(s, n_pages);
2474 for (idx = start_index; idx <= end_index; idx++) {
2475 char *va = lowmem_page_address(pl[idx].page);
2476 unsigned start = 0, end = PAGE_SIZE;
2477 if (idx == start_index)
2478 start = start_offset;
2479 if (idx == end_index)
2480 end = end_offset + (1 << SECTOR_SHIFT);
2481 sg_set_buf(&s[idx - start_index], va + start, end - start);
2482 }
2483
2484 sl[i] = s;
2485 }
2486
2487 return sl;
2488}
2489
2490static void free_alg(struct alg_spec *a)
2491{
2492 kzfree(a->alg_string);
2493 kzfree(a->key);
2494 memset(a, 0, sizeof *a);
2495}
2496
2497static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
2498{
2499 char *k;
2500
2501 free_alg(a);
2502
2503 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
2504 if (!a->alg_string)
2505 goto nomem;
2506
2507 k = strchr(a->alg_string, ':');
2508 if (k) {
2509 *k = 0;
2510 a->key_string = k + 1;
2511 if (strlen(a->key_string) & 1)
2512 goto inval;
2513
2514 a->key_size = strlen(a->key_string) / 2;
2515 a->key = kmalloc(a->key_size, GFP_KERNEL);
2516 if (!a->key)
2517 goto nomem;
2518 if (hex2bin(a->key, a->key_string, a->key_size))
2519 goto inval;
2520 }
2521
2522 return 0;
2523inval:
2524 *error = error_inval;
2525 return -EINVAL;
2526nomem:
2527 *error = "Out of memory for an argument";
2528 return -ENOMEM;
2529}
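/*
 * Illustrative argument forms (editorial, not from the original source):
 *
 *	internal_hash:crc32c                       - algorithm only
 *	journal_mac:hmac(sha256):0123456789abcdef  - algorithm plus hex key
 *
 * The key, when present, must have an even number of hex digits; it is
 * decoded into a->key with hex2bin().
 */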
2530
2531static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
2532 char *error_alg, char *error_key)
2533{
2534 int r;
2535
2536 if (a->alg_string) {
2537 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
2538 if (IS_ERR(*hash)) {
2539 *error = error_alg;
2540 r = PTR_ERR(*hash);
2541 *hash = NULL;
2542 return r;
2543 }
2544
2545 if (a->key) {
2546 r = crypto_shash_setkey(*hash, a->key, a->key_size);
2547 if (r) {
2548 *error = error_key;
2549 return r;
2550 }
2551 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
2552 *error = error_key;
2553 return -ENOKEY;
2554 }
2555 }
2556
2557 return 0;
2558}
2559
2560static int create_journal(struct dm_integrity_c *ic, char **error)
2561{
2562 int r = 0;
2563 unsigned i;
2564 __u64 journal_pages, journal_desc_size, journal_tree_size;
2565 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2566 struct skcipher_request *req = NULL;
2567
2568 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2569 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
2570 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
2571 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
2572
2573 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
2574 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
2575 journal_desc_size = journal_pages * sizeof(struct page_list);
2576 if (journal_pages >= totalram_pages - totalhigh_pages || journal_desc_size > ULONG_MAX) {
2577 *error = "Journal doesn't fit into memory";
2578 r = -ENOMEM;
2579 goto bad;
2580 }
2581 ic->journal_pages = journal_pages;
2582
2583 ic->journal = dm_integrity_alloc_page_list(ic);
2584 if (!ic->journal) {
2585 *error = "Could not allocate memory for journal";
2586 r = -ENOMEM;
2587 goto bad;
2588 }
2589 if (ic->journal_crypt_alg.alg_string) {
2590 unsigned ivsize, blocksize;
2591 struct journal_completion comp;
2592
2593 comp.ic = ic;
2594 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
2595 if (IS_ERR(ic->journal_crypt)) {
2596 *error = "Invalid journal cipher";
2597 r = PTR_ERR(ic->journal_crypt);
2598 ic->journal_crypt = NULL;
2599 goto bad;
2600 }
2601 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
2602 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
2603
2604 if (ic->journal_crypt_alg.key) {
2605 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
2606 ic->journal_crypt_alg.key_size);
2607 if (r) {
2608 *error = "Error setting encryption key";
2609 goto bad;
2610 }
2611 }
2612 DEBUG_print("cipher %s, block size %u iv size %u\n",
2613 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
2614
2615 ic->journal_io = dm_integrity_alloc_page_list(ic);
2616 if (!ic->journal_io) {
2617 *error = "Could not allocate memory for journal io";
2618 r = -ENOMEM;
2619 goto bad;
2620 }
2621
2622 if (blocksize == 1) {
2623 struct scatterlist *sg;
2624
2625 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2626 if (!req) {
2627 *error = "Could not allocate crypt request";
2628 r = -ENOMEM;
2629 goto bad;
2630 }
2631
2632 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2633 if (!crypt_iv) {
2634 *error = "Could not allocate iv";
2635 r = -ENOMEM;
2636 goto bad;
2637 }
2638
2639 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2640 if (!ic->journal_xor) {
2641 *error = "Could not allocate memory for journal xor";
2642 r = -ENOMEM;
2643 goto bad;
2644 }
2645
			sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);

				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
			memset(crypt_iv, 0x00, ivsize);

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *),
						   GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
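			/*
			 * Derive one IV per journal section: encrypt the
			 * little-endian section number with an all-zero IV
			 * and use the ciphertext as that section's IV, so
			 * every section gets a unique, key-dependent IV.
			 */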
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
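				/*
				 * The IV buffer is twice ivsize: the upper
				 * half permanently stores the derived IV,
				 * the lower half is working space that is
				 * presumably restored from the saved copy
				 * each time the request is reused.
				 */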
				section_req->iv = kmalloc(ivsize * 2, GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

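	/*
	 * Journal replay distinguishes generations by these commit ids, so
	 * they must be pairwise distinct: bump any id that collides with an
	 * earlier one and restart the scan.
	 */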
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

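	/*
	 * On 32-bit systems this 64-bit product can exceed ULONG_MAX, the
	 * most kvmalloc() can allocate; reject such a configuration rather
	 * than truncate the size.
	 */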
	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		block_size
 */
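/*
 * For illustration only (the device, length and feature values are made
 * up), a table line matching the layout above could look like:
 *
 *	dmsetup create integ --table \
 *		"0 1638400 integrity /dev/sdb 0 - J 1 internal_hash:crc32c"
 *
 * Here the tag size is "-", so it defaults to the digest size of the
 * internal hash (4 bytes for crc32c).
 */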
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;

#define DIRECT_ARGUMENTS 4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);

	ic->in_progress = RB_ROOT;
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
		ic->mode = argv[3][0];
	else {
		ti->error = "Invalid mode (expecting J, D, R)";
		r = -EINVAL;
		goto bad;
	}

	ic->device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
			      ic->device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;

		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
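	/*
	 * (x & (x - 1)) == 0 iff x is a power of two.  When the tag size
	 * qualifies, its log2 is cached so tag offsets can be computed
	 * with shifts; -1 presumably selects the slower generic path.
	 */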
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	ic->journal_io_mempool = mempool_create_slab_pool(JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (!ic->journal_io_mempool) {
		r = -ENOMEM;
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_4) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
	    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
		r = -EINVAL;
		ti->error = "Invalid interleave_sectors in the superblock";
		goto bad;
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}
	r = calculate_device_limits(ic);
	if (r) {
		ti->error = "The device is too small";
		goto bad;
	}
	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
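	/*
	 * Bound the bufio block size three ways: by the user's request
	 * (rounded down to a power of two via __fls), by the alignment of
	 * the metadata run (__ffs), and by 31 - SECTOR_SHIFT so that the
	 * byte size below, 1U << (SECTOR_SHIFT + log2_buffer_sectors),
	 * cannot overflow.
	 */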
	ic->log2_buffer_sectors = min3((int)__fls(buffer_sectors), (int)__ffs(ic->metadata_run),
				       31 - SECTOR_SHIFT);

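	/*
	 * Threshold arithmetic: journal_entries * (100 - watermark) / 100,
	 * with +50 applied before the division to round to nearest.  The
	 * commit work is presumably kicked once the number of free journal
	 * entries drops below this value.
	 */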
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

	ic->bufio = dm_bufio_client_create(ic->dev->bdev, 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors),
					   1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
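	/*
	 * Tag metadata is read and written through dm-bufio.  The offset
	 * shifts bufio's view past the target start and the initial area
	 * (presumably superblock plus journal), so that buffer 0 maps the
	 * first metadata block.
	 */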
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

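	/*
	 * Split bios at interleave boundaries: data and metadata areas
	 * alternate on disk, so no single bio may span the metadata area
	 * that separates two data areas.
	 */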
	r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
	if (r)
		goto bad;

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_destroy(ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	dm_integrity_free_page_list(ic, ic->journal);
	dm_integrity_free_page_list(ic, ic->journal_io);
	dm_integrity_free_page_list(ic, ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];

			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 1, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};

int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");