/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
 *
 * This file is released under the GPLv2.
 *
 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
 * the default prefetch value. Data are read in "prefetch_cluster" chunks from
 * the hash device. Setting this greatly improves performance when data and
 * hash are on the same disk on different partitions on devices with poor
 * random access behavior.
 */

#include "dm-bufio.h"

#include <linux/module.h>
#include <linux/device-mapper.h>
#include <crypto/hash.h>

#define DM_MSG_PREFIX			"verity"

#define DM_VERITY_IO_VEC_INLINE		16
#define DM_VERITY_MEMPOOL_SIZE		4
#define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144

#define DM_VERITY_MAX_LEVELS		63

static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
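/*
 * Note (informational): prefetch_cluster is a byte count; verity_prefetch_io()
 * shifts it down by data_dev_block_bits before using it as a block count, so
 * the default of 262144 corresponds to 256 KiB.
 */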

struct dm_verity {
	struct dm_dev *data_dev;
	struct dm_dev *hash_dev;
	struct dm_target *ti;
	struct dm_bufio_client *bufio;
	char *alg_name;
	struct crypto_shash *tfm;
	u8 *root_digest;	/* digest of the root block */
	u8 *salt;		/* salt: its size is salt_size */
	unsigned salt_size;
	sector_t data_start;	/* data offset in 512-byte sectors */
	sector_t hash_start;	/* hash start in blocks */
	sector_t data_blocks;	/* the number of data blocks */
	sector_t hash_blocks;	/* the number of hash blocks */
	unsigned char data_dev_block_bits;	/* log2(data blocksize) */
	unsigned char hash_dev_block_bits;	/* log2(hash blocksize) */
	unsigned char hash_per_block_bits;	/* log2(hashes in hash block) */
	unsigned char levels;	/* the number of tree levels */
	unsigned char version;
	unsigned digest_size;	/* digest size for the current hash algorithm */
	unsigned shash_descsize;/* the size of temporary space for crypto */
	int hash_failed;	/* set to 1 if hash of any block failed */

	mempool_t *io_mempool;	/* mempool of struct dm_verity_io */
	mempool_t *vec_mempool;	/* mempool of bio vector */

	struct workqueue_struct *verify_wq;

	/* starting blocks for each tree level. 0 is the lowest level. */
	sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
};

struct dm_verity_io {
	struct dm_verity *v;
	struct bio *bio;

	/* original values of bio->bi_end_io and bio->bi_private */
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;

	sector_t block;
	unsigned n_blocks;

	/* saved bio vector */
	struct bio_vec *io_vec;
	unsigned io_vec_size;

	struct work_struct work;

	/* A space for short vectors; longer vectors are allocated separately. */
	struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];

	/*
	 * Three variably-sized fields follow this struct:
	 *
	 * u8 hash_desc[v->shash_descsize];
	 * u8 real_digest[v->digest_size];
	 * u8 want_digest[v->digest_size];
	 *
	 * To access them use: io_hash_desc(), io_real_digest() and io_want_digest().
	 */
};

static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
{
	return (struct shash_desc *)(io + 1);
}

static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io)
{
	return (u8 *)(io + 1) + v->shash_descsize;
}

static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io)
{
	return (u8 *)(io + 1) + v->shash_descsize + v->digest_size;
}
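
/*
 * Illustrative layout (not used directly by the code): for a hypothetical
 * sha256 transform, digest_size is 32, so a single allocation of
 *
 *	sizeof(struct dm_verity_io) + shash_descsize + 2 * 32
 *
 * bytes holds the io structure, the shash descriptor, the computed digest
 * and the wanted digest back to back; the three helpers above simply index
 * into that trailing area, which is exactly how io_mempool is sized.
 */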

/*
 * Auxiliary structure appended to each dm-bufio buffer. If the value
 * hash_verified is nonzero, the hash of the block has been verified.
 *
 * The variable hash_verified is set to 0 when allocating the buffer, then
 * it can be changed to 1 and it is never reset to 0 again.
 *
 * There is no lock around this value; at worst, a race causes multiple
 * processes to verify the hash of the same buffer simultaneously and to
 * write 1 to hash_verified at the same time.
 * This condition is harmless, so we don't need locking.
 */
struct buffer_aux {
	int hash_verified;
};

/*
 * Initialize struct buffer_aux for a freshly created buffer.
 */
static void dm_bufio_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->hash_verified = 0;
}

/*
 * Translate input sector number to the sector number on the target device.
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	return v->data_start + dm_target_offset(v->ti, bi_sector);
}

/*
 * Return the hash position of a specified block at a specified tree level
 * (0 is the lowest level).
 * The lowest "hash_per_block_bits" bits of the result denote the hash position
 * inside a hash block. The remaining bits denote the location of the hash block.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	return block >> (level * v->hash_per_block_bits);
}

static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned *offset)
{
	sector_t position = verity_position_at_level(v, block, level);
	unsigned idx;

	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

	if (!offset)
		return;

	idx = position & ((1 << v->hash_per_block_bits) - 1);
	if (!v->version)
		*offset = idx * v->digest_size;
	else
		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
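
/*
 * Worked example (illustrative): with 4096-byte hash blocks and 32-byte
 * digests, hash_per_block_bits is 7 (128 hashes per block). For data block
 * 1000 at level 0, position is 1000, so its hash lives in hash block
 * hash_level_block[0] + 7 (i.e. 1000 >> 7) at index 1000 & 127 = 104; the
 * byte offset is 104 * 32 for version 0, or 104 << (12 - 7) = 3328 for
 * version 1 (the same value here, since the digest exactly fills its slot).
 */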

/*
 * Verify the hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return (0), io_want_digest(v, io) contains the hash value for
 * a lower tree level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, an unverified buffer is skipped and 1 is
 * returned. If "skip_unverified" is false, an unverified buffer is hashed and
 * verified against the current value of io_want_digest(v, io).
 */
static int verity_verify_level(struct dm_verity_io *io, sector_t block,
			       int level, bool skip_unverified)
{
	struct dm_verity *v = io->v;
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned offset;

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	data = dm_bufio_read(v->bufio, hash_block, &buf);
	if (unlikely(IS_ERR(data)))
		return PTR_ERR(data);

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		struct shash_desc *desc;
		u8 *result;

		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		desc = io_hash_desc(v, io);
		desc->tfm = v->tfm;
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		r = crypto_shash_init(desc);
		if (r < 0) {
			DMERR("crypto_shash_init failed: %d", r);
			goto release_ret_r;
		}

		if (likely(v->version >= 1)) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
			if (r < 0) {
				DMERR("crypto_shash_update failed: %d", r);
				goto release_ret_r;
			}
		}

		r = crypto_shash_update(desc, data, 1 << v->hash_dev_block_bits);
		if (r < 0) {
			DMERR("crypto_shash_update failed: %d", r);
			goto release_ret_r;
		}

		if (!v->version) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
			if (r < 0) {
				DMERR("crypto_shash_update failed: %d", r);
				goto release_ret_r;
			}
		}

		result = io_real_digest(v, io);
		r = crypto_shash_final(desc, result);
		if (r < 0) {
			DMERR("crypto_shash_final failed: %d", r);
			goto release_ret_r;
		}
		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
			DMERR_LIMIT("metadata block %llu is corrupted",
				(unsigned long long)hash_block);
			v->hash_failed = 1;
			r = -EIO;
			goto release_ret_r;
		} else
			aux->hash_verified = 1;
	}

	data += offset;

	memcpy(io_want_digest(v, io), data, v->digest_size);

	dm_bufio_release(buf);
	return 0;

release_ret_r:
	dm_bufio_release(buf);

	return r;
}

/*
 * Verify one "dm_verity_io" structure.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
	struct dm_verity *v = io->v;
	unsigned b;
	int i;
	unsigned vector = 0, offset = 0;

	for (b = 0; b < io->n_blocks; b++) {
		struct shash_desc *desc;
		u8 *result;
		int r;
		unsigned todo;

		if (likely(v->levels)) {
			/*
			 * First, we try to get the requested hash for
			 * the current block. If the hash block itself is
			 * verified, zero is returned. If it isn't, this
			 * function returns 1 and we fall back to whole
			 * chain verification.
			 */
			int r = verity_verify_level(io, io->block + b, 0, true);
			if (likely(!r))
				goto test_block_hash;
			if (r < 0)
				return r;
		}

		memcpy(io_want_digest(v, io), v->root_digest, v->digest_size);

		for (i = v->levels - 1; i >= 0; i--) {
			int r = verity_verify_level(io, io->block + b, i, false);
			if (unlikely(r))
				return r;
		}

test_block_hash:
		desc = io_hash_desc(v, io);
		desc->tfm = v->tfm;
		desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		r = crypto_shash_init(desc);
		if (r < 0) {
			DMERR("crypto_shash_init failed: %d", r);
			return r;
		}

		if (likely(v->version >= 1)) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
			if (r < 0) {
				DMERR("crypto_shash_update failed: %d", r);
				return r;
			}
		}

		todo = 1 << v->data_dev_block_bits;
		do {
			struct bio_vec *bv;
			u8 *page;
			unsigned len;

			BUG_ON(vector >= io->io_vec_size);
			bv = &io->io_vec[vector];
			page = kmap_atomic(bv->bv_page);
			len = bv->bv_len - offset;
			if (likely(len >= todo))
				len = todo;
			r = crypto_shash_update(desc,
					page + bv->bv_offset + offset, len);
			kunmap_atomic(page);
			if (r < 0) {
				DMERR("crypto_shash_update failed: %d", r);
				return r;
			}
			offset += len;
			if (likely(offset == bv->bv_len)) {
				offset = 0;
				vector++;
			}
			todo -= len;
		} while (todo);

		if (!v->version) {
			r = crypto_shash_update(desc, v->salt, v->salt_size);
			if (r < 0) {
				DMERR("crypto_shash_update failed: %d", r);
				return r;
			}
		}

		result = io_real_digest(v, io);
		r = crypto_shash_final(desc, result);
		if (r < 0) {
			DMERR("crypto_shash_final failed: %d", r);
			return r;
		}
		if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) {
			DMERR_LIMIT("data block %llu is corrupted",
				(unsigned long long)(io->block + b));
			v->hash_failed = 1;
			return -EIO;
		}
	}
	BUG_ON(vector != io->io_vec_size);
	BUG_ON(offset);

	return 0;
}

/*
 * End one "io" structure with a given error.
 */
static void verity_finish_io(struct dm_verity_io *io, int error)
{
	struct bio *bio = io->bio;
	struct dm_verity *v = io->v;

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_private = io->orig_bi_private;

	if (io->io_vec != io->io_vec_inline)
		mempool_free(io->io_vec, v->vec_mempool);

	mempool_free(io, v->io_mempool);

	bio_endio(bio, error);
}

static void verity_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

	verity_finish_io(io, verity_verify_io(io));
}

static void verity_end_io(struct bio *bio, int error)
{
	struct dm_verity_io *io = bio->bi_private;

	if (error) {
		verity_finish_io(io, error);
		return;
	}

	INIT_WORK(&io->work, verity_work);
	queue_work(io->v->verify_wq, &io->work);
}

/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched; it is assumed that it will be cached
 * all the time.
 */
static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
{
	int i;

	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;
		verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
		if (!i) {
			unsigned cluster = *(volatile unsigned *)&dm_verity_prefetch_cluster;

			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << (fls(cluster) - 1);

			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_blocks))
				hash_block_end = v->hash_blocks - 1;
		}
no_prefetch_cluster:
		dm_bufio_prefetch(v->bufio, hash_block_start,
				  hash_block_end - hash_block_start + 1);
	}
}
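
/*
 * Example (illustrative): with the default prefetch_cluster of 262144 bytes
 * and 4096-byte data blocks, the cluster is 64 blocks, so for the lowest
 * tree level the prefetched range is aligned down and rounded up to a
 * 64-block boundary, then clamped to the end of the hash area.
 */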

/*
 * Bio map function. It allocates a dm_verity_io structure and a bio vector,
 * fills them, then issues the prefetches and the I/O.
 */
static int verity_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio->bi_bdev = v->data_dev->bdev;
	bio->bi_sector = verity_map_sector(v, bio->bi_sector);

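	/*
	 * Illustrative check: with 4096-byte data blocks,
	 * data_dev_block_bits - SECTOR_SHIFT = 12 - 9 = 3, so both the start
	 * sector and the length must be multiples of 8 sectors (one block).
	 */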
	if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return -EIO;
	}

	if ((bio->bi_sector + bio_sectors(bio)) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return -EIO;
	}

	if (bio_data_dir(bio) == WRITE)
		return -EIO;

	io = mempool_alloc(v->io_mempool, GFP_NOIO);
	io->v = v;
	io->bio = bio;
	io->orig_bi_end_io = bio->bi_end_io;
	io->orig_bi_private = bio->bi_private;
	io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_size >> v->data_dev_block_bits;

	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
		io->io_vec = io->io_vec_inline;
	else
		io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
	memcpy(io->io_vec, bio_iovec(bio),
	       io->io_vec_size * sizeof(struct bio_vec));

	verity_prefetch_io(v, io);

	generic_make_request(bio);

	return DM_MAPIO_SUBMITTED;
}

/*
 * Status: V (valid) or C (corruption found)
 */
static int verity_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned sz = 0;
	unsigned x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
			v->version,
			v->data_dev->name,
			v->hash_dev->name,
			1 << v->data_dev_block_bits,
			1 << v->hash_dev_block_bits,
			(unsigned long long)v->data_blocks,
			(unsigned long long)v->hash_start,
			v->alg_name
			);
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		break;
	}

	return 0;
}

static int verity_ioctl(struct dm_target *ti, unsigned cmd,
			unsigned long arg)
{
	struct dm_verity *v = ti->private;
	int r = 0;

	if (v->data_start ||
	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode,
				     cmd, arg);
}

static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_verity *v = ti->private;
	struct request_queue *q = bdev_get_queue(v->data_dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = v->data_dev->bdev;
	bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int verity_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	return fn(ti, v->data_dev, v->data_start, ti->len, data);
}

static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_verity *v = ti->private;

	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
		limits->logical_block_size = 1 << v->data_dev_block_bits;

	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
		limits->physical_block_size = 1 << v->data_dev_block_bits;

	blk_limits_io_min(limits, limits->logical_block_size);
}

static void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	if (v->vec_mempool)
		mempool_destroy(v->vec_mempool);

	if (v->io_mempool)
		mempool_destroy(v->io_mempool);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kfree(v->salt);
	kfree(v->root_digest);

	if (v->tfm)
		crypto_free_shash(v->tfm);

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	kfree(v);
}

/*
 * Target parameters:
 *	<version>	The current format is version 1.
 *			Vsn 0 is compatible with original Chromium OS releases.
 *	<data device>
 *	<hash device>
 *	<data block size>
 *	<hash block size>
 *	<the number of data blocks>
 *	<hash start block>
 *	<algorithm>
 *	<digest>
 *	<salt>		Hex string or "-" if no salt.
 */
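
/*
 * Illustrative table line (all values hypothetical), matching the parameters
 * parsed below: 1 GiB of data in 4096-byte blocks (2097152 sectors, 262144
 * data blocks), hash area starting at hash block 1, sha256:
 *
 *   0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 \
 *       <root digest in hex> <salt in hex, or "-" for none>
 */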
static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_verity *v;
	unsigned num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;

	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	if ((dm_table_get_mode(ti->table) & ~FMODE_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc != 10) {
		ti->error = "Invalid argument count: exactly 10 arguments required";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], FMODE_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = ffs(num) - 1;

	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = ffs(num) - 1;

	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		r = -ENOMEM;
		goto bad;
	}

	v->tfm = crypto_alloc_shash(v->alg_name, 0, 0);
	if (IS_ERR(v->tfm)) {
		ti->error = "Cannot initialize hash function";
		r = PTR_ERR(v->tfm);
		v->tfm = NULL;
		goto bad;
	}
	v->digest_size = crypto_shash_digestsize(v->tfm);
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		r = -EINVAL;
		goto bad;
	}
	v->shash_descsize =
		sizeof(struct shash_desc) + crypto_shash_descsize(v->tfm);

	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}

	if (strcmp(argv[9], "-")) {
		v->salt_size = strlen(argv[9]) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			r = -ENOMEM;
			goto bad;
		}
		if (strlen(argv[9]) != v->salt_size * 2 ||
		    hex2bin(v->salt, argv[9], v->salt_size)) {
			ti->error = "Invalid salt";
			r = -EINVAL;
			goto bad;
		}
	}

	v->hash_per_block_bits =
		fls((1 << v->hash_dev_block_bits) / v->digest_size) - 1;

	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;
		v->hash_level_block[i] = hash_position;
		s = verity_position_at_level(v, v->data_blocks, i);
		s = (s >> v->hash_per_block_bits) +
		    !!(s & ((1 << v->hash_per_block_bits) - 1));
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_blocks = hash_position;
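
	/*
	 * Worked example (illustrative): 262144 data blocks (1 GiB of
	 * 4096-byte blocks) with 128 hashes per hash block need 3 levels:
	 * 2048 hash blocks at level 0, 16 at level 1 and 1 at level 2, so
	 * hash_blocks ends up at hash_start + 2065.
	 */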

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL;
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
	  sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2);
	if (!v->io_mempool) {
		ti->error = "Cannot allocate io mempool";
		r = -ENOMEM;
		goto bad;
	}

	v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
					BIO_MAX_PAGES * sizeof(struct bio_vec));
	if (!v->vec_mempool) {
		ti->error = "Cannot allocate vector mempool";
		r = -ENOMEM;
		goto bad;
	}

	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	return 0;

bad:
	verity_dtr(ti);

	return r;
}

static struct target_type verity_target = {
	.name		= "verity",
	.version	= {1, 0, 0},
	.module		= THIS_MODULE,
	.ctr		= verity_ctr,
	.dtr		= verity_dtr,
	.map		= verity_map,
	.status		= verity_status,
	.ioctl		= verity_ioctl,
	.merge		= verity_merge,
	.iterate_devices = verity_iterate_devices,
	.io_hints	= verity_io_hints,
};

static int __init dm_verity_init(void)
{
	int r;

	r = dm_register_target(&verity_target);
	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_verity_exit(void)
{
	dm_unregister_target(&verity_target);
}

module_init(dm_verity_init);
module_exit(dm_verity_exit);

MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
MODULE_LICENSE("GPL");