// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than HPolyC
 * but still provably as secure, and also the ε-∆U hash function of HBSH is
 * formally defined to take two inputs (tweak, message) which makes it difficult
 * to wrap with the crypto_shash API.  Rather, some details need to be handled
 * here.  Nevertheless, if needed in the future, support for other ε-∆U hash
 * functions could be added here.
 */
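
/*
 * Editor's sketch for orientation (notation follows the paper; this is a
 * summary, not a normative spec).  With the plaintext split into a bulk part
 * P_L and a final 16-byte block P_R, and T the tweak, encryption is roughly:
 *
 *	P_M = P_R + H_{K_H}(T, P_L)			(first hash step)
 *	C_M = E_{K_E}(P_M)				(block cipher, e.g. AES-256)
 *	C_L = P_L ^ XChaCha(K_S, nonce = C_M || 1 || 0*)	(stream cipher step)
 *	C_R = C_M - H_{K_H}(T, C_L)			(second hash step)
 *
 * where + and - are addition and subtraction in Z/(2^{128}Z).  Decryption runs
 * the same steps in reverse.  These equations mirror the "first hash step" and
 * "second hash step" comments in adiantum_crypt() and adiantum_finish() below.
 */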

#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>

/*
 * Size of the right-hand part of the input data, in bytes; this is also the
 * block cipher's block size and the size of the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16

/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
 * the best performance.  But longer tweaks are useful for fscrypt, to avoid
 * needing to derive per-file keys.  So instead we use two blocks, or 32 bytes.
 */
#define TWEAK_SIZE		32

struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn;
	struct crypto_cipher_spawn blockcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	struct poly1305_core_key header_hash_key;
};

struct adiantum_request_ctx {

	/*
	 * Buffer for right-hand part of data, i.e.
	 *
	 *    P_R => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_R when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

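	/*
	 * Editor's note: the hash and stream cipher sub-requests can share
	 * storage in a union because the steps never run concurrently within
	 * one request; each hash step completes before the next step begins.
	 * This keeps the per-request context as small as possible.
	 */
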
	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};

/*
 * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
 * hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S) */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/* Derive the subkeys */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						  CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;
	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H) */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
	kfree_sensitive(data);
	return err;
}
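
/*
 * Editor's usage sketch (illustrative only; IS_ERR/error handling is elided,
 * and the random 32-byte key is a stand-in -- real users such as fscrypt and
 * dm-crypt supply their own keys):
 *
 *	struct crypto_skcipher *tfm;
 *	u8 key[32];
 *	int err;
 *
 *	get_random_bytes(key, sizeof(key));
 *	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *
 * The key passed in is the stream cipher key K_S; K_E and K_H are derived
 * from it by adiantum_setkey() above.
 */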

/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x + y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
			   (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x - y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
			   (x - y > x));
}
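
/*
 * Editor's example: the expression (x + y < x) is 1 exactly when the low
 * 64-bit addition wrapped around, e.g. x = 0xffffffffffffffff, y = 1 gives
 * x + y = 0, so the carry propagates into the high word.  Likewise,
 * (x - y > x) detects a borrow in le128_sub().
 */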

/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting value
 * is reused in both the first and second hash steps.  Specifically, it's added
 * to the result of an independently keyed ε-∆U hash function (for equal length
 * inputs only) taken over the left-hand part (the "bulk") of the message, to
 * give the overall Adiantum hash of the (tweak, left-hand part) pair.
 */
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
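
/*
 * Editor's note: only the poly1305_core_* interface is used here, and
 * poly1305_core_emit() is passed a NULL nonce, i.e. this computes the raw
 * Poly1305 polynomial evaluation keyed by r (here K_T) without adding the
 * pad value s as a MAC would.  Used this way, Poly1305 is an ε-∆U hash
 * function rather than a MAC.
 */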

/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
				 struct scatterlist *sgl, le128 *digest)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct shash_desc *hash_desc = &rctx->u.hash_desc;
	struct sg_mapping_iter miter;
	unsigned int i, n;
	int err;

	hash_desc->tfm = tctx->hash;

	err = crypto_shash_init(hash_desc);
	if (err)
		return err;

	sg_miter_start(&miter, sgl, sg_nents(sgl),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	for (i = 0; i < bulk_len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, bulk_len - i);
		err = crypto_shash_update(hash_desc, miter.addr, n);
		if (err)
			break;
	}
	sg_miter_stop(&miter);
	if (err)
		return err;

	return crypto_shash_final(hash_desc, (u8 *)digest);
}
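
/*
 * Editor's note: this helper serves both hash steps.  adiantum_crypt() calls
 * it on req->src (hashing P_L or C_L before the stream cipher runs), and
 * adiantum_finish() calls it on req->dst (hashing C_L or P_L after the stream
 * cipher has written its output there).
 */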

/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	le128 digest;
	int err;

	/* If decrypting, decrypt C_M with the block cipher to get P_M */
	if (!rctx->enc)
		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/*
	 * Second hash step
	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
	err = adiantum_hash_message(req, req->dst, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
	return 0;
}

static void adiantum_streamcipher_done(struct crypto_async_request *areq,
				       int err)
{
	struct skcipher_request *req = areq->data;

	if (!err)
		err = adiantum_finish(req);

	skcipher_request_complete(req, err);
}

static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	unsigned int stream_len;
	le128 digest;
	int err;

	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	err = adiantum_hash_message(req, req->src, &digest);
	if (err)
		return err;
	le128_add(&digest, &digest, &rctx->header_hash);
	scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
				 bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But ChaCha
	 * implementations tend to be most efficient when passed a whole number
	 * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
	 * And here it doesn't matter whether the last 16 bytes are written to,
	 * as the second hash step will overwrite them.  Thus, round the XChaCha
	 * length up to the next 64-byte boundary if possible.
	 */
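	/*
	 * Editor's example: for a 4096-byte sector, bulk_len is 4080 and
	 * round_up(4080, 64) = 4096 <= req->cryptlen, so all 4096 bytes are
	 * fed to XChaCha; the final 16 bytes it writes are then overwritten
	 * by the second hash step in adiantum_finish().
	 */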
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
		adiantum_finish(req);
}

static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	unsigned int subreq_size;
	int err;

	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
	if (IS_ERR(streamcipher))
		return PTR_ERR(streamcipher);

	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
	if (IS_ERR(blockcipher)) {
		err = PTR_ERR(blockcipher);
		goto err_free_streamcipher;
	}

	hash = crypto_spawn_shash(&ictx->hash_spawn);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		goto err_free_blockcipher;
	}

	tctx->streamcipher = streamcipher;
	tctx->blockcipher = blockcipher;
	tctx->hash = hash;

	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
		     sizeof(struct adiantum_request_ctx));
	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
				       u.hash_desc) +
			  crypto_shash_descsize(hash),
			  sizeof_field(struct adiantum_request_ctx,
				       u.streamcipher_req) +
			  crypto_skcipher_reqsize(streamcipher));

	crypto_skcipher_set_reqsize(tfm,
				    offsetof(struct adiantum_request_ctx, u) +
				    subreq_size);
	return 0;

err_free_blockcipher:
	crypto_free_cipher(blockcipher);
err_free_streamcipher:
	crypto_free_skcipher(streamcipher);
	return err;
}
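
/*
 * Editor's note: the request size registered above is
 * offsetof(struct adiantum_request_ctx, u) plus the larger of the two union
 * members together with its variable-length tail (the shash descriptor's
 * descsize, or the inner skcipher's reqsize), so either sub-request fits in
 * the same per-request memory.
 */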

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->streamcipher);
	crypto_free_cipher(tctx->blockcipher);
	crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->streamcipher_spawn);
	crypto_drop_cipher(&ictx->blockcipher_spawn);
	crypto_drop_shash(&ictx->hash_spawn);
	kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	return true;
}

static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
				       hash_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.  Thus,
	 * weigh the block cipher's ->cra_priority less.
	 */
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
	inst->alg.ivsize = TWEAK_SIZE;

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}

/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
	.name = "adiantum",
	.create = adiantum_create,
	.module = THIS_MODULE,
};
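
/*
 * Editor's note: instantiating this template with the defaults, e.g. as
 * "adiantum(xchacha12,aes)", yields the configuration recommended by the
 * Adiantum paper (XChaCha12, AES-256, NHPoly1305); the optional third
 * template parameter selects a specific nhpoly1305 implementation.
 */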

static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}

subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);