crypto/echainiv.c (v4.6)
 
/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
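/*
 * In this version the IV chained from one request to the next is kept in
 * per-CPU storage; echainiv_read_iv() and echainiv_write_iv() below load
 * and store it.
 */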

#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}

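/*
 * Completion handling: on success the sub-request's IV is saved as the
 * per-CPU chaining value and copied back to the caller's IV buffer; any
 * bounce buffer allocated for alignment is then freed.
 */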
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}

static void echainiv_encrypt_complete(struct crypto_async_request *base,
					 int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

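/*
 * Encryption: when src and dst differ, the associated data and plaintext
 * are first copied to dst through the null blkcipher.  If the caller's IV
 * buffer is not aligned for the child algorithm, it is bounced into a
 * freshly allocated buffer.  The IV is then xored with the salt, written
 * into dst just after the associated data, and replaced by the per-CPU
 * chained IV before the inner encryption runs.
 */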
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc, req->dst, req->src,
			req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

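/*
 * Decryption: the IV stored in front of the ciphertext is copied from src
 * back into req->iv, and the inner algorithm is invoked on the remaining
 * req->cryptlen - ivsize bytes, with the IV bytes counted as part of the
 * associated data.
 */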
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

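/*
 * Template instantiation: wrap the underlying AEAD in a geniv instance,
 * rejecting IV sizes that are not a multiple of 4 bytes or exceed
 * MAX_IV_SIZE, and reserve ivsize extra bytes of context for the per-key
 * salt.
 */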
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	inst->free = aead_geniv_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");

crypto/echainiv.c (v6.13.7)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by multiplying
 * it with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

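/*
 * In this version there is no per-CPU chaining state.  The IV is derived
 * directly from the 64-bit sequence number carried in the last eight bytes
 * of the caller's IV, and zero bytes are written into the output buffer in
 * its place.
 */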
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	__be64 nseqno;
	u64 seqno;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	info = req->iv;

	if (req->src != req->dst) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

		skcipher_request_set_sync_tfm(nreq, ctx->sknull);
		skcipher_request_set_callback(nreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(nreq, req->src, req->dst,
					   req->assoclen + req->cryptlen,
					   NULL);

		err = crypto_skcipher_encrypt(nreq);
		if (err)
			return err;
	}

	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	memcpy(&nseqno, info + ivsize - 8, 8);
	seqno = be64_to_cpu(nseqno);
	memset(info, 0, ivsize);

	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

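	/*
	 * Fill each eight-byte word of the IV with (salt_word | 1) * seqno.
	 * Setting the low bit makes the multiplier odd, hence non-zero and
	 * invertible modulo 2^64.
	 */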
	do {
		u64 a;

		memcpy(&a, ctx->salt + ivsize - 8, 8);

		a |= 1;
		a *= seqno;

		memcpy(info + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));

	return crypto_aead_encrypt(subreq);
}

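/*
 * Decryption is unchanged from the v4.6 version above: the IV is copied
 * out of the head of the ciphertext and the remainder is handed to the
 * inner algorithm.
 */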
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

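/*
 * Template instantiation: unlike the v4.6 version, the IV size is no
 * longer capped at a compile-time maximum; any non-zero multiple of
 * eight bytes is accepted.
 */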
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	int err;

	inst = aead_geniv_alloc(tmpl, tb);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	err = aead_register_instance(tmpl, inst);
	if (err) {
free_inst:
		inst->free(inst);
	}
	return err;
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

subsys_initcall(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");
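
For reference, the IV derivation used by the v6.13.7 echainiv_encrypt() above can be reproduced in plain userspace C. The sketch below is not kernel code; the salt and sequence-number values are made up for illustration, and only the arithmetic (big-endian sequence number, then (salt_word | 1) * seqno per eight-byte word with wrapping 64-bit multiplication) mirrors the kernel loop.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Big-endian load of the 64-bit sequence number, like be64_to_cpu() on a
 * __be64 read from the end of the IV buffer. */
static uint64_t load_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t salt[16];	/* per-key random salt; fixed here for demo */
	uint8_t iv[16];		/* caller's IV carrying the sequence number */
	uint8_t out[16];	/* derived IV handed to the inner cipher */
	unsigned int ivsize = sizeof(iv);
	unsigned int n = ivsize;

	memset(salt, 0xab, sizeof(salt));
	memset(iv, 0, sizeof(iv));
	iv[15] = 7;		/* sequence number 7, big endian */

	uint64_t seqno = load_be64(iv + ivsize - 8);

	/* Same loop shape as echainiv_encrypt(): every eight-byte word of
	 * the output becomes (salt_word | 1) * seqno, wrapping mod 2^64. */
	do {
		uint64_t a;

		memcpy(&a, salt + n - 8, 8);
		a |= 1;
		a *= seqno;
		memcpy(out + n - 8, &a, 8);
	} while ((n -= 8));

	for (unsigned int i = 0; i < ivsize; i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}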