v3.1
/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
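
/*
 * A minimal usage sketch (illustrative, not part of the original file).
 * "chainiv" is a template: it wraps an existing skcipher instance, e.g.
 * as "chainiv(cbc(aes))", and supplies the givencrypt operation for it.
 * In kernels of this era the core's crypto_default_geniv() could pick
 * chainiv for synchronous ciphers (the exact policy depended on kernel
 * version and configuration), so simply allocating the mode sufficed:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	// tfm's givencrypt then generates each IV by chaining
 */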

#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

enum {
	CHAINIV_STATE_INUSE = 0,
};

struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};

struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};

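/*
 * Layout note: both contexts end in a flexible array member; iv[] is
 * allocated inline because chainiv_alloc() below adds the cipher's
 * ivsize to cra_ctxsize. The sync variant holds ctx->lock across the
 * whole encryption; the async variant cannot sleep under a spinlock,
 * so the CHAINIV_STATE_INUSE bit serializes access instead.
 *
 * The chaining invariant maintained below (a sketch, for CBC):
 *
 *	IV(0)     = random bytes from crypto_default_rng
 *	IV(n + 1) = last ciphertext block produced by request n
 *
 * CBC implementations update subreq->info in place, so after
 * crypto_ablkcipher_encrypt() it holds the last ciphertext block,
 * which is copied back into ctx->iv for the next request.
 */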
static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}

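/*
 * First-call hook: installed as the initial givencrypt so the IV can
 * be seeded lazily from the default RNG. Under ctx->lock it replaces
 * itself with chainiv_givencrypt (re-checking the pointer in case a
 * concurrent caller won the race), then falls through to the normal
 * path.
 */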
static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return chainiv_givencrypt(req);
}

static int chainiv_init_common(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	return skcipher_geniv_init(tfm);
}

static int chainiv_init(struct crypto_tfm *tfm)
{
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	return chainiv_init_common(tfm);
}

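/*
 * Drops CHAINIV_STATE_INUSE if the queue is empty, then re-checks the
 * queue: a request enqueued by a concurrent
 * async_chainiv_postpone_request() between the first check and
 * clear_bit() is either seen by the second check here or handled by
 * that enqueuer's own test_and_set_bit(). Only when this context still
 * owns the bit and work remains is the postponed worker scheduled.
 */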
static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_clear_bit();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = queue_work(kcrypto_wq, &ctx->postponed);
	BUG_ON(!queued);

out:
	return err;
}

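/*
 * Slow path: the context is busy, so queue the request. If INUSE can
 * be acquired here after enqueueing, this caller becomes responsible
 * for kicking the worker that drains the queue.
 */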
static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}

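/*
 * Core of the async path; the caller must own CHAINIV_STATE_INUSE, so
 * no spinlock is needed around the IV update. Always finishes through
 * async_chainiv_schedule_work() to release the bit or reschedule.
 */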
static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}

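/*
 * Fast path: try to take CHAINIV_STATE_INUSE directly. Even when the
 * bit is free, a non-empty queue forces the postpone path so earlier
 * requests keep their order.
 */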
static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}

static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err = 0;

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto out;

	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
	    async_chainiv_givencrypt_first)
		goto unlock;

	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
				   crypto_ablkcipher_ivsize(geniv));

unlock:
	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

	if (err)
		return err;

out:
	return async_chainiv_givencrypt(req);
}

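/*
 * Workqueue callback: dequeues and processes a single postponed
 * request per invocation, rescheduling itself through
 * async_chainiv_schedule_work(). MAY_SLEEP is set because the work
 * item runs in process context; completion runs with BHs disabled to
 * match the softirq context completions normally arrive in.
 */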
static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;
	int err;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	err = async_chainiv_givencrypt_tail(req);

	local_bh_disable();
	skcipher_givcrypt_complete(req, err);
	local_bh_enable();
}

static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	return chainiv_init_common(tfm);
}

static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;

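/*
 * Template instantiation: picks the sync implementation by default,
 * and the async one (with its queue and worker) unless the user asked
 * for a synchronous-only algorithm; either way the IV buffer is
 * appended to the context by enlarging cra_ctxsize by the cipher's
 * ivsize.
 */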
static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto put_rng;

	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_ablkcipher.givencrypt =
			async_chainiv_givencrypt_first;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;

put_rng:
	crypto_put_default_rng();
	goto out;
}

static void chainiv_free(struct crypto_instance *inst)
{
	skcipher_geniv_free(inst);
	crypto_put_default_rng();
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = chainiv_free,
	.module = THIS_MODULE,
};

static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

static void chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}

module_init(chainiv_module_init);
module_exit(chainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");
v4.6
/*
 * chainiv: Chain IV Generator
 *
 * Generate IVs simply by using the last block of the previous encryption.
 * This is mainly useful for CBC with a synchronous algorithm.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
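
/*
 * Note on this version: the IV is seeded from the default RNG once at
 * tfm init time (see chainiv_init_common() below) instead of lazily on
 * the first givencrypt call, so the *_givencrypt_first() helpers of
 * the earlier listing are gone, and the RNG reference is dropped as
 * soon as the IV has been generated.
 */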

#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

enum {
	CHAINIV_STATE_INUSE = 0,
};

struct chainiv_ctx {
	spinlock_t lock;
	char iv[];
};

struct async_chainiv_ctx {
	unsigned long state;

	spinlock_t lock;
	int err;

	struct crypto_queue queue;
	struct work_struct postponed;

	char iv[];
};

static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags &
						~CRYPTO_TFM_REQ_MAY_SLEEP,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	spin_lock_bh(&ctx->lock);

	ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (err)
		goto unlock;

	memcpy(ctx->iv, subreq->info, ivsize);

unlock:
	spin_unlock_bh(&ctx->lock);

	return err;
}

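/*
 * A non-NULL iv tells us the caller obtained the default RNG; seed the
 * IV here and drop the RNG reference immediately, since it is only
 * needed for this initial fill.
 */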
static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	int err = 0;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	if (iv) {
		err = crypto_rng_get_bytes(crypto_default_rng, iv,
					   crypto_ablkcipher_ivsize(geniv));
		crypto_put_default_rng();
	}

	return err ?: skcipher_geniv_init(tfm);
}

static int chainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
	char *iv;

	spin_lock_init(&ctx->lock);

	iv = NULL;
	if (!crypto_get_default_rng()) {
		crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
		iv = ctx->iv;
	}

	return chainiv_init_common(tfm, iv);
}

static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
{
	int queued;
	int err = ctx->err;

	if (!ctx->queue.qlen) {
		smp_mb__before_atomic();
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);

		if (!ctx->queue.qlen ||
		    test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
			goto out;
	}

	queued = queue_work(kcrypto_wq, &ctx->postponed);
	BUG_ON(!queued);

out:
	return err;
}

static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_bh(&ctx->lock);
	err = skcipher_enqueue_givcrypt(&ctx->queue, req);
	spin_unlock_bh(&ctx->lock);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		return err;

	ctx->err = err;
	return async_chainiv_schedule_work(ctx);
}

static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(geniv);

	memcpy(req->giv, ctx->iv, ivsize);
	memcpy(subreq->info, ctx->iv, ivsize);

	ctx->err = crypto_ablkcipher_encrypt(subreq);
	if (ctx->err)
		goto out;

	memcpy(ctx->iv, subreq->info, ivsize);

out:
	return async_chainiv_schedule_work(ctx);
}

static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
	ablkcipher_request_set_callback(subreq, req->creq.base.flags,
					req->creq.base.complete,
					req->creq.base.data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, req->creq.info);

	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
		goto postpone;

	if (ctx->queue.qlen) {
		clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
		goto postpone;
	}

	return async_chainiv_givencrypt_tail(req);

postpone:
	return async_chainiv_postpone_request(req);
}

static void async_chainiv_do_postponed(struct work_struct *work)
{
	struct async_chainiv_ctx *ctx = container_of(work,
						     struct async_chainiv_ctx,
						     postponed);
	struct skcipher_givcrypt_request *req;
	struct ablkcipher_request *subreq;
	int err;

	/* Only handle one request at a time to avoid hogging keventd. */
	spin_lock_bh(&ctx->lock);
	req = skcipher_dequeue_givcrypt(&ctx->queue);
	spin_unlock_bh(&ctx->lock);

	if (!req) {
		async_chainiv_schedule_work(ctx);
		return;
	}

	subreq = skcipher_givcrypt_reqctx(req);
	subreq->base.flags |= CRYPTO_TFM_REQ_MAY_SLEEP;

	err = async_chainiv_givencrypt_tail(req);

	local_bh_disable();
	skcipher_givcrypt_complete(req, err);
	local_bh_enable();
}

static int async_chainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);
	char *iv;

	spin_lock_init(&ctx->lock);

	crypto_init_queue(&ctx->queue, 100);
	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);

	iv = NULL;
	if (!crypto_get_default_rng()) {
		crypto_ablkcipher_crt(geniv)->givencrypt =
			async_chainiv_givencrypt;
		iv = ctx->iv;
	}

	return chainiv_init_common(tfm, iv);
}

static void async_chainiv_exit(struct crypto_tfm *tfm)
{
	struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen);

	skcipher_geniv_exit(tfm);
}

static struct crypto_template chainiv_tmpl;

static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_init = chainiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize = sizeof(struct chainiv_ctx);

	if (!crypto_requires_sync(algt->type, algt->mask)) {
		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;

		inst->alg.cra_init = async_chainiv_init;
		inst->alg.cra_exit = async_chainiv_exit;

		inst->alg.cra_ctxsize = sizeof(struct async_chainiv_ctx);
	}

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
	return inst;
}

static struct crypto_template chainiv_tmpl = {
	.name = "chainiv",
	.alloc = chainiv_alloc,
	.free = skcipher_geniv_free,
	.module = THIS_MODULE,
};

static int __init chainiv_module_init(void)
{
	return crypto_register_template(&chainiv_tmpl);
}

static void chainiv_module_exit(void)
{
	crypto_unregister_template(&chainiv_tmpl);
}

module_init(chainiv_module_init);
module_exit(chainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Chain IV Generator");
MODULE_ALIAS_CRYPTO("chainiv");