v3.15
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: performs one real crypto operation (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue. preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request(). local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() from being executed
	 * in softirq context.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;

	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
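
/*
 * Example (an illustrative sketch, not part of cryptd.c): once cryptd_tmpl
 * is registered, any kernel code can wrap a synchronous algorithm in an
 * asynchronous one simply by asking for a "cryptd(...)" name; cryptd_create()
 * above builds the instance on demand. The function below is hypothetical
 * and only demonstrates the calling convention.
 */
static int example_use_cryptd_template(void)
{
	struct crypto_ablkcipher *tfm;

	/* Resolved via cryptd_create_blkcipher(): the inner "cbc(aes)"
	 * algorithm is looked up and wrapped in an async instance whose
	 * requests are served by cryptd_queue_worker(). */
	tfm = crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set a key and submit ablkcipher_requests as with any async
	 * cipher; completion callbacks fire from the workqueue ... */

	crypto_free_ablkcipher(tfm);
	return 0;
}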

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
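
/*
 * Example (an illustrative sketch): this is roughly how a driver such as
 * the AES-NI glue code uses cryptd_alloc_ablkcipher() -- it wraps its
 * internal synchronous implementation and forwards requests through
 * &cryptd_tfm->base. The "__driver-cbc-aes-aesni" name is an assumed
 * internal driver name used only for illustration; it is not defined here.
 */
static int example_alloc_cryptd_ablkcipher(void)
{
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* The child transform is the synchronous cipher that actually runs
	 * inside cryptd_queue_worker(). */
	pr_info("cryptd child blocksize: %u\n",
		crypto_blkcipher_blocksize(cryptd_ablkcipher_child(cryptd_tfm)));

	cryptd_free_ablkcipher(cryptd_tfm);
	return 0;
}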

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
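
/*
 * Example (an illustrative sketch): typical use of the ahash helpers above,
 * in the style of the ghash-clmulni glue. "__ghash-pclmulqdqni" is an
 * assumed driver name used purely for illustration.
 */
static int example_alloc_cryptd_ahash(void)
{
	struct cryptd_ahash *cryptd_tfm;
	struct crypto_shash *child;

	cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* cryptd_ahash_child() exposes the synchronous shash so callers can
	 * program it directly without a round trip through the queue. */
	child = cryptd_ahash_child(cryptd_tfm);
	pr_info("cryptd child digest size: %u\n",
		crypto_shash_digestsize(child));

	cryptd_free_ahash(cryptd_tfm);
	return 0;
}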

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
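
/*
 * Example (an illustrative sketch): AEAD users follow the same pattern, in
 * the style of the rfc4106 GCM glue. "__driver-gcm-aes-aesni" and the
 * all-zero key are assumptions made only for this example.
 */
static int example_alloc_cryptd_aead(void)
{
	static const u8 key[16];	/* illustrative all-zero AES-128 key */
	struct cryptd_aead *cryptd_tfm;
	int err;

	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* Keys are typically set on the child so that the synchronous and
	 * asynchronous paths share the same expanded key. */
	err = crypto_aead_setkey(cryptd_aead_child(cryptd_tfm), key,
				 sizeof(key));

	cryptd_free_aead(cryptd_tfm);
	return err;
}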

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
v3.5.6

/*
 * The v3.5.6 listing is identical to the v3.15 source above except for
 * cryptd_queue_worker(), which does not yet wrap the dequeue in
 * local_bh_disable()/local_bh_enable():
 */

/*
 * Called in workqueue context: performs one real crypto operation (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue. preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request() */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}