// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max per-CPU queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq context
	 * and dequeuing from the kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	void *data;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	struct aead_request req;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

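/*
 * Enqueue a request on the current CPU's queue and kick that CPU's worker.
 * Runs with BH disabled so it cannot race with cryptd_queue_worker() on
 * the same CPU.  For transforms obtained through the cryptd_alloc_*()
 * helpers (nonzero refcnt) an extra reference is taken so the transform
 * stays alive while the request is queued.
 */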
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context: do one real crypto operation (via
 * crypto_request_complete()) and reschedule itself if there is more
 * work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);
	crypto_request_complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

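/* Map a transform back to the cryptd queue stored in its instance context. */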
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

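/* Propagate the wrapper's request flags to the child and set its key. */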
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

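/*
 * Restore the caller's completion data and, unless the request is still
 * being reported as -EINPROGRESS (a backlog notification), set up the
 * subrequest that does the actual work on the synchronous child.
 */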
static struct skcipher_request *cryptd_skcipher_prepare(
	struct skcipher_request *req, int err)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *child;

	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	child = ctx->child;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	return subreq;
}

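/*
 * Complete the outer request with BH disabled.  On -EINPROGRESS the
 * completion data is parked in the subrequest again so the final
 * callback is routed back through cryptd; otherwise drop the reference
 * taken at enqueue time, freeing the transform if it was the last one.
 */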
static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
				     crypto_completion_t complete)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	skcipher_request_complete(req, err);
	local_bh_enable();

	if (unlikely(err == -EINPROGRESS)) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_encrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}

static void cryptd_skcipher_decrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_decrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}

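/*
 * Stash the caller's completion in the subrequest, substitute the cryptd
 * callback, and queue the request on the current CPU's queue.
 */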
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg_common *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = alg->ivsize;
	inst->alg.chunksize = alg->chunksize;
	inst->alg.min_keysize = alg->min_keysize;
	inst->alg.max_keysize = alg->max_keysize;

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct ahash_instance *inst = ahash_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
				 struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_clone_shash(ctx->child);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	nctx->child = hash;
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	rctx->data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

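/*
 * Restore the caller's completion data; return the shash descriptor to
 * operate on, or NULL if this is only a backlog (-EINPROGRESS)
 * notification.
 */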
static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
					      int err)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	req->base.complete = rctx->complete;
	req->base.data = rctx->data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	return &rctx->desc;
}

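/*
 * Complete the outer hash request with BH disabled.  On -EINPROGRESS
 * the cryptd callback is re-armed; otherwise the reference taken at
 * enqueue time is dropped.
 */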
static void cryptd_hash_complete(struct ahash_request *req, int err,
				 crypto_completion_t complete)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	ahash_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_update(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = crypto_shash_final(desc, req->result);

	cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_finup(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.init_tfm = cryptd_hash_init_tfm;
	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
	inst->alg.exit_tfm = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

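/*
 * Run the child AEAD operation for a dequeued request.  The original
 * completion is invoked with BH disabled; on -EINPROGRESS the completion
 * data is parked in the subrequest again, otherwise the reference taken
 * at enqueue time is dropped, freeing the transform if it was the last.
 */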
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req),
			      crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx;
	struct aead_request *subreq;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	subreq = &rctx->req;
	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	err = crypt(subreq);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	aead_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = compl;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
			  cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
			  cryptd_aead_decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
	struct aead_request *subreq = &rctx->req;

	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
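
/*
 * Example (illustrative sketch only; "cbc(aes)" is just a placeholder
 * algorithm name):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_skcipher(ctfm);
 */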

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

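/*
 * True while requests queued via this transform are still pending: the
 * refcount starts at 1 on allocation and is bumped once per queued
 * request.  (The ahash and aead variants below follow the same
 * convention.)
 */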
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
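
/*
 * Example (illustrative sketch only; "sha256" is just a placeholder
 * algorithm name):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	...
 *	cryptd_free_ahash(chash);
 */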

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Software async crypto daemon.
4 *
5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6 *
7 * Added AEAD support to cryptd.
8 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9 * Adrian Hoban <adrian.hoban@intel.com>
10 * Gabriele Paoloni <gabriele.paoloni@intel.com>
11 * Aidan O'Mahony (aidan.o.mahony@intel.com)
12 * Copyright (c) 2010, Intel Corporation.
13 */
14
15#include <crypto/internal/hash.h>
16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/cryptd.h>
19#include <linux/refcount.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/workqueue.h>
29
30static unsigned int cryptd_max_cpu_qlen = 1000;
31module_param(cryptd_max_cpu_qlen, uint, 0);
32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
33
34static struct workqueue_struct *cryptd_wq;
35
36struct cryptd_cpu_queue {
37 struct crypto_queue queue;
38 struct work_struct work;
39};
40
41struct cryptd_queue {
42 struct cryptd_cpu_queue __percpu *cpu_queue;
43};
44
45struct cryptd_instance_ctx {
46 struct crypto_spawn spawn;
47 struct cryptd_queue *queue;
48};
49
50struct skcipherd_instance_ctx {
51 struct crypto_skcipher_spawn spawn;
52 struct cryptd_queue *queue;
53};
54
55struct hashd_instance_ctx {
56 struct crypto_shash_spawn spawn;
57 struct cryptd_queue *queue;
58};
59
60struct aead_instance_ctx {
61 struct crypto_aead_spawn aead_spawn;
62 struct cryptd_queue *queue;
63};
64
65struct cryptd_skcipher_ctx {
66 refcount_t refcnt;
67 struct crypto_sync_skcipher *child;
68};
69
70struct cryptd_skcipher_request_ctx {
71 crypto_completion_t complete;
72};
73
74struct cryptd_hash_ctx {
75 refcount_t refcnt;
76 struct crypto_shash *child;
77};
78
79struct cryptd_hash_request_ctx {
80 crypto_completion_t complete;
81 struct shash_desc desc;
82};
83
84struct cryptd_aead_ctx {
85 refcount_t refcnt;
86 struct crypto_aead *child;
87};
88
89struct cryptd_aead_request_ctx {
90 crypto_completion_t complete;
91};
92
93static void cryptd_queue_worker(struct work_struct *work);
94
95static int cryptd_init_queue(struct cryptd_queue *queue,
96 unsigned int max_cpu_qlen)
97{
98 int cpu;
99 struct cryptd_cpu_queue *cpu_queue;
100
101 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
102 if (!queue->cpu_queue)
103 return -ENOMEM;
104 for_each_possible_cpu(cpu) {
105 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
106 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
107 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
108 }
109 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
110 return 0;
111}
112
113static void cryptd_fini_queue(struct cryptd_queue *queue)
114{
115 int cpu;
116 struct cryptd_cpu_queue *cpu_queue;
117
118 for_each_possible_cpu(cpu) {
119 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
120 BUG_ON(cpu_queue->queue.qlen);
121 }
122 free_percpu(queue->cpu_queue);
123}
124
125static int cryptd_enqueue_request(struct cryptd_queue *queue,
126 struct crypto_async_request *request)
127{
128 int cpu, err;
129 struct cryptd_cpu_queue *cpu_queue;
130 refcount_t *refcnt;
131
132 cpu = get_cpu();
133 cpu_queue = this_cpu_ptr(queue->cpu_queue);
134 err = crypto_enqueue_request(&cpu_queue->queue, request);
135
136 refcnt = crypto_tfm_ctx(request->tfm);
137
138 if (err == -ENOSPC)
139 goto out_put_cpu;
140
141 queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
142
143 if (!refcount_read(refcnt))
144 goto out_put_cpu;
145
146 refcount_inc(refcnt);
147
148out_put_cpu:
149 put_cpu();
150
151 return err;
152}
153
154/* Called in workqueue context, do one real cryption work (via
155 * req->complete) and reschedule itself if there are more work to
156 * do. */
157static void cryptd_queue_worker(struct work_struct *work)
158{
159 struct cryptd_cpu_queue *cpu_queue;
160 struct crypto_async_request *req, *backlog;
161
162 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
163 /*
164 * Only handle one request at a time to avoid hogging crypto workqueue.
165 * preempt_disable/enable is used to prevent being preempted by
166 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
167 * cryptd_enqueue_request() being accessed from software interrupts.
168 */
169 local_bh_disable();
170 preempt_disable();
171 backlog = crypto_get_backlog(&cpu_queue->queue);
172 req = crypto_dequeue_request(&cpu_queue->queue);
173 preempt_enable();
174 local_bh_enable();
175
176 if (!req)
177 return;
178
179 if (backlog)
180 backlog->complete(backlog, -EINPROGRESS);
181 req->complete(req, 0);
182
183 if (cpu_queue->queue.qlen)
184 queue_work(cryptd_wq, &cpu_queue->work);
185}
186
187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
188{
189 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
190 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
191 return ictx->queue;
192}
193
194static void cryptd_type_and_mask(struct crypto_attr_type *algt,
195 u32 *type, u32 *mask)
196{
197 /*
198 * cryptd is allowed to wrap internal algorithms, but in that case the
199 * resulting cryptd instance will be marked as internal as well.
200 */
201 *type = algt->type & CRYPTO_ALG_INTERNAL;
202 *mask = algt->mask & CRYPTO_ALG_INTERNAL;
203
204 /* No point in cryptd wrapping an algorithm that's already async. */
205 *mask |= CRYPTO_ALG_ASYNC;
206
207 *mask |= crypto_algt_inherited_mask(algt);
208}
209
210static int cryptd_init_instance(struct crypto_instance *inst,
211 struct crypto_alg *alg)
212{
213 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
214 "cryptd(%s)",
215 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
216 return -ENAMETOOLONG;
217
218 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219
220 inst->alg.cra_priority = alg->cra_priority + 50;
221 inst->alg.cra_blocksize = alg->cra_blocksize;
222 inst->alg.cra_alignmask = alg->cra_alignmask;
223
224 return 0;
225}
226
227static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
228 const u8 *key, unsigned int keylen)
229{
230 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
231 struct crypto_sync_skcipher *child = ctx->child;
232
233 crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
234 crypto_sync_skcipher_set_flags(child,
235 crypto_skcipher_get_flags(parent) &
236 CRYPTO_TFM_REQ_MASK);
237 return crypto_sync_skcipher_setkey(child, key, keylen);
238}
239
240static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
241{
242 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
243 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
244 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
245 int refcnt = refcount_read(&ctx->refcnt);
246
247 local_bh_disable();
248 rctx->complete(&req->base, err);
249 local_bh_enable();
250
251 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
252 crypto_free_skcipher(tfm);
253}
254
255static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
256 int err)
257{
258 struct skcipher_request *req = skcipher_request_cast(base);
259 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
260 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
261 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
262 struct crypto_sync_skcipher *child = ctx->child;
263 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
264
265 if (unlikely(err == -EINPROGRESS))
266 goto out;
267
268 skcipher_request_set_sync_tfm(subreq, child);
269 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
270 NULL, NULL);
271 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
272 req->iv);
273
274 err = crypto_skcipher_encrypt(subreq);
275 skcipher_request_zero(subreq);
276
277 req->base.complete = rctx->complete;
278
279out:
280 cryptd_skcipher_complete(req, err);
281}
282
283static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
284 int err)
285{
286 struct skcipher_request *req = skcipher_request_cast(base);
287 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
288 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
289 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
290 struct crypto_sync_skcipher *child = ctx->child;
291 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
292
293 if (unlikely(err == -EINPROGRESS))
294 goto out;
295
296 skcipher_request_set_sync_tfm(subreq, child);
297 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
298 NULL, NULL);
299 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
300 req->iv);
301
302 err = crypto_skcipher_decrypt(subreq);
303 skcipher_request_zero(subreq);
304
305 req->base.complete = rctx->complete;
306
307out:
308 cryptd_skcipher_complete(req, err);
309}
310
311static int cryptd_skcipher_enqueue(struct skcipher_request *req,
312 crypto_completion_t compl)
313{
314 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
315 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
316 struct cryptd_queue *queue;
317
318 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
319 rctx->complete = req->base.complete;
320 req->base.complete = compl;
321
322 return cryptd_enqueue_request(queue, &req->base);
323}
324
325static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
326{
327 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
328}
329
330static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
331{
332 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
333}
334
335static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
336{
337 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
338 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
339 struct crypto_skcipher_spawn *spawn = &ictx->spawn;
340 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
341 struct crypto_skcipher *cipher;
342
343 cipher = crypto_spawn_skcipher(spawn);
344 if (IS_ERR(cipher))
345 return PTR_ERR(cipher);
346
347 ctx->child = (struct crypto_sync_skcipher *)cipher;
348 crypto_skcipher_set_reqsize(
349 tfm, sizeof(struct cryptd_skcipher_request_ctx));
350 return 0;
351}
352
353static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
354{
355 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
356
357 crypto_free_sync_skcipher(ctx->child);
358}
359
360static void cryptd_skcipher_free(struct skcipher_instance *inst)
361{
362 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
363
364 crypto_drop_skcipher(&ctx->spawn);
365 kfree(inst);
366}
367
368static int cryptd_create_skcipher(struct crypto_template *tmpl,
369 struct rtattr **tb,
370 struct crypto_attr_type *algt,
371 struct cryptd_queue *queue)
372{
373 struct skcipherd_instance_ctx *ctx;
374 struct skcipher_instance *inst;
375 struct skcipher_alg *alg;
376 u32 type;
377 u32 mask;
378 int err;
379
380 cryptd_type_and_mask(algt, &type, &mask);
381
382 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
383 if (!inst)
384 return -ENOMEM;
385
386 ctx = skcipher_instance_ctx(inst);
387 ctx->queue = queue;
388
389 err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
390 crypto_attr_alg_name(tb[1]), type, mask);
391 if (err)
392 goto err_free_inst;
393
394 alg = crypto_spawn_skcipher_alg(&ctx->spawn);
395 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
396 if (err)
397 goto err_free_inst;
398
399 inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
400 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
401 inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
402 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
403 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
404 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
405
406 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
407
408 inst->alg.init = cryptd_skcipher_init_tfm;
409 inst->alg.exit = cryptd_skcipher_exit_tfm;
410
411 inst->alg.setkey = cryptd_skcipher_setkey;
412 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
413 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
414
415 inst->free = cryptd_skcipher_free;
416
417 err = skcipher_register_instance(tmpl, inst);
418 if (err) {
419err_free_inst:
420 cryptd_skcipher_free(inst);
421 }
422 return err;
423}
424
425static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
426{
427 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
428 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
429 struct crypto_shash_spawn *spawn = &ictx->spawn;
430 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
431 struct crypto_shash *hash;
432
433 hash = crypto_spawn_shash(spawn);
434 if (IS_ERR(hash))
435 return PTR_ERR(hash);
436
437 ctx->child = hash;
438 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
439 sizeof(struct cryptd_hash_request_ctx) +
440 crypto_shash_descsize(hash));
441 return 0;
442}
443
444static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
445{
446 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
447
448 crypto_free_shash(ctx->child);
449}
450
451static int cryptd_hash_setkey(struct crypto_ahash *parent,
452 const u8 *key, unsigned int keylen)
453{
454 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
455 struct crypto_shash *child = ctx->child;
456
457 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
458 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
459 CRYPTO_TFM_REQ_MASK);
460 return crypto_shash_setkey(child, key, keylen);
461}
462
463static int cryptd_hash_enqueue(struct ahash_request *req,
464 crypto_completion_t compl)
465{
466 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
467 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
468 struct cryptd_queue *queue =
469 cryptd_get_queue(crypto_ahash_tfm(tfm));
470
471 rctx->complete = req->base.complete;
472 req->base.complete = compl;
473
474 return cryptd_enqueue_request(queue, &req->base);
475}
476
477static void cryptd_hash_complete(struct ahash_request *req, int err)
478{
479 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
480 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
481 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
482 int refcnt = refcount_read(&ctx->refcnt);
483
484 local_bh_disable();
485 rctx->complete(&req->base, err);
486 local_bh_enable();
487
488 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
489 crypto_free_ahash(tfm);
490}
491
492static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
493{
494 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
495 struct crypto_shash *child = ctx->child;
496 struct ahash_request *req = ahash_request_cast(req_async);
497 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
498 struct shash_desc *desc = &rctx->desc;
499
500 if (unlikely(err == -EINPROGRESS))
501 goto out;
502
503 desc->tfm = child;
504
505 err = crypto_shash_init(desc);
506
507 req->base.complete = rctx->complete;
508
509out:
510 cryptd_hash_complete(req, err);
511}
512
513static int cryptd_hash_init_enqueue(struct ahash_request *req)
514{
515 return cryptd_hash_enqueue(req, cryptd_hash_init);
516}
517
518static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
519{
520 struct ahash_request *req = ahash_request_cast(req_async);
521 struct cryptd_hash_request_ctx *rctx;
522
523 rctx = ahash_request_ctx(req);
524
525 if (unlikely(err == -EINPROGRESS))
526 goto out;
527
528 err = shash_ahash_update(req, &rctx->desc);
529
530 req->base.complete = rctx->complete;
531
532out:
533 cryptd_hash_complete(req, err);
534}
535
536static int cryptd_hash_update_enqueue(struct ahash_request *req)
537{
538 return cryptd_hash_enqueue(req, cryptd_hash_update);
539}
540
541static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
542{
543 struct ahash_request *req = ahash_request_cast(req_async);
544 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
545
546 if (unlikely(err == -EINPROGRESS))
547 goto out;
548
549 err = crypto_shash_final(&rctx->desc, req->result);
550
551 req->base.complete = rctx->complete;
552
553out:
554 cryptd_hash_complete(req, err);
555}
556
557static int cryptd_hash_final_enqueue(struct ahash_request *req)
558{
559 return cryptd_hash_enqueue(req, cryptd_hash_final);
560}
561
562static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
563{
564 struct ahash_request *req = ahash_request_cast(req_async);
565 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
566
567 if (unlikely(err == -EINPROGRESS))
568 goto out;
569
570 err = shash_ahash_finup(req, &rctx->desc);
571
572 req->base.complete = rctx->complete;
573
574out:
575 cryptd_hash_complete(req, err);
576}
577
578static int cryptd_hash_finup_enqueue(struct ahash_request *req)
579{
580 return cryptd_hash_enqueue(req, cryptd_hash_finup);
581}
582
583static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
584{
585 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
586 struct crypto_shash *child = ctx->child;
587 struct ahash_request *req = ahash_request_cast(req_async);
588 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
589 struct shash_desc *desc = &rctx->desc;
590
591 if (unlikely(err == -EINPROGRESS))
592 goto out;
593
594 desc->tfm = child;
595
596 err = shash_ahash_digest(req, desc);
597
598 req->base.complete = rctx->complete;
599
600out:
601 cryptd_hash_complete(req, err);
602}
603
604static int cryptd_hash_digest_enqueue(struct ahash_request *req)
605{
606 return cryptd_hash_enqueue(req, cryptd_hash_digest);
607}
608
609static int cryptd_hash_export(struct ahash_request *req, void *out)
610{
611 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
612
613 return crypto_shash_export(&rctx->desc, out);
614}
615
616static int cryptd_hash_import(struct ahash_request *req, const void *in)
617{
618 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
619 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
620 struct shash_desc *desc = cryptd_shash_desc(req);
621
622 desc->tfm = ctx->child;
623
624 return crypto_shash_import(desc, in);
625}
626
627static void cryptd_hash_free(struct ahash_instance *inst)
628{
629 struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
630
631 crypto_drop_shash(&ctx->spawn);
632 kfree(inst);
633}
634
635static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
636 struct crypto_attr_type *algt,
637 struct cryptd_queue *queue)
638{
639 struct hashd_instance_ctx *ctx;
640 struct ahash_instance *inst;
641 struct shash_alg *alg;
642 u32 type;
643 u32 mask;
644 int err;
645
646 cryptd_type_and_mask(algt, &type, &mask);
647
648 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
649 if (!inst)
650 return -ENOMEM;
651
652 ctx = ahash_instance_ctx(inst);
653 ctx->queue = queue;
654
655 err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
656 crypto_attr_alg_name(tb[1]), type, mask);
657 if (err)
658 goto err_free_inst;
659 alg = crypto_spawn_shash_alg(&ctx->spawn);
660
661 err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
662 if (err)
663 goto err_free_inst;
664
665 inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
666 (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
667 CRYPTO_ALG_OPTIONAL_KEY));
668 inst->alg.halg.digestsize = alg->digestsize;
669 inst->alg.halg.statesize = alg->statesize;
670 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
671
672 inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
673 inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
674
675 inst->alg.init = cryptd_hash_init_enqueue;
676 inst->alg.update = cryptd_hash_update_enqueue;
677 inst->alg.final = cryptd_hash_final_enqueue;
678 inst->alg.finup = cryptd_hash_finup_enqueue;
679 inst->alg.export = cryptd_hash_export;
680 inst->alg.import = cryptd_hash_import;
681 if (crypto_shash_alg_has_setkey(alg))
682 inst->alg.setkey = cryptd_hash_setkey;
683 inst->alg.digest = cryptd_hash_digest_enqueue;
684
685 inst->free = cryptd_hash_free;
686
687 err = ahash_register_instance(tmpl, inst);
688 if (err) {
689err_free_inst:
690 cryptd_hash_free(inst);
691 }
692 return err;
693}
694
695static int cryptd_aead_setkey(struct crypto_aead *parent,
696 const u8 *key, unsigned int keylen)
697{
698 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
699 struct crypto_aead *child = ctx->child;
700
701 return crypto_aead_setkey(child, key, keylen);
702}
703
704static int cryptd_aead_setauthsize(struct crypto_aead *parent,
705 unsigned int authsize)
706{
707 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
708 struct crypto_aead *child = ctx->child;
709
710 return crypto_aead_setauthsize(child, authsize);
711}
712
713static void cryptd_aead_crypt(struct aead_request *req,
714 struct crypto_aead *child,
715 int err,
716 int (*crypt)(struct aead_request *req))
717{
718 struct cryptd_aead_request_ctx *rctx;
719 struct cryptd_aead_ctx *ctx;
720 crypto_completion_t compl;
721 struct crypto_aead *tfm;
722 int refcnt;
723
724 rctx = aead_request_ctx(req);
725 compl = rctx->complete;
726
727 tfm = crypto_aead_reqtfm(req);
728
729 if (unlikely(err == -EINPROGRESS))
730 goto out;
731 aead_request_set_tfm(req, child);
732 err = crypt( req );
733
734out:
735 ctx = crypto_aead_ctx(tfm);
736 refcnt = refcount_read(&ctx->refcnt);
737
738 local_bh_disable();
739 compl(&req->base, err);
740 local_bh_enable();
741
742 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
743 crypto_free_aead(tfm);
744}
745
746static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
747{
748 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
749 struct crypto_aead *child = ctx->child;
750 struct aead_request *req;
751
752 req = container_of(areq, struct aead_request, base);
753 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
754}
755
756static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
757{
758 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
759 struct crypto_aead *child = ctx->child;
760 struct aead_request *req;
761
762 req = container_of(areq, struct aead_request, base);
763 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
764}
765
766static int cryptd_aead_enqueue(struct aead_request *req,
767 crypto_completion_t compl)
768{
769 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
770 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
771 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
772
773 rctx->complete = req->base.complete;
774 req->base.complete = compl;
775 return cryptd_enqueue_request(queue, &req->base);
776}
777
778static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
779{
780 return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
781}
782
783static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
784{
785 return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
786}
787
static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned int)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

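/*
 * Usage sketch (illustrative only, not part of cryptd itself): any user of
 * the crypto API can instantiate this template by name.  The inner
 * algorithm "cbc(aes)" below is an assumed example.
 */
static int __maybe_unused cryptd_template_example(void)
{
	struct crypto_skcipher *tfm;

	/* The crypto API builds a "cryptd(...)" instance on demand. */
	tfm = crypto_alloc_skcipher("cryptd(cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... set a key and submit requests; they complete from cryptd_wq ... */

	crypto_free_skcipher(tfm);
	return 0;
}
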
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	/* More than one reference means requests are still queued. */
	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
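
/*
 * Usage sketch (illustrative only, an assumed caller): the pattern SIMD
 * glue code typically follows -- allocate a cryptd wrapper, key the
 * underlying child transform, and drop the reference when done.
 */
static int __maybe_unused cryptd_skcipher_example(const u8 *key,
						  unsigned int keylen)
{
	struct cryptd_skcipher *ctfm;
	int err;

	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* cryptd_skcipher_setkey on the parent just forwards to the child,
	 * so keying the child directly is equivalent. */
	err = crypto_skcipher_setkey(cryptd_skcipher_child(ctfm), key, keylen);

	/* ... submit skcipher requests against &ctfm->base ... */

	cryptd_free_skcipher(ctfm);
	return err;
}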

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
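
/*
 * Usage sketch (illustrative only, an assumed caller): compute a digest
 * through a cryptd-wrapped ahash, waiting synchronously for the async
 * completion with the standard crypto_wait helpers.  @out is assumed to
 * be large enough for the digest.
 */
static int __maybe_unused cryptd_ahash_example(const void *data,
					       unsigned int len, u8 *out)
{
	struct ahash_request *req;
	struct cryptd_ahash *ctfm;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	ctfm = cryptd_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	req = ahash_request_alloc(&ctfm->base, GFP_KERNEL);
	if (!req) {
		cryptd_free_ahash(ctfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* Typically returns -EINPROGRESS; crypto_wait_req() sleeps until
	 * the workqueue has run the request and called crypto_req_done(). */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	cryptd_free_ahash(ctfm);
	return err;
}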

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
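
/*
 * Usage sketch (illustrative only, an assumed caller): the pattern AEAD
 * glue code such as the AES-NI RFC4106 driver historically used --
 * allocate the cryptd AEAD wrapper, key the child, and free the wrapper
 * when no requests remain.
 */
static int __maybe_unused cryptd_aead_example(const u8 *key,
					      unsigned int keylen)
{
	struct cryptd_aead *ctfm;
	int err;

	ctfm = cryptd_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* cryptd's setkey/setauthsize forward to the child anyway, so
	 * configuring the child directly is equivalent. */
	err = crypto_aead_setkey(cryptd_aead_child(ctfm), key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(cryptd_aead_child(ctfm), 16);

	/* ... submit aead requests against &ctfm->base; cryptd_aead_queued()
	 * reports whether any are still pending ... */

	cryptd_free_aead(ctfm);
	return err;
}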

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	/* Destroying the workqueue flushes any remaining work items, so no
	 * requests are in flight by the time the queues are torn down. */
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");