// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
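
/*
 * cryptd_max_cpu_qlen can only be set at load time (perm 0 keeps it out of
 * sysfs).  Illustrative examples of raising the per-CPU queue depth (the
 * value 2000 is arbitrary):
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=2000
 *
 * or, with cryptd built into the kernel, on the kernel command line:
 *
 *	cryptd.cryptd_max_cpu_qlen=2000
 */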

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq context
	 * and dequeuing from the kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

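	/*
	 * ctx->refcnt is only initialised (to one) by the cryptd_alloc_*()
	 * helpers at the bottom of this file; for transforms allocated
	 * directly through the crypto API it reads zero and no reference is
	 * taken.  A reference taken here is dropped by the completion
	 * handlers once the request has finished.
	 */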
	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context; do one real encryption/decryption job (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
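
/*
 * Naming example (the driver name is illustrative only): wrapping an
 * algorithm with cra_driver_name "sha256-avx2" produces an instance whose
 * cra_driver_name is "cryptd(sha256-avx2)", while cra_name is copied from
 * the inner algorithm unchanged and the priority is bumped by 50.
 */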

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}
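
/*
 * The enqueue path parks the caller's completion callback in the request
 * context and substitutes cryptd_skcipher_encrypt()/_decrypt(); the
 * workqueue worker later invokes that substitute via req->complete, which
 * runs the child cipher and only then calls back the original completion.
 */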

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
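	/*
	 * The child AEAD runs on this very request (cryptd_aead_crypt()
	 * retargets it with aead_request_set_tfm()), so the request context
	 * only needs to fit the larger of the two users, not both at once;
	 * hence max() rather than a sum below.
	 */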
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};
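
/*
 * Illustrative instantiation through the template (the algorithm name is
 * an example only):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256-generic)", 0, 0);
 *
 * This offloads the synchronous shash to the cryptd workqueue.  The
 * cryptd_alloc_*() helpers below perform the same lookup but also
 * initialise the per-tfm refcount consumed by cryptd_enqueue_request().
 */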

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
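
/*
 * Minimal usage sketch for the helper above, assuming a synchronous inner
 * driver named "ecb-aes-example" (a made-up name) and eliding request
 * setup and error handling beyond allocation:
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("ecb-aes-example", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	crypto_skcipher_setkey(&ctfm->base, key, keylen);
 *	... submit skcipher requests against &ctfm->base ...
 *	cryptd_free_skcipher(ctfm);
 *
 * cryptd_skcipher_child() below exposes the wrapped synchronous tfm for
 * callers that want to invoke it directly when already in a safe context.
 */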

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Software async crypto daemon.
4 *
5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6 *
7 * Added AEAD support to cryptd.
8 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9 * Adrian Hoban <adrian.hoban@intel.com>
10 * Gabriele Paoloni <gabriele.paoloni@intel.com>
11 * Aidan O'Mahony (aidan.o.mahony@intel.com)
12 * Copyright (c) 2010, Intel Corporation.
13 */
14
15#include <crypto/internal/hash.h>
16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/cryptd.h>
19#include <linux/refcount.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/workqueue.h>
29
30static unsigned int cryptd_max_cpu_qlen = 1000;
31module_param(cryptd_max_cpu_qlen, uint, 0);
32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
33
34static struct workqueue_struct *cryptd_wq;
35
36struct cryptd_cpu_queue {
37 struct crypto_queue queue;
38 struct work_struct work;
39};
40
41struct cryptd_queue {
42 /*
43 * Protected by disabling BH to allow enqueueing from softinterrupt and
44 * dequeuing from kworker (cryptd_queue_worker()).
45 */
46 struct cryptd_cpu_queue __percpu *cpu_queue;
47};
48
49struct cryptd_instance_ctx {
50 struct crypto_spawn spawn;
51 struct cryptd_queue *queue;
52};
53
54struct skcipherd_instance_ctx {
55 struct crypto_skcipher_spawn spawn;
56 struct cryptd_queue *queue;
57};
58
59struct hashd_instance_ctx {
60 struct crypto_shash_spawn spawn;
61 struct cryptd_queue *queue;
62};
63
64struct aead_instance_ctx {
65 struct crypto_aead_spawn aead_spawn;
66 struct cryptd_queue *queue;
67};
68
69struct cryptd_skcipher_ctx {
70 refcount_t refcnt;
71 struct crypto_skcipher *child;
72};
73
74struct cryptd_skcipher_request_ctx {
75 struct skcipher_request req;
76};
77
78struct cryptd_hash_ctx {
79 refcount_t refcnt;
80 struct crypto_shash *child;
81};
82
83struct cryptd_hash_request_ctx {
84 crypto_completion_t complete;
85 void *data;
86 struct shash_desc desc;
87};
88
89struct cryptd_aead_ctx {
90 refcount_t refcnt;
91 struct crypto_aead *child;
92};
93
94struct cryptd_aead_request_ctx {
95 struct aead_request req;
96};
97
98static void cryptd_queue_worker(struct work_struct *work);
99
100static int cryptd_init_queue(struct cryptd_queue *queue,
101 unsigned int max_cpu_qlen)
102{
103 int cpu;
104 struct cryptd_cpu_queue *cpu_queue;
105
106 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107 if (!queue->cpu_queue)
108 return -ENOMEM;
109 for_each_possible_cpu(cpu) {
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
112 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
113 }
114 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
115 return 0;
116}
117
118static void cryptd_fini_queue(struct cryptd_queue *queue)
119{
120 int cpu;
121 struct cryptd_cpu_queue *cpu_queue;
122
123 for_each_possible_cpu(cpu) {
124 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
125 BUG_ON(cpu_queue->queue.qlen);
126 }
127 free_percpu(queue->cpu_queue);
128}
129
130static int cryptd_enqueue_request(struct cryptd_queue *queue,
131 struct crypto_async_request *request)
132{
133 int err;
134 struct cryptd_cpu_queue *cpu_queue;
135 refcount_t *refcnt;
136
137 local_bh_disable();
138 cpu_queue = this_cpu_ptr(queue->cpu_queue);
139 err = crypto_enqueue_request(&cpu_queue->queue, request);
140
141 refcnt = crypto_tfm_ctx(request->tfm);
142
143 if (err == -ENOSPC)
144 goto out;
145
146 queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
147
148 if (!refcount_read(refcnt))
149 goto out;
150
151 refcount_inc(refcnt);
152
153out:
154 local_bh_enable();
155
156 return err;
157}
158
159/* Called in workqueue context, do one real cryption work (via
160 * req->complete) and reschedule itself if there are more work to
161 * do. */
162static void cryptd_queue_worker(struct work_struct *work)
163{
164 struct cryptd_cpu_queue *cpu_queue;
165 struct crypto_async_request *req, *backlog;
166
167 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
168 /*
169 * Only handle one request at a time to avoid hogging crypto workqueue.
170 */
171 local_bh_disable();
172 backlog = crypto_get_backlog(&cpu_queue->queue);
173 req = crypto_dequeue_request(&cpu_queue->queue);
174 local_bh_enable();
175
176 if (!req)
177 return;
178
179 if (backlog)
180 crypto_request_complete(backlog, -EINPROGRESS);
181 crypto_request_complete(req, 0);
182
183 if (cpu_queue->queue.qlen)
184 queue_work(cryptd_wq, &cpu_queue->work);
185}
186
187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
188{
189 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
190 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
191 return ictx->queue;
192}
193
194static void cryptd_type_and_mask(struct crypto_attr_type *algt,
195 u32 *type, u32 *mask)
196{
197 /*
198 * cryptd is allowed to wrap internal algorithms, but in that case the
199 * resulting cryptd instance will be marked as internal as well.
200 */
201 *type = algt->type & CRYPTO_ALG_INTERNAL;
202 *mask = algt->mask & CRYPTO_ALG_INTERNAL;
203
204 /* No point in cryptd wrapping an algorithm that's already async. */
205 *mask |= CRYPTO_ALG_ASYNC;
206
207 *mask |= crypto_algt_inherited_mask(algt);
208}
209
210static int cryptd_init_instance(struct crypto_instance *inst,
211 struct crypto_alg *alg)
212{
213 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
214 "cryptd(%s)",
215 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
216 return -ENAMETOOLONG;
217
218 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219
220 inst->alg.cra_priority = alg->cra_priority + 50;
221 inst->alg.cra_blocksize = alg->cra_blocksize;
222 inst->alg.cra_alignmask = alg->cra_alignmask;
223
224 return 0;
225}
226
227static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
228 const u8 *key, unsigned int keylen)
229{
230 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
231 struct crypto_skcipher *child = ctx->child;
232
233 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
234 crypto_skcipher_set_flags(child,
235 crypto_skcipher_get_flags(parent) &
236 CRYPTO_TFM_REQ_MASK);
237 return crypto_skcipher_setkey(child, key, keylen);
238}
239
240static struct skcipher_request *cryptd_skcipher_prepare(
241 struct skcipher_request *req, int err)
242{
243 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
244 struct skcipher_request *subreq = &rctx->req;
245 struct cryptd_skcipher_ctx *ctx;
246 struct crypto_skcipher *child;
247
248 req->base.complete = subreq->base.complete;
249 req->base.data = subreq->base.data;
250
251 if (unlikely(err == -EINPROGRESS))
252 return NULL;
253
254 ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
255 child = ctx->child;
256
257 skcipher_request_set_tfm(subreq, child);
258 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
259 NULL, NULL);
260 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
261 req->iv);
262
263 return subreq;
264}
265
266static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
267 crypto_completion_t complete)
268{
269 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
270 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
271 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
272 struct skcipher_request *subreq = &rctx->req;
273 int refcnt = refcount_read(&ctx->refcnt);
274
275 local_bh_disable();
276 skcipher_request_complete(req, err);
277 local_bh_enable();
278
279 if (unlikely(err == -EINPROGRESS)) {
280 subreq->base.complete = req->base.complete;
281 subreq->base.data = req->base.data;
282 req->base.complete = complete;
283 req->base.data = req;
284 } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
285 crypto_free_skcipher(tfm);
286}
287
288static void cryptd_skcipher_encrypt(void *data, int err)
289{
290 struct skcipher_request *req = data;
291 struct skcipher_request *subreq;
292
293 subreq = cryptd_skcipher_prepare(req, err);
294 if (likely(subreq))
295 err = crypto_skcipher_encrypt(subreq);
296
297 cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
298}
299
300static void cryptd_skcipher_decrypt(void *data, int err)
301{
302 struct skcipher_request *req = data;
303 struct skcipher_request *subreq;
304
305 subreq = cryptd_skcipher_prepare(req, err);
306 if (likely(subreq))
307 err = crypto_skcipher_decrypt(subreq);
308
309 cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
310}
311
312static int cryptd_skcipher_enqueue(struct skcipher_request *req,
313 crypto_completion_t compl)
314{
315 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
316 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
317 struct skcipher_request *subreq = &rctx->req;
318 struct cryptd_queue *queue;
319
320 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
321 subreq->base.complete = req->base.complete;
322 subreq->base.data = req->base.data;
323 req->base.complete = compl;
324 req->base.data = req;
325
326 return cryptd_enqueue_request(queue, &req->base);
327}
328
329static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
330{
331 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
332}
333
334static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
335{
336 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
337}
338
339static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
340{
341 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
342 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
343 struct crypto_skcipher_spawn *spawn = &ictx->spawn;
344 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
345 struct crypto_skcipher *cipher;
346
347 cipher = crypto_spawn_skcipher(spawn);
348 if (IS_ERR(cipher))
349 return PTR_ERR(cipher);
350
351 ctx->child = cipher;
352 crypto_skcipher_set_reqsize(
353 tfm, sizeof(struct cryptd_skcipher_request_ctx) +
354 crypto_skcipher_reqsize(cipher));
355 return 0;
356}
357
358static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
359{
360 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
361
362 crypto_free_skcipher(ctx->child);
363}
364
365static void cryptd_skcipher_free(struct skcipher_instance *inst)
366{
367 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
368
369 crypto_drop_skcipher(&ctx->spawn);
370 kfree(inst);
371}
372
373static int cryptd_create_skcipher(struct crypto_template *tmpl,
374 struct rtattr **tb,
375 struct crypto_attr_type *algt,
376 struct cryptd_queue *queue)
377{
378 struct skcipherd_instance_ctx *ctx;
379 struct skcipher_instance *inst;
380 struct skcipher_alg_common *alg;
381 u32 type;
382 u32 mask;
383 int err;
384
385 cryptd_type_and_mask(algt, &type, &mask);
386
387 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
388 if (!inst)
389 return -ENOMEM;
390
391 ctx = skcipher_instance_ctx(inst);
392 ctx->queue = queue;
393
394 err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
395 crypto_attr_alg_name(tb[1]), type, mask);
396 if (err)
397 goto err_free_inst;
398
399 alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
400 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
401 if (err)
402 goto err_free_inst;
403
404 inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
405 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
406 inst->alg.ivsize = alg->ivsize;
407 inst->alg.chunksize = alg->chunksize;
408 inst->alg.min_keysize = alg->min_keysize;
409 inst->alg.max_keysize = alg->max_keysize;
410
411 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
412
413 inst->alg.init = cryptd_skcipher_init_tfm;
414 inst->alg.exit = cryptd_skcipher_exit_tfm;
415
416 inst->alg.setkey = cryptd_skcipher_setkey;
417 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
418 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
419
420 inst->free = cryptd_skcipher_free;
421
422 err = skcipher_register_instance(tmpl, inst);
423 if (err) {
424err_free_inst:
425 cryptd_skcipher_free(inst);
426 }
427 return err;
428}
429
430static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
431{
432 struct ahash_instance *inst = ahash_alg_instance(tfm);
433 struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
434 struct crypto_shash_spawn *spawn = &ictx->spawn;
435 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
436 struct crypto_shash *hash;
437
438 hash = crypto_spawn_shash(spawn);
439 if (IS_ERR(hash))
440 return PTR_ERR(hash);
441
442 ctx->child = hash;
443 crypto_ahash_set_reqsize(tfm,
444 sizeof(struct cryptd_hash_request_ctx) +
445 crypto_shash_descsize(hash));
446 return 0;
447}
448
449static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
450 struct crypto_ahash *tfm)
451{
452 struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
453 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
454 struct crypto_shash *hash;
455
456 hash = crypto_clone_shash(ctx->child);
457 if (IS_ERR(hash))
458 return PTR_ERR(hash);
459
460 nctx->child = hash;
461 return 0;
462}
463
464static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
465{
466 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
467
468 crypto_free_shash(ctx->child);
469}
470
471static int cryptd_hash_setkey(struct crypto_ahash *parent,
472 const u8 *key, unsigned int keylen)
473{
474 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
475 struct crypto_shash *child = ctx->child;
476
477 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
478 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
479 CRYPTO_TFM_REQ_MASK);
480 return crypto_shash_setkey(child, key, keylen);
481}
482
483static int cryptd_hash_enqueue(struct ahash_request *req,
484 crypto_completion_t compl)
485{
486 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
487 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
488 struct cryptd_queue *queue =
489 cryptd_get_queue(crypto_ahash_tfm(tfm));
490
491 rctx->complete = req->base.complete;
492 rctx->data = req->base.data;
493 req->base.complete = compl;
494 req->base.data = req;
495
496 return cryptd_enqueue_request(queue, &req->base);
497}
498
499static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
500 int err)
501{
502 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
503
504 req->base.complete = rctx->complete;
505 req->base.data = rctx->data;
506
507 if (unlikely(err == -EINPROGRESS))
508 return NULL;
509
510 return &rctx->desc;
511}
512
513static void cryptd_hash_complete(struct ahash_request *req, int err,
514 crypto_completion_t complete)
515{
516 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
517 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
518 int refcnt = refcount_read(&ctx->refcnt);
519
520 local_bh_disable();
521 ahash_request_complete(req, err);
522 local_bh_enable();
523
524 if (err == -EINPROGRESS) {
525 req->base.complete = complete;
526 req->base.data = req;
527 } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
528 crypto_free_ahash(tfm);
529}
530
531static void cryptd_hash_init(void *data, int err)
532{
533 struct ahash_request *req = data;
534 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
535 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
536 struct crypto_shash *child = ctx->child;
537 struct shash_desc *desc;
538
539 desc = cryptd_hash_prepare(req, err);
540 if (unlikely(!desc))
541 goto out;
542
543 desc->tfm = child;
544
545 err = crypto_shash_init(desc);
546
547out:
548 cryptd_hash_complete(req, err, cryptd_hash_init);
549}
550
551static int cryptd_hash_init_enqueue(struct ahash_request *req)
552{
553 return cryptd_hash_enqueue(req, cryptd_hash_init);
554}
555
556static void cryptd_hash_update(void *data, int err)
557{
558 struct ahash_request *req = data;
559 struct shash_desc *desc;
560
561 desc = cryptd_hash_prepare(req, err);
562 if (likely(desc))
563 err = shash_ahash_update(req, desc);
564
565 cryptd_hash_complete(req, err, cryptd_hash_update);
566}
567
568static int cryptd_hash_update_enqueue(struct ahash_request *req)
569{
570 return cryptd_hash_enqueue(req, cryptd_hash_update);
571}
572
573static void cryptd_hash_final(void *data, int err)
574{
575 struct ahash_request *req = data;
576 struct shash_desc *desc;
577
578 desc = cryptd_hash_prepare(req, err);
579 if (likely(desc))
580 err = crypto_shash_final(desc, req->result);
581
582 cryptd_hash_complete(req, err, cryptd_hash_final);
583}
584
585static int cryptd_hash_final_enqueue(struct ahash_request *req)
586{
587 return cryptd_hash_enqueue(req, cryptd_hash_final);
588}
589
590static void cryptd_hash_finup(void *data, int err)
591{
592 struct ahash_request *req = data;
593 struct shash_desc *desc;
594
595 desc = cryptd_hash_prepare(req, err);
596 if (likely(desc))
597 err = shash_ahash_finup(req, desc);
598
599 cryptd_hash_complete(req, err, cryptd_hash_finup);
600}
601
602static int cryptd_hash_finup_enqueue(struct ahash_request *req)
603{
604 return cryptd_hash_enqueue(req, cryptd_hash_finup);
605}
606
607static void cryptd_hash_digest(void *data, int err)
608{
609 struct ahash_request *req = data;
610 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
611 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
612 struct crypto_shash *child = ctx->child;
613 struct shash_desc *desc;
614
615 desc = cryptd_hash_prepare(req, err);
616 if (unlikely(!desc))
617 goto out;
618
619 desc->tfm = child;
620
621 err = shash_ahash_digest(req, desc);
622
623out:
624 cryptd_hash_complete(req, err, cryptd_hash_digest);
625}
626
627static int cryptd_hash_digest_enqueue(struct ahash_request *req)
628{
629 return cryptd_hash_enqueue(req, cryptd_hash_digest);
630}
631
632static int cryptd_hash_export(struct ahash_request *req, void *out)
633{
634 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
635
636 return crypto_shash_export(&rctx->desc, out);
637}
638
639static int cryptd_hash_import(struct ahash_request *req, const void *in)
640{
641 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
642 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
643 struct shash_desc *desc = cryptd_shash_desc(req);
644
645 desc->tfm = ctx->child;
646
647 return crypto_shash_import(desc, in);
648}
649
650static void cryptd_hash_free(struct ahash_instance *inst)
651{
652 struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
653
654 crypto_drop_shash(&ctx->spawn);
655 kfree(inst);
656}
657
658static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
659 struct crypto_attr_type *algt,
660 struct cryptd_queue *queue)
661{
662 struct hashd_instance_ctx *ctx;
663 struct ahash_instance *inst;
664 struct shash_alg *alg;
665 u32 type;
666 u32 mask;
667 int err;
668
669 cryptd_type_and_mask(algt, &type, &mask);
670
671 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
672 if (!inst)
673 return -ENOMEM;
674
675 ctx = ahash_instance_ctx(inst);
676 ctx->queue = queue;
677
678 err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
679 crypto_attr_alg_name(tb[1]), type, mask);
680 if (err)
681 goto err_free_inst;
682 alg = crypto_spawn_shash_alg(&ctx->spawn);
683
684 err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
685 if (err)
686 goto err_free_inst;
687
688 inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
689 (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
690 CRYPTO_ALG_OPTIONAL_KEY));
691 inst->alg.halg.digestsize = alg->digestsize;
692 inst->alg.halg.statesize = alg->statesize;
693 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
694
695 inst->alg.init_tfm = cryptd_hash_init_tfm;
696 inst->alg.clone_tfm = cryptd_hash_clone_tfm;
697 inst->alg.exit_tfm = cryptd_hash_exit_tfm;
698
699 inst->alg.init = cryptd_hash_init_enqueue;
700 inst->alg.update = cryptd_hash_update_enqueue;
701 inst->alg.final = cryptd_hash_final_enqueue;
702 inst->alg.finup = cryptd_hash_finup_enqueue;
703 inst->alg.export = cryptd_hash_export;
704 inst->alg.import = cryptd_hash_import;
705 if (crypto_shash_alg_has_setkey(alg))
706 inst->alg.setkey = cryptd_hash_setkey;
707 inst->alg.digest = cryptd_hash_digest_enqueue;
708
709 inst->free = cryptd_hash_free;
710
711 err = ahash_register_instance(tmpl, inst);
712 if (err) {
713err_free_inst:
714 cryptd_hash_free(inst);
715 }
716 return err;
717}
718
719static int cryptd_aead_setkey(struct crypto_aead *parent,
720 const u8 *key, unsigned int keylen)
721{
722 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
723 struct crypto_aead *child = ctx->child;
724
725 return crypto_aead_setkey(child, key, keylen);
726}
727
728static int cryptd_aead_setauthsize(struct crypto_aead *parent,
729 unsigned int authsize)
730{
731 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
732 struct crypto_aead *child = ctx->child;
733
734 return crypto_aead_setauthsize(child, authsize);
735}
736
737static void cryptd_aead_crypt(struct aead_request *req,
738 struct crypto_aead *child, int err,
739 int (*crypt)(struct aead_request *req),
740 crypto_completion_t compl)
741{
742 struct cryptd_aead_request_ctx *rctx;
743 struct aead_request *subreq;
744 struct cryptd_aead_ctx *ctx;
745 struct crypto_aead *tfm;
746 int refcnt;
747
748 rctx = aead_request_ctx(req);
749 subreq = &rctx->req;
750 req->base.complete = subreq->base.complete;
751 req->base.data = subreq->base.data;
752
753 tfm = crypto_aead_reqtfm(req);
754
755 if (unlikely(err == -EINPROGRESS))
756 goto out;
757
758 aead_request_set_tfm(subreq, child);
759 aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
760 NULL, NULL);
761 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
762 req->iv);
763 aead_request_set_ad(subreq, req->assoclen);
764
765 err = crypt(subreq);
766
767out:
768 ctx = crypto_aead_ctx(tfm);
769 refcnt = refcount_read(&ctx->refcnt);
770
771 local_bh_disable();
772 aead_request_complete(req, err);
773 local_bh_enable();
774
775 if (err == -EINPROGRESS) {
776 subreq->base.complete = req->base.complete;
777 subreq->base.data = req->base.data;
778 req->base.complete = compl;
779 req->base.data = req;
780 } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
781 crypto_free_aead(tfm);
782}
783
784static void cryptd_aead_encrypt(void *data, int err)
785{
786 struct aead_request *req = data;
787 struct cryptd_aead_ctx *ctx;
788 struct crypto_aead *child;
789
790 ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
791 child = ctx->child;
792 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
793 cryptd_aead_encrypt);
794}
795
796static void cryptd_aead_decrypt(void *data, int err)
797{
798 struct aead_request *req = data;
799 struct cryptd_aead_ctx *ctx;
800 struct crypto_aead *child;
801
802 ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
803 child = ctx->child;
804 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
805 cryptd_aead_decrypt);
806}
807
808static int cryptd_aead_enqueue(struct aead_request *req,
809 crypto_completion_t compl)
810{
811 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
812 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
813 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
814 struct aead_request *subreq = &rctx->req;
815
816 subreq->base.complete = req->base.complete;
817 subreq->base.data = req->base.data;
818 req->base.complete = compl;
819 req->base.data = req;
820 return cryptd_enqueue_request(queue, &req->base);
821}
822
823static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
824{
825 return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
826}
827
828static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
829{
830 return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
831}
832
static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	/*
	 * Reserve room for the child's request context as well, so the
	 * subrequest embedded in cryptd_aead_request_ctx can be handed
	 * straight to the child transform.
	 */
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
		/*
		 * The label lives inside the branch so that the earlier
		 * gotos and a registration failure share one cleanup path.
		 */
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

/* All cryptd instances share a single set of per-cpu request queues. */
static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};
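
/*
 * Instances of this template are created on demand when a user asks for a
 * "cryptd(...)" algorithm by name.  As an illustrative sketch (the inner
 * algorithm name is only an example and must exist on the running system),
 * a caller could do:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256-generic)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * which wraps the synchronous sha256-generic shash in an asynchronous
 * cryptd instance backed by the workqueue below.
 */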

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	/* Make sure we actually got a cryptd instance and not some other
	 * driver that happens to match the name. */
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	/* Each in-flight request holds a reference on top of the base one. */
	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
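
/*
 * The refcounting scheme above is shared by the skcipher, ahash and aead
 * helpers: cryptd_alloc_*() sets the count to one, each request that is
 * enqueued takes an extra reference, and cryptd_free_*() drops the base
 * reference, so the transform stays alive until the last queued request
 * has completed.
 */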

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
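
/*
 * Note: cryptd_shash_desc() exposes the child shash descriptor embedded in
 * an ahash request; callers that are able to finish an operation
 * synchronously (for instance when SIMD registers are usable) can use it
 * to drive the child shash directly instead of going through the queue.
 */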

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
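
/*
 * Typical caller pattern for the aead helpers above (an illustrative
 * sketch; "__some-internal-aead" is a placeholder, not a real driver name):
 *
 *	struct cryptd_aead *ctfm;
 *	struct crypto_aead *child;
 *
 *	ctfm = cryptd_alloc_aead("__some-internal-aead",
 *				 CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	child = cryptd_aead_child(ctfm);
 *	...
 *	cryptd_free_aead(ctfm);
 *
 * cryptd_aead_child() gives the caller a fast path to the underlying
 * transform, and cryptd_free_aead() only frees the tfm once the last
 * queued request has dropped its reference.
 */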

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

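/*
 * The workqueue is destroyed first so that all pending work is flushed
 * before the per-cpu queues are checked (cryptd_fini_queue() BUG()s on
 * any leftover requests) and freed; the template is unregistered last.
 */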
static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");