Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Software async crypto daemon.
4 *
5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6 *
7 * Added AEAD support to cryptd.
8 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9 * Adrian Hoban <adrian.hoban@intel.com>
10 * Gabriele Paoloni <gabriele.paoloni@intel.com>
11 * Aidan O'Mahony (aidan.o.mahony@intel.com)
12 * Copyright (c) 2010, Intel Corporation.
13 */
14
15#include <crypto/internal/hash.h>
16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/cryptd.h>
19#include <linux/refcount.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/workqueue.h>
29
30static unsigned int cryptd_max_cpu_qlen = 1000;
31module_param(cryptd_max_cpu_qlen, uint, 0);
32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
33
34static struct workqueue_struct *cryptd_wq;
35
36struct cryptd_cpu_queue {
37 struct crypto_queue queue;
38 struct work_struct work;
39};
40
41struct cryptd_queue {
42 /*
43 * Protected by disabling BH to allow enqueueing from softinterrupt and
44 * dequeuing from kworker (cryptd_queue_worker()).
45 */
46 struct cryptd_cpu_queue __percpu *cpu_queue;
47};
48
49struct cryptd_instance_ctx {
50 struct crypto_spawn spawn;
51 struct cryptd_queue *queue;
52};
53
54struct skcipherd_instance_ctx {
55 struct crypto_skcipher_spawn spawn;
56 struct cryptd_queue *queue;
57};
58
59struct hashd_instance_ctx {
60 struct crypto_shash_spawn spawn;
61 struct cryptd_queue *queue;
62};
63
64struct aead_instance_ctx {
65 struct crypto_aead_spawn aead_spawn;
66 struct cryptd_queue *queue;
67};
68
69struct cryptd_skcipher_ctx {
70 refcount_t refcnt;
71 struct crypto_skcipher *child;
72};
73
74struct cryptd_skcipher_request_ctx {
75 crypto_completion_t complete;
76 struct skcipher_request req;
77};
78
79struct cryptd_hash_ctx {
80 refcount_t refcnt;
81 struct crypto_shash *child;
82};
83
84struct cryptd_hash_request_ctx {
85 crypto_completion_t complete;
86 struct shash_desc desc;
87};
88
89struct cryptd_aead_ctx {
90 refcount_t refcnt;
91 struct crypto_aead *child;
92};
93
94struct cryptd_aead_request_ctx {
95 crypto_completion_t complete;
96};
97
98static void cryptd_queue_worker(struct work_struct *work);
99
100static int cryptd_init_queue(struct cryptd_queue *queue,
101 unsigned int max_cpu_qlen)
102{
103 int cpu;
104 struct cryptd_cpu_queue *cpu_queue;
105
106 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107 if (!queue->cpu_queue)
108 return -ENOMEM;
109 for_each_possible_cpu(cpu) {
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
112 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
113 }
114 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
115 return 0;
116}
117
118static void cryptd_fini_queue(struct cryptd_queue *queue)
119{
120 int cpu;
121 struct cryptd_cpu_queue *cpu_queue;
122
123 for_each_possible_cpu(cpu) {
124 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
125 BUG_ON(cpu_queue->queue.qlen);
126 }
127 free_percpu(queue->cpu_queue);
128}
129
130static int cryptd_enqueue_request(struct cryptd_queue *queue,
131 struct crypto_async_request *request)
132{
133 int err;
134 struct cryptd_cpu_queue *cpu_queue;
135 refcount_t *refcnt;
136
137 local_bh_disable();
138 cpu_queue = this_cpu_ptr(queue->cpu_queue);
139 err = crypto_enqueue_request(&cpu_queue->queue, request);
140
141 refcnt = crypto_tfm_ctx(request->tfm);
142
143 if (err == -ENOSPC)
144 goto out;
145
146 queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
147
148 if (!refcount_read(refcnt))
149 goto out;
150
151 refcount_inc(refcnt);
152
153out:
154 local_bh_enable();
155
156 return err;
157}
158
159/* Called in workqueue context, do one real cryption work (via
160 * req->complete) and reschedule itself if there are more work to
161 * do. */
162static void cryptd_queue_worker(struct work_struct *work)
163{
164 struct cryptd_cpu_queue *cpu_queue;
165 struct crypto_async_request *req, *backlog;
166
167 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
168 /*
169 * Only handle one request at a time to avoid hogging crypto workqueue.
170 */
171 local_bh_disable();
172 backlog = crypto_get_backlog(&cpu_queue->queue);
173 req = crypto_dequeue_request(&cpu_queue->queue);
174 local_bh_enable();
175
176 if (!req)
177 return;
178
179 if (backlog)
180 backlog->complete(backlog, -EINPROGRESS);
181 req->complete(req, 0);
182
183 if (cpu_queue->queue.qlen)
184 queue_work(cryptd_wq, &cpu_queue->work);
185}
186
187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
188{
189 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
190 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
191 return ictx->queue;
192}
193
194static void cryptd_type_and_mask(struct crypto_attr_type *algt,
195 u32 *type, u32 *mask)
196{
197 /*
198 * cryptd is allowed to wrap internal algorithms, but in that case the
199 * resulting cryptd instance will be marked as internal as well.
200 */
201 *type = algt->type & CRYPTO_ALG_INTERNAL;
202 *mask = algt->mask & CRYPTO_ALG_INTERNAL;
203
204 /* No point in cryptd wrapping an algorithm that's already async. */
205 *mask |= CRYPTO_ALG_ASYNC;
206
207 *mask |= crypto_algt_inherited_mask(algt);
208}
209
210static int cryptd_init_instance(struct crypto_instance *inst,
211 struct crypto_alg *alg)
212{
213 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
214 "cryptd(%s)",
215 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
216 return -ENAMETOOLONG;
217
218 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219
220 inst->alg.cra_priority = alg->cra_priority + 50;
221 inst->alg.cra_blocksize = alg->cra_blocksize;
222 inst->alg.cra_alignmask = alg->cra_alignmask;
223
224 return 0;
225}
226
227static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
228 const u8 *key, unsigned int keylen)
229{
230 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
231 struct crypto_skcipher *child = ctx->child;
232
233 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
234 crypto_skcipher_set_flags(child,
235 crypto_skcipher_get_flags(parent) &
236 CRYPTO_TFM_REQ_MASK);
237 return crypto_skcipher_setkey(child, key, keylen);
238}
239
240static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
241{
242 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
243 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
244 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
245 int refcnt = refcount_read(&ctx->refcnt);
246
247 local_bh_disable();
248 rctx->complete(&req->base, err);
249 local_bh_enable();
250
251 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
252 crypto_free_skcipher(tfm);
253}
254
255static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
256 int err)
257{
258 struct skcipher_request *req = skcipher_request_cast(base);
259 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
260 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
261 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
262 struct skcipher_request *subreq = &rctx->req;
263 struct crypto_skcipher *child = ctx->child;
264
265 if (unlikely(err == -EINPROGRESS))
266 goto out;
267
268 skcipher_request_set_tfm(subreq, child);
269 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
270 NULL, NULL);
271 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
272 req->iv);
273
274 err = crypto_skcipher_encrypt(subreq);
275 skcipher_request_zero(subreq);
276
277 req->base.complete = rctx->complete;
278
279out:
280 cryptd_skcipher_complete(req, err);
281}
282
283static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
284 int err)
285{
286 struct skcipher_request *req = skcipher_request_cast(base);
287 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
288 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
289 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
290 struct skcipher_request *subreq = &rctx->req;
291 struct crypto_skcipher *child = ctx->child;
292
293 if (unlikely(err == -EINPROGRESS))
294 goto out;
295
296 skcipher_request_set_tfm(subreq, child);
297 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
298 NULL, NULL);
299 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
300 req->iv);
301
302 err = crypto_skcipher_decrypt(subreq);
303 skcipher_request_zero(subreq);
304
305 req->base.complete = rctx->complete;
306
307out:
308 cryptd_skcipher_complete(req, err);
309}
310
311static int cryptd_skcipher_enqueue(struct skcipher_request *req,
312 crypto_completion_t compl)
313{
314 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
315 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
316 struct cryptd_queue *queue;
317
318 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
319 rctx->complete = req->base.complete;
320 req->base.complete = compl;
321
322 return cryptd_enqueue_request(queue, &req->base);
323}
324
325static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
326{
327 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
328}
329
330static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
331{
332 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
333}
334
335static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
336{
337 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
338 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
339 struct crypto_skcipher_spawn *spawn = &ictx->spawn;
340 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
341 struct crypto_skcipher *cipher;
342
343 cipher = crypto_spawn_skcipher(spawn);
344 if (IS_ERR(cipher))
345 return PTR_ERR(cipher);
346
347 ctx->child = cipher;
348 crypto_skcipher_set_reqsize(
349 tfm, sizeof(struct cryptd_skcipher_request_ctx) +
350 crypto_skcipher_reqsize(cipher));
351 return 0;
352}
353
354static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
355{
356 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
357
358 crypto_free_skcipher(ctx->child);
359}
360
361static void cryptd_skcipher_free(struct skcipher_instance *inst)
362{
363 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
364
365 crypto_drop_skcipher(&ctx->spawn);
366 kfree(inst);
367}
368
369static int cryptd_create_skcipher(struct crypto_template *tmpl,
370 struct rtattr **tb,
371 struct crypto_attr_type *algt,
372 struct cryptd_queue *queue)
373{
374 struct skcipherd_instance_ctx *ctx;
375 struct skcipher_instance *inst;
376 struct skcipher_alg *alg;
377 u32 type;
378 u32 mask;
379 int err;
380
381 cryptd_type_and_mask(algt, &type, &mask);
382
383 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
384 if (!inst)
385 return -ENOMEM;
386
387 ctx = skcipher_instance_ctx(inst);
388 ctx->queue = queue;
389
390 err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
391 crypto_attr_alg_name(tb[1]), type, mask);
392 if (err)
393 goto err_free_inst;
394
395 alg = crypto_spawn_skcipher_alg(&ctx->spawn);
396 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
397 if (err)
398 goto err_free_inst;
399
400 inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
401 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
402 inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
403 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
404 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
405 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
406
407 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
408
409 inst->alg.init = cryptd_skcipher_init_tfm;
410 inst->alg.exit = cryptd_skcipher_exit_tfm;
411
412 inst->alg.setkey = cryptd_skcipher_setkey;
413 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
414 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
415
416 inst->free = cryptd_skcipher_free;
417
418 err = skcipher_register_instance(tmpl, inst);
419 if (err) {
420err_free_inst:
421 cryptd_skcipher_free(inst);
422 }
423 return err;
424}
425
426static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
427{
428 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
429 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
430 struct crypto_shash_spawn *spawn = &ictx->spawn;
431 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
432 struct crypto_shash *hash;
433
434 hash = crypto_spawn_shash(spawn);
435 if (IS_ERR(hash))
436 return PTR_ERR(hash);
437
438 ctx->child = hash;
439 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
440 sizeof(struct cryptd_hash_request_ctx) +
441 crypto_shash_descsize(hash));
442 return 0;
443}
444
445static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
446{
447 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
448
449 crypto_free_shash(ctx->child);
450}
451
452static int cryptd_hash_setkey(struct crypto_ahash *parent,
453 const u8 *key, unsigned int keylen)
454{
455 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
456 struct crypto_shash *child = ctx->child;
457
458 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
459 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
460 CRYPTO_TFM_REQ_MASK);
461 return crypto_shash_setkey(child, key, keylen);
462}
463
464static int cryptd_hash_enqueue(struct ahash_request *req,
465 crypto_completion_t compl)
466{
467 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
468 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
469 struct cryptd_queue *queue =
470 cryptd_get_queue(crypto_ahash_tfm(tfm));
471
472 rctx->complete = req->base.complete;
473 req->base.complete = compl;
474
475 return cryptd_enqueue_request(queue, &req->base);
476}
477
478static void cryptd_hash_complete(struct ahash_request *req, int err)
479{
480 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
481 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
482 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
483 int refcnt = refcount_read(&ctx->refcnt);
484
485 local_bh_disable();
486 rctx->complete(&req->base, err);
487 local_bh_enable();
488
489 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
490 crypto_free_ahash(tfm);
491}
492
493static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
494{
495 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
496 struct crypto_shash *child = ctx->child;
497 struct ahash_request *req = ahash_request_cast(req_async);
498 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
499 struct shash_desc *desc = &rctx->desc;
500
501 if (unlikely(err == -EINPROGRESS))
502 goto out;
503
504 desc->tfm = child;
505
506 err = crypto_shash_init(desc);
507
508 req->base.complete = rctx->complete;
509
510out:
511 cryptd_hash_complete(req, err);
512}
513
514static int cryptd_hash_init_enqueue(struct ahash_request *req)
515{
516 return cryptd_hash_enqueue(req, cryptd_hash_init);
517}
518
519static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
520{
521 struct ahash_request *req = ahash_request_cast(req_async);
522 struct cryptd_hash_request_ctx *rctx;
523
524 rctx = ahash_request_ctx(req);
525
526 if (unlikely(err == -EINPROGRESS))
527 goto out;
528
529 err = shash_ahash_update(req, &rctx->desc);
530
531 req->base.complete = rctx->complete;
532
533out:
534 cryptd_hash_complete(req, err);
535}
536
537static int cryptd_hash_update_enqueue(struct ahash_request *req)
538{
539 return cryptd_hash_enqueue(req, cryptd_hash_update);
540}
541
542static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
543{
544 struct ahash_request *req = ahash_request_cast(req_async);
545 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
546
547 if (unlikely(err == -EINPROGRESS))
548 goto out;
549
550 err = crypto_shash_final(&rctx->desc, req->result);
551
552 req->base.complete = rctx->complete;
553
554out:
555 cryptd_hash_complete(req, err);
556}
557
558static int cryptd_hash_final_enqueue(struct ahash_request *req)
559{
560 return cryptd_hash_enqueue(req, cryptd_hash_final);
561}
562
563static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
564{
565 struct ahash_request *req = ahash_request_cast(req_async);
566 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
567
568 if (unlikely(err == -EINPROGRESS))
569 goto out;
570
571 err = shash_ahash_finup(req, &rctx->desc);
572
573 req->base.complete = rctx->complete;
574
575out:
576 cryptd_hash_complete(req, err);
577}
578
579static int cryptd_hash_finup_enqueue(struct ahash_request *req)
580{
581 return cryptd_hash_enqueue(req, cryptd_hash_finup);
582}
583
584static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
585{
586 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
587 struct crypto_shash *child = ctx->child;
588 struct ahash_request *req = ahash_request_cast(req_async);
589 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
590 struct shash_desc *desc = &rctx->desc;
591
592 if (unlikely(err == -EINPROGRESS))
593 goto out;
594
595 desc->tfm = child;
596
597 err = shash_ahash_digest(req, desc);
598
599 req->base.complete = rctx->complete;
600
601out:
602 cryptd_hash_complete(req, err);
603}
604
605static int cryptd_hash_digest_enqueue(struct ahash_request *req)
606{
607 return cryptd_hash_enqueue(req, cryptd_hash_digest);
608}
609
610static int cryptd_hash_export(struct ahash_request *req, void *out)
611{
612 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
613
614 return crypto_shash_export(&rctx->desc, out);
615}
616
617static int cryptd_hash_import(struct ahash_request *req, const void *in)
618{
619 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
620 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
621 struct shash_desc *desc = cryptd_shash_desc(req);
622
623 desc->tfm = ctx->child;
624
625 return crypto_shash_import(desc, in);
626}
627
628static void cryptd_hash_free(struct ahash_instance *inst)
629{
630 struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
631
632 crypto_drop_shash(&ctx->spawn);
633 kfree(inst);
634}
635
636static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
637 struct crypto_attr_type *algt,
638 struct cryptd_queue *queue)
639{
640 struct hashd_instance_ctx *ctx;
641 struct ahash_instance *inst;
642 struct shash_alg *alg;
643 u32 type;
644 u32 mask;
645 int err;
646
647 cryptd_type_and_mask(algt, &type, &mask);
648
649 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
650 if (!inst)
651 return -ENOMEM;
652
653 ctx = ahash_instance_ctx(inst);
654 ctx->queue = queue;
655
656 err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
657 crypto_attr_alg_name(tb[1]), type, mask);
658 if (err)
659 goto err_free_inst;
660 alg = crypto_spawn_shash_alg(&ctx->spawn);
661
662 err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
663 if (err)
664 goto err_free_inst;
665
666 inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
667 (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
668 CRYPTO_ALG_OPTIONAL_KEY));
669 inst->alg.halg.digestsize = alg->digestsize;
670 inst->alg.halg.statesize = alg->statesize;
671 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
672
673 inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
674 inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
675
676 inst->alg.init = cryptd_hash_init_enqueue;
677 inst->alg.update = cryptd_hash_update_enqueue;
678 inst->alg.final = cryptd_hash_final_enqueue;
679 inst->alg.finup = cryptd_hash_finup_enqueue;
680 inst->alg.export = cryptd_hash_export;
681 inst->alg.import = cryptd_hash_import;
682 if (crypto_shash_alg_has_setkey(alg))
683 inst->alg.setkey = cryptd_hash_setkey;
684 inst->alg.digest = cryptd_hash_digest_enqueue;
685
686 inst->free = cryptd_hash_free;
687
688 err = ahash_register_instance(tmpl, inst);
689 if (err) {
690err_free_inst:
691 cryptd_hash_free(inst);
692 }
693 return err;
694}
695
696static int cryptd_aead_setkey(struct crypto_aead *parent,
697 const u8 *key, unsigned int keylen)
698{
699 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
700 struct crypto_aead *child = ctx->child;
701
702 return crypto_aead_setkey(child, key, keylen);
703}
704
705static int cryptd_aead_setauthsize(struct crypto_aead *parent,
706 unsigned int authsize)
707{
708 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
709 struct crypto_aead *child = ctx->child;
710
711 return crypto_aead_setauthsize(child, authsize);
712}
713
714static void cryptd_aead_crypt(struct aead_request *req,
715 struct crypto_aead *child,
716 int err,
717 int (*crypt)(struct aead_request *req))
718{
719 struct cryptd_aead_request_ctx *rctx;
720 struct cryptd_aead_ctx *ctx;
721 crypto_completion_t compl;
722 struct crypto_aead *tfm;
723 int refcnt;
724
725 rctx = aead_request_ctx(req);
726 compl = rctx->complete;
727
728 tfm = crypto_aead_reqtfm(req);
729
730 if (unlikely(err == -EINPROGRESS))
731 goto out;
732 aead_request_set_tfm(req, child);
733 err = crypt( req );
734
735out:
736 ctx = crypto_aead_ctx(tfm);
737 refcnt = refcount_read(&ctx->refcnt);
738
739 local_bh_disable();
740 compl(&req->base, err);
741 local_bh_enable();
742
743 if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
744 crypto_free_aead(tfm);
745}
746
747static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
748{
749 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
750 struct crypto_aead *child = ctx->child;
751 struct aead_request *req;
752
753 req = container_of(areq, struct aead_request, base);
754 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
755}
756
757static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
758{
759 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
760 struct crypto_aead *child = ctx->child;
761 struct aead_request *req;
762
763 req = container_of(areq, struct aead_request, base);
764 cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
765}
766
767static int cryptd_aead_enqueue(struct aead_request *req,
768 crypto_completion_t compl)
769{
770 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
771 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
772 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
773
774 rctx->complete = req->base.complete;
775 req->base.complete = compl;
776 return cryptd_enqueue_request(queue, &req->base);
777}
778
779static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
780{
781 return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
782}
783
784static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
785{
786 return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
787}
788
789static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
790{
791 struct aead_instance *inst = aead_alg_instance(tfm);
792 struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
793 struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
794 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
795 struct crypto_aead *cipher;
796
797 cipher = crypto_spawn_aead(spawn);
798 if (IS_ERR(cipher))
799 return PTR_ERR(cipher);
800
801 ctx->child = cipher;
802 crypto_aead_set_reqsize(
803 tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
804 crypto_aead_reqsize(cipher)));
805 return 0;
806}
807
808static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
809{
810 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
811 crypto_free_aead(ctx->child);
812}
813
814static void cryptd_aead_free(struct aead_instance *inst)
815{
816 struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
817
818 crypto_drop_aead(&ctx->aead_spawn);
819 kfree(inst);
820}
821
822static int cryptd_create_aead(struct crypto_template *tmpl,
823 struct rtattr **tb,
824 struct crypto_attr_type *algt,
825 struct cryptd_queue *queue)
826{
827 struct aead_instance_ctx *ctx;
828 struct aead_instance *inst;
829 struct aead_alg *alg;
830 u32 type;
831 u32 mask;
832 int err;
833
834 cryptd_type_and_mask(algt, &type, &mask);
835
836 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
837 if (!inst)
838 return -ENOMEM;
839
840 ctx = aead_instance_ctx(inst);
841 ctx->queue = queue;
842
843 err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
844 crypto_attr_alg_name(tb[1]), type, mask);
845 if (err)
846 goto err_free_inst;
847
848 alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
849 err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
850 if (err)
851 goto err_free_inst;
852
853 inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
854 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
855 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
856
857 inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
858 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
859
860 inst->alg.init = cryptd_aead_init_tfm;
861 inst->alg.exit = cryptd_aead_exit_tfm;
862 inst->alg.setkey = cryptd_aead_setkey;
863 inst->alg.setauthsize = cryptd_aead_setauthsize;
864 inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
865 inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
866
867 inst->free = cryptd_aead_free;
868
869 err = aead_register_instance(tmpl, inst);
870 if (err) {
871err_free_inst:
872 cryptd_aead_free(inst);
873 }
874 return err;
875}
876
877static struct cryptd_queue queue;
878
879static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
880{
881 struct crypto_attr_type *algt;
882
883 algt = crypto_get_attr_type(tb);
884 if (IS_ERR(algt))
885 return PTR_ERR(algt);
886
887 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
888 case CRYPTO_ALG_TYPE_SKCIPHER:
889 return cryptd_create_skcipher(tmpl, tb, algt, &queue);
890 case CRYPTO_ALG_TYPE_HASH:
891 return cryptd_create_hash(tmpl, tb, algt, &queue);
892 case CRYPTO_ALG_TYPE_AEAD:
893 return cryptd_create_aead(tmpl, tb, algt, &queue);
894 }
895
896 return -EINVAL;
897}
898
899static struct crypto_template cryptd_tmpl = {
900 .name = "cryptd",
901 .create = cryptd_create,
902 .module = THIS_MODULE,
903};
904
905struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
906 u32 type, u32 mask)
907{
908 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
909 struct cryptd_skcipher_ctx *ctx;
910 struct crypto_skcipher *tfm;
911
912 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
913 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
914 return ERR_PTR(-EINVAL);
915
916 tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
917 if (IS_ERR(tfm))
918 return ERR_CAST(tfm);
919
920 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
921 crypto_free_skcipher(tfm);
922 return ERR_PTR(-EINVAL);
923 }
924
925 ctx = crypto_skcipher_ctx(tfm);
926 refcount_set(&ctx->refcnt, 1);
927
928 return container_of(tfm, struct cryptd_skcipher, base);
929}
930EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
931
932struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
933{
934 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
935
936 return ctx->child;
937}
938EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
939
940bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
941{
942 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
943
944 return refcount_read(&ctx->refcnt) - 1;
945}
946EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
947
948void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
949{
950 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
951
952 if (refcount_dec_and_test(&ctx->refcnt))
953 crypto_free_skcipher(&tfm->base);
954}
955EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
956
957struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
958 u32 type, u32 mask)
959{
960 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
961 struct cryptd_hash_ctx *ctx;
962 struct crypto_ahash *tfm;
963
964 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
965 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
966 return ERR_PTR(-EINVAL);
967 tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
968 if (IS_ERR(tfm))
969 return ERR_CAST(tfm);
970 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
971 crypto_free_ahash(tfm);
972 return ERR_PTR(-EINVAL);
973 }
974
975 ctx = crypto_ahash_ctx(tfm);
976 refcount_set(&ctx->refcnt, 1);
977
978 return __cryptd_ahash_cast(tfm);
979}
980EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
981
982struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
983{
984 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
985
986 return ctx->child;
987}
988EXPORT_SYMBOL_GPL(cryptd_ahash_child);
989
990struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
991{
992 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
993 return &rctx->desc;
994}
995EXPORT_SYMBOL_GPL(cryptd_shash_desc);
996
997bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
998{
999 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1000
1001 return refcount_read(&ctx->refcnt) - 1;
1002}
1003EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1004
1005void cryptd_free_ahash(struct cryptd_ahash *tfm)
1006{
1007 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1008
1009 if (refcount_dec_and_test(&ctx->refcnt))
1010 crypto_free_ahash(&tfm->base);
1011}
1012EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1013
1014struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1015 u32 type, u32 mask)
1016{
1017 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1018 struct cryptd_aead_ctx *ctx;
1019 struct crypto_aead *tfm;
1020
1021 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1022 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1023 return ERR_PTR(-EINVAL);
1024 tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1025 if (IS_ERR(tfm))
1026 return ERR_CAST(tfm);
1027 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1028 crypto_free_aead(tfm);
1029 return ERR_PTR(-EINVAL);
1030 }
1031
1032 ctx = crypto_aead_ctx(tfm);
1033 refcount_set(&ctx->refcnt, 1);
1034
1035 return __cryptd_aead_cast(tfm);
1036}
1037EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1038
1039struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1040{
1041 struct cryptd_aead_ctx *ctx;
1042 ctx = crypto_aead_ctx(&tfm->base);
1043 return ctx->child;
1044}
1045EXPORT_SYMBOL_GPL(cryptd_aead_child);
1046
1047bool cryptd_aead_queued(struct cryptd_aead *tfm)
1048{
1049 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1050
1051 return refcount_read(&ctx->refcnt) - 1;
1052}
1053EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1054
1055void cryptd_free_aead(struct cryptd_aead *tfm)
1056{
1057 struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1058
1059 if (refcount_dec_and_test(&ctx->refcnt))
1060 crypto_free_aead(&tfm->base);
1061}
1062EXPORT_SYMBOL_GPL(cryptd_free_aead);
1063
1064static int __init cryptd_init(void)
1065{
1066 int err;
1067
1068 cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1069 1);
1070 if (!cryptd_wq)
1071 return -ENOMEM;
1072
1073 err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1074 if (err)
1075 goto err_destroy_wq;
1076
1077 err = crypto_register_template(&cryptd_tmpl);
1078 if (err)
1079 goto err_fini_queue;
1080
1081 return 0;
1082
1083err_fini_queue:
1084 cryptd_fini_queue(&queue);
1085err_destroy_wq:
1086 destroy_workqueue(cryptd_wq);
1087 return err;
1088}
1089
1090static void __exit cryptd_exit(void)
1091{
1092 destroy_workqueue(cryptd_wq);
1093 cryptd_fini_queue(&queue);
1094 crypto_unregister_template(&cryptd_tmpl);
1095}
1096
1097subsys_initcall(cryptd_init);
1098module_exit(cryptd_exit);
1099
1100MODULE_LICENSE("GPL");
1101MODULE_DESCRIPTION("Software async crypto daemon");
1102MODULE_ALIAS_CRYPTO("cryptd");
1/*
2 * Software async crypto daemon.
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * Added AEAD support to cryptd.
7 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
8 * Adrian Hoban <adrian.hoban@intel.com>
9 * Gabriele Paoloni <gabriele.paoloni@intel.com>
10 * Aidan O'Mahony (aidan.o.mahony@intel.com)
11 * Copyright (c) 2010, Intel Corporation.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 *
18 */
19
20#include <crypto/algapi.h>
21#include <crypto/internal/hash.h>
22#include <crypto/internal/aead.h>
23#include <crypto/cryptd.h>
24#include <crypto/crypto_wq.h>
25#include <linux/err.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/list.h>
29#include <linux/module.h>
30#include <linux/scatterlist.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33
34#define CRYPTD_MAX_CPU_QLEN 100
35
36struct cryptd_cpu_queue {
37 struct crypto_queue queue;
38 struct work_struct work;
39};
40
41struct cryptd_queue {
42 struct cryptd_cpu_queue __percpu *cpu_queue;
43};
44
45struct cryptd_instance_ctx {
46 struct crypto_spawn spawn;
47 struct cryptd_queue *queue;
48};
49
50struct hashd_instance_ctx {
51 struct crypto_shash_spawn spawn;
52 struct cryptd_queue *queue;
53};
54
55struct aead_instance_ctx {
56 struct crypto_aead_spawn aead_spawn;
57 struct cryptd_queue *queue;
58};
59
60struct cryptd_blkcipher_ctx {
61 struct crypto_blkcipher *child;
62};
63
64struct cryptd_blkcipher_request_ctx {
65 crypto_completion_t complete;
66};
67
68struct cryptd_hash_ctx {
69 struct crypto_shash *child;
70};
71
72struct cryptd_hash_request_ctx {
73 crypto_completion_t complete;
74 struct shash_desc desc;
75};
76
77struct cryptd_aead_ctx {
78 struct crypto_aead *child;
79};
80
81struct cryptd_aead_request_ctx {
82 crypto_completion_t complete;
83};
84
85static void cryptd_queue_worker(struct work_struct *work);
86
87static int cryptd_init_queue(struct cryptd_queue *queue,
88 unsigned int max_cpu_qlen)
89{
90 int cpu;
91 struct cryptd_cpu_queue *cpu_queue;
92
93 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
94 if (!queue->cpu_queue)
95 return -ENOMEM;
96 for_each_possible_cpu(cpu) {
97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
98 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
99 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
100 }
101 return 0;
102}
103
104static void cryptd_fini_queue(struct cryptd_queue *queue)
105{
106 int cpu;
107 struct cryptd_cpu_queue *cpu_queue;
108
109 for_each_possible_cpu(cpu) {
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111 BUG_ON(cpu_queue->queue.qlen);
112 }
113 free_percpu(queue->cpu_queue);
114}
115
116static int cryptd_enqueue_request(struct cryptd_queue *queue,
117 struct crypto_async_request *request)
118{
119 int cpu, err;
120 struct cryptd_cpu_queue *cpu_queue;
121
122 cpu = get_cpu();
123 cpu_queue = this_cpu_ptr(queue->cpu_queue);
124 err = crypto_enqueue_request(&cpu_queue->queue, request);
125 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
126 put_cpu();
127
128 return err;
129}
130
131/* Called in workqueue context, do one real cryption work (via
132 * req->complete) and reschedule itself if there are more work to
133 * do. */
134static void cryptd_queue_worker(struct work_struct *work)
135{
136 struct cryptd_cpu_queue *cpu_queue;
137 struct crypto_async_request *req, *backlog;
138
139 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
140 /*
141 * Only handle one request at a time to avoid hogging crypto workqueue.
142 * preempt_disable/enable is used to prevent being preempted by
143 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
144 * cryptd_enqueue_request() being accessed from software interrupts.
145 */
146 local_bh_disable();
147 preempt_disable();
148 backlog = crypto_get_backlog(&cpu_queue->queue);
149 req = crypto_dequeue_request(&cpu_queue->queue);
150 preempt_enable();
151 local_bh_enable();
152
153 if (!req)
154 return;
155
156 if (backlog)
157 backlog->complete(backlog, -EINPROGRESS);
158 req->complete(req, 0);
159
160 if (cpu_queue->queue.qlen)
161 queue_work(kcrypto_wq, &cpu_queue->work);
162}
163
164static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
165{
166 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
167 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
168 return ictx->queue;
169}
170
171static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
172 const u8 *key, unsigned int keylen)
173{
174 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
175 struct crypto_blkcipher *child = ctx->child;
176 int err;
177
178 crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
179 crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
180 CRYPTO_TFM_REQ_MASK);
181 err = crypto_blkcipher_setkey(child, key, keylen);
182 crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
183 CRYPTO_TFM_RES_MASK);
184 return err;
185}
186
187static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
188 struct crypto_blkcipher *child,
189 int err,
190 int (*crypt)(struct blkcipher_desc *desc,
191 struct scatterlist *dst,
192 struct scatterlist *src,
193 unsigned int len))
194{
195 struct cryptd_blkcipher_request_ctx *rctx;
196 struct blkcipher_desc desc;
197
198 rctx = ablkcipher_request_ctx(req);
199
200 if (unlikely(err == -EINPROGRESS))
201 goto out;
202
203 desc.tfm = child;
204 desc.info = req->info;
205 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
206
207 err = crypt(&desc, req->dst, req->src, req->nbytes);
208
209 req->base.complete = rctx->complete;
210
211out:
212 local_bh_disable();
213 rctx->complete(&req->base, err);
214 local_bh_enable();
215}
216
217static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
218{
219 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
220 struct crypto_blkcipher *child = ctx->child;
221
222 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
223 crypto_blkcipher_crt(child)->encrypt);
224}
225
226static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
227{
228 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
229 struct crypto_blkcipher *child = ctx->child;
230
231 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
232 crypto_blkcipher_crt(child)->decrypt);
233}
234
235static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
236 crypto_completion_t complete)
237{
238 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
239 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
240 struct cryptd_queue *queue;
241
242 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
243 rctx->complete = req->base.complete;
244 req->base.complete = complete;
245
246 return cryptd_enqueue_request(queue, &req->base);
247}
248
249static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
250{
251 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
252}
253
254static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
255{
256 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
257}
258
259static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
260{
261 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
262 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
263 struct crypto_spawn *spawn = &ictx->spawn;
264 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
265 struct crypto_blkcipher *cipher;
266
267 cipher = crypto_spawn_blkcipher(spawn);
268 if (IS_ERR(cipher))
269 return PTR_ERR(cipher);
270
271 ctx->child = cipher;
272 tfm->crt_ablkcipher.reqsize =
273 sizeof(struct cryptd_blkcipher_request_ctx);
274 return 0;
275}
276
277static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
278{
279 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
280
281 crypto_free_blkcipher(ctx->child);
282}
283
284static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
285 unsigned int tail)
286{
287 char *p;
288 struct crypto_instance *inst;
289 int err;
290
291 p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
292 if (!p)
293 return ERR_PTR(-ENOMEM);
294
295 inst = (void *)(p + head);
296
297 err = -ENAMETOOLONG;
298 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
299 "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
300 goto out_free_inst;
301
302 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
303
304 inst->alg.cra_priority = alg->cra_priority + 50;
305 inst->alg.cra_blocksize = alg->cra_blocksize;
306 inst->alg.cra_alignmask = alg->cra_alignmask;
307
308out:
309 return p;
310
311out_free_inst:
312 kfree(p);
313 p = ERR_PTR(err);
314 goto out;
315}
316
317static int cryptd_create_blkcipher(struct crypto_template *tmpl,
318 struct rtattr **tb,
319 struct cryptd_queue *queue)
320{
321 struct cryptd_instance_ctx *ctx;
322 struct crypto_instance *inst;
323 struct crypto_alg *alg;
324 int err;
325
326 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
327 CRYPTO_ALG_TYPE_MASK);
328 if (IS_ERR(alg))
329 return PTR_ERR(alg);
330
331 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
332 err = PTR_ERR(inst);
333 if (IS_ERR(inst))
334 goto out_put_alg;
335
336 ctx = crypto_instance_ctx(inst);
337 ctx->queue = queue;
338
339 err = crypto_init_spawn(&ctx->spawn, alg, inst,
340 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
341 if (err)
342 goto out_free_inst;
343
344 inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
345 inst->alg.cra_type = &crypto_ablkcipher_type;
346
347 inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
348 inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
349 inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
350
351 inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
352
353 inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
354
355 inst->alg.cra_init = cryptd_blkcipher_init_tfm;
356 inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
357
358 inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
359 inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
360 inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
361
362 err = crypto_register_instance(tmpl, inst);
363 if (err) {
364 crypto_drop_spawn(&ctx->spawn);
365out_free_inst:
366 kfree(inst);
367 }
368
369out_put_alg:
370 crypto_mod_put(alg);
371 return err;
372}
373
374static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
375{
376 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
377 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
378 struct crypto_shash_spawn *spawn = &ictx->spawn;
379 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
380 struct crypto_shash *hash;
381
382 hash = crypto_spawn_shash(spawn);
383 if (IS_ERR(hash))
384 return PTR_ERR(hash);
385
386 ctx->child = hash;
387 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
388 sizeof(struct cryptd_hash_request_ctx) +
389 crypto_shash_descsize(hash));
390 return 0;
391}
392
393static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
394{
395 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
396
397 crypto_free_shash(ctx->child);
398}
399
400static int cryptd_hash_setkey(struct crypto_ahash *parent,
401 const u8 *key, unsigned int keylen)
402{
403 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
404 struct crypto_shash *child = ctx->child;
405 int err;
406
407 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
408 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
409 CRYPTO_TFM_REQ_MASK);
410 err = crypto_shash_setkey(child, key, keylen);
411 crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
412 CRYPTO_TFM_RES_MASK);
413 return err;
414}
415
416static int cryptd_hash_enqueue(struct ahash_request *req,
417 crypto_completion_t complete)
418{
419 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
420 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
421 struct cryptd_queue *queue =
422 cryptd_get_queue(crypto_ahash_tfm(tfm));
423
424 rctx->complete = req->base.complete;
425 req->base.complete = complete;
426
427 return cryptd_enqueue_request(queue, &req->base);
428}
429
430static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
431{
432 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
433 struct crypto_shash *child = ctx->child;
434 struct ahash_request *req = ahash_request_cast(req_async);
435 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
436 struct shash_desc *desc = &rctx->desc;
437
438 if (unlikely(err == -EINPROGRESS))
439 goto out;
440
441 desc->tfm = child;
442 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
443
444 err = crypto_shash_init(desc);
445
446 req->base.complete = rctx->complete;
447
448out:
449 local_bh_disable();
450 rctx->complete(&req->base, err);
451 local_bh_enable();
452}
453
454static int cryptd_hash_init_enqueue(struct ahash_request *req)
455{
456 return cryptd_hash_enqueue(req, cryptd_hash_init);
457}
458
459static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
460{
461 struct ahash_request *req = ahash_request_cast(req_async);
462 struct cryptd_hash_request_ctx *rctx;
463
464 rctx = ahash_request_ctx(req);
465
466 if (unlikely(err == -EINPROGRESS))
467 goto out;
468
469 err = shash_ahash_update(req, &rctx->desc);
470
471 req->base.complete = rctx->complete;
472
473out:
474 local_bh_disable();
475 rctx->complete(&req->base, err);
476 local_bh_enable();
477}
478
479static int cryptd_hash_update_enqueue(struct ahash_request *req)
480{
481 return cryptd_hash_enqueue(req, cryptd_hash_update);
482}
483
484static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
485{
486 struct ahash_request *req = ahash_request_cast(req_async);
487 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
488
489 if (unlikely(err == -EINPROGRESS))
490 goto out;
491
492 err = crypto_shash_final(&rctx->desc, req->result);
493
494 req->base.complete = rctx->complete;
495
496out:
497 local_bh_disable();
498 rctx->complete(&req->base, err);
499 local_bh_enable();
500}
501
502static int cryptd_hash_final_enqueue(struct ahash_request *req)
503{
504 return cryptd_hash_enqueue(req, cryptd_hash_final);
505}
506
507static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
508{
509 struct ahash_request *req = ahash_request_cast(req_async);
510 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
511
512 if (unlikely(err == -EINPROGRESS))
513 goto out;
514
515 err = shash_ahash_finup(req, &rctx->desc);
516
517 req->base.complete = rctx->complete;
518
519out:
520 local_bh_disable();
521 rctx->complete(&req->base, err);
522 local_bh_enable();
523}
524
525static int cryptd_hash_finup_enqueue(struct ahash_request *req)
526{
527 return cryptd_hash_enqueue(req, cryptd_hash_finup);
528}
529
530static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
531{
532 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
533 struct crypto_shash *child = ctx->child;
534 struct ahash_request *req = ahash_request_cast(req_async);
535 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
536 struct shash_desc *desc = &rctx->desc;
537
538 if (unlikely(err == -EINPROGRESS))
539 goto out;
540
541 desc->tfm = child;
542 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
543
544 err = shash_ahash_digest(req, desc);
545
546 req->base.complete = rctx->complete;
547
548out:
549 local_bh_disable();
550 rctx->complete(&req->base, err);
551 local_bh_enable();
552}
553
554static int cryptd_hash_digest_enqueue(struct ahash_request *req)
555{
556 return cryptd_hash_enqueue(req, cryptd_hash_digest);
557}
558
559static int cryptd_hash_export(struct ahash_request *req, void *out)
560{
561 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
562
563 return crypto_shash_export(&rctx->desc, out);
564}
565
566static int cryptd_hash_import(struct ahash_request *req, const void *in)
567{
568 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
569
570 return crypto_shash_import(&rctx->desc, in);
571}
572
573static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
574 struct cryptd_queue *queue)
575{
576 struct hashd_instance_ctx *ctx;
577 struct ahash_instance *inst;
578 struct shash_alg *salg;
579 struct crypto_alg *alg;
580 int err;
581
582 salg = shash_attr_alg(tb[1], 0, 0);
583 if (IS_ERR(salg))
584 return PTR_ERR(salg);
585
586 alg = &salg->base;
587 inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
588 sizeof(*ctx));
589 err = PTR_ERR(inst);
590 if (IS_ERR(inst))
591 goto out_put_alg;
592
593 ctx = ahash_instance_ctx(inst);
594 ctx->queue = queue;
595
596 err = crypto_init_shash_spawn(&ctx->spawn, salg,
597 ahash_crypto_instance(inst));
598 if (err)
599 goto out_free_inst;
600
601 inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
602
603 inst->alg.halg.digestsize = salg->digestsize;
604 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
605
606 inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
607 inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
608
609 inst->alg.init = cryptd_hash_init_enqueue;
610 inst->alg.update = cryptd_hash_update_enqueue;
611 inst->alg.final = cryptd_hash_final_enqueue;
612 inst->alg.finup = cryptd_hash_finup_enqueue;
613 inst->alg.export = cryptd_hash_export;
614 inst->alg.import = cryptd_hash_import;
615 inst->alg.setkey = cryptd_hash_setkey;
616 inst->alg.digest = cryptd_hash_digest_enqueue;
617
618 err = ahash_register_instance(tmpl, inst);
619 if (err) {
620 crypto_drop_shash(&ctx->spawn);
621out_free_inst:
622 kfree(inst);
623 }
624
625out_put_alg:
626 crypto_mod_put(alg);
627 return err;
628}
629
630static void cryptd_aead_crypt(struct aead_request *req,
631 struct crypto_aead *child,
632 int err,
633 int (*crypt)(struct aead_request *req))
634{
635 struct cryptd_aead_request_ctx *rctx;
636 rctx = aead_request_ctx(req);
637
638 if (unlikely(err == -EINPROGRESS))
639 goto out;
640 aead_request_set_tfm(req, child);
641 err = crypt( req );
642 req->base.complete = rctx->complete;
643out:
644 local_bh_disable();
645 rctx->complete(&req->base, err);
646 local_bh_enable();
647}
648
649static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
650{
651 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
652 struct crypto_aead *child = ctx->child;
653 struct aead_request *req;
654
655 req = container_of(areq, struct aead_request, base);
656 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
657}
658
659static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
660{
661 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
662 struct crypto_aead *child = ctx->child;
663 struct aead_request *req;
664
665 req = container_of(areq, struct aead_request, base);
666 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
667}
668
669static int cryptd_aead_enqueue(struct aead_request *req,
670 crypto_completion_t complete)
671{
672 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
673 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
674 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
675
676 rctx->complete = req->base.complete;
677 req->base.complete = complete;
678 return cryptd_enqueue_request(queue, &req->base);
679}
680
681static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
682{
683 return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
684}
685
686static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
687{
688 return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
689}
690
691static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
692{
693 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
694 struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
695 struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
696 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
697 struct crypto_aead *cipher;
698
699 cipher = crypto_spawn_aead(spawn);
700 if (IS_ERR(cipher))
701 return PTR_ERR(cipher);
702
703 crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
704 ctx->child = cipher;
705 tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
706 return 0;
707}
708
709static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
710{
711 struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
712 crypto_free_aead(ctx->child);
713}
714
715static int cryptd_create_aead(struct crypto_template *tmpl,
716 struct rtattr **tb,
717 struct cryptd_queue *queue)
718{
719 struct aead_instance_ctx *ctx;
720 struct crypto_instance *inst;
721 struct crypto_alg *alg;
722 int err;
723
724 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
725 CRYPTO_ALG_TYPE_MASK);
726 if (IS_ERR(alg))
727 return PTR_ERR(alg);
728
729 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
730 err = PTR_ERR(inst);
731 if (IS_ERR(inst))
732 goto out_put_alg;
733
734 ctx = crypto_instance_ctx(inst);
735 ctx->queue = queue;
736
737 err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
738 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
739 if (err)
740 goto out_free_inst;
741
742 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
743 inst->alg.cra_type = alg->cra_type;
744 inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
745 inst->alg.cra_init = cryptd_aead_init_tfm;
746 inst->alg.cra_exit = cryptd_aead_exit_tfm;
747 inst->alg.cra_aead.setkey = alg->cra_aead.setkey;
748 inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
749 inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
750 inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
751 inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
752 inst->alg.cra_aead.encrypt = cryptd_aead_encrypt_enqueue;
753 inst->alg.cra_aead.decrypt = cryptd_aead_decrypt_enqueue;
754 inst->alg.cra_aead.givencrypt = alg->cra_aead.givencrypt;
755 inst->alg.cra_aead.givdecrypt = alg->cra_aead.givdecrypt;
756
757 err = crypto_register_instance(tmpl, inst);
758 if (err) {
759 crypto_drop_spawn(&ctx->aead_spawn.base);
760out_free_inst:
761 kfree(inst);
762 }
763out_put_alg:
764 crypto_mod_put(alg);
765 return err;
766}
767
768static struct cryptd_queue queue;
769
770static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
771{
772 struct crypto_attr_type *algt;
773
774 algt = crypto_get_attr_type(tb);
775 if (IS_ERR(algt))
776 return PTR_ERR(algt);
777
778 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
779 case CRYPTO_ALG_TYPE_BLKCIPHER:
780 return cryptd_create_blkcipher(tmpl, tb, &queue);
781 case CRYPTO_ALG_TYPE_DIGEST:
782 return cryptd_create_hash(tmpl, tb, &queue);
783 case CRYPTO_ALG_TYPE_AEAD:
784 return cryptd_create_aead(tmpl, tb, &queue);
785 }
786
787 return -EINVAL;
788}
789
790static void cryptd_free(struct crypto_instance *inst)
791{
792 struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
793 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
794 struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
795
796 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
797 case CRYPTO_ALG_TYPE_AHASH:
798 crypto_drop_shash(&hctx->spawn);
799 kfree(ahash_instance(inst));
800 return;
801 case CRYPTO_ALG_TYPE_AEAD:
802 crypto_drop_spawn(&aead_ctx->aead_spawn.base);
803 kfree(inst);
804 return;
805 default:
806 crypto_drop_spawn(&ctx->spawn);
807 kfree(inst);
808 }
809}
810
811static struct crypto_template cryptd_tmpl = {
812 .name = "cryptd",
813 .create = cryptd_create,
814 .free = cryptd_free,
815 .module = THIS_MODULE,
816};
817
818struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
819 u32 type, u32 mask)
820{
821 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
822 struct crypto_tfm *tfm;
823
824 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
825 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
826 return ERR_PTR(-EINVAL);
827 type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
828 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
829 mask &= ~CRYPTO_ALG_TYPE_MASK;
830 mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
831 tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
832 if (IS_ERR(tfm))
833 return ERR_CAST(tfm);
834 if (tfm->__crt_alg->cra_module != THIS_MODULE) {
835 crypto_free_tfm(tfm);
836 return ERR_PTR(-EINVAL);
837 }
838
839 return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
840}
841EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
842
843struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
844{
845 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
846 return ctx->child;
847}
848EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
849
850void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
851{
852 crypto_free_ablkcipher(&tfm->base);
853}
854EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
855
856struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
857 u32 type, u32 mask)
858{
859 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
860 struct crypto_ahash *tfm;
861
862 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
863 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
864 return ERR_PTR(-EINVAL);
865 tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
866 if (IS_ERR(tfm))
867 return ERR_CAST(tfm);
868 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
869 crypto_free_ahash(tfm);
870 return ERR_PTR(-EINVAL);
871 }
872
873 return __cryptd_ahash_cast(tfm);
874}
875EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
876
877struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
878{
879 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
880
881 return ctx->child;
882}
883EXPORT_SYMBOL_GPL(cryptd_ahash_child);
884
885struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
886{
887 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
888 return &rctx->desc;
889}
890EXPORT_SYMBOL_GPL(cryptd_shash_desc);
891
892void cryptd_free_ahash(struct cryptd_ahash *tfm)
893{
894 crypto_free_ahash(&tfm->base);
895}
896EXPORT_SYMBOL_GPL(cryptd_free_ahash);
897
898struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
899 u32 type, u32 mask)
900{
901 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
902 struct crypto_aead *tfm;
903
904 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
905 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
906 return ERR_PTR(-EINVAL);
907 tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
908 if (IS_ERR(tfm))
909 return ERR_CAST(tfm);
910 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
911 crypto_free_aead(tfm);
912 return ERR_PTR(-EINVAL);
913 }
914 return __cryptd_aead_cast(tfm);
915}
916EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
917
918struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
919{
920 struct cryptd_aead_ctx *ctx;
921 ctx = crypto_aead_ctx(&tfm->base);
922 return ctx->child;
923}
924EXPORT_SYMBOL_GPL(cryptd_aead_child);
925
926void cryptd_free_aead(struct cryptd_aead *tfm)
927{
928 crypto_free_aead(&tfm->base);
929}
930EXPORT_SYMBOL_GPL(cryptd_free_aead);
931
932static int __init cryptd_init(void)
933{
934 int err;
935
936 err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
937 if (err)
938 return err;
939
940 err = crypto_register_template(&cryptd_tmpl);
941 if (err)
942 cryptd_fini_queue(&queue);
943
944 return err;
945}
946
947static void __exit cryptd_exit(void)
948{
949 cryptd_fini_queue(&queue);
950 crypto_unregister_template(&cryptd_tmpl);
951}
952
953subsys_initcall(cryptd_init);
954module_exit(cryptd_exit);
955
956MODULE_LICENSE("GPL");
957MODULE_DESCRIPTION("Software async crypto daemon");