/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

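/*
 * Upper bound on the length of each per-CPU request queue; read once at
 * module init when the queues are created. Once a CPU's queue is full,
 * further requests are refused (or backlogged, if the submitter set
 * CRYPTO_TFM_REQ_MAY_BACKLOG).
 */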
static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};
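
/*
 * Note on the refcnt fields above: refcnt must remain the first member of
 * each context struct, because cryptd_enqueue_request() reaches it through
 * crypto_tfm_ctx(). It starts at 0 for instances obtained through the
 * normal crypto API, which disables the self-freeing logic entirely. The
 * cryptd_alloc_*() helpers at the bottom of this file set it to 1; from
 * then on it is raised once per request sitting in a queue and dropped on
 * completion, so cryptd_free_*() can defer freeing the transform until the
 * last queued request has finished.
 */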

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

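/*
 * Queue a request on the current CPU's queue and kick that CPU's worker.
 * get_cpu()/put_cpu() pin the submitter so that the request is enqueued
 * and the work item scheduled on the same CPU, keeping request processing
 * and completion CPU-local.
 */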
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: perform one real encryption/decryption
 * operation (via req->complete) and reschedule itself if there is more
 * work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue. preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request(), and local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() from running in
	 * softirq context on this CPU while the queue is being manipulated.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

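/*
 * Algorithms marked CRYPTO_ALG_INTERNAL (e.g. the bare SIMD ciphers) may
 * only be used through a wrapper. If the caller asked for such an
 * algorithm, propagate the internal flag into the type and mask so that
 * the cryptd instance built around it is marked internal as well.
 */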
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

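/*
 * Runs in workqueue context with the saved completion already stashed in
 * the request context: perform the actual (synchronous) blkcipher
 * operation, restore the caller's completion handler, then invoke it with
 * BHs disabled.
 */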
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

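/*
 * Allocate a crypto_instance with @head bytes of caller-owned space in
 * front of it (ahash instances need this headroom) and @tail bytes of
 * instance context behind it, then fill in the generic fields. The +50
 * priority set by cryptd_init_instance() lets the cryptd version win the
 * lookup over the plain synchronous algorithm it wraps.
 */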
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

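/*
 * The child request below lives on the stack (SKCIPHER_REQUEST_ON_STACK),
 * which is safe only because the child is synchronous (the spawn is
 * grabbed with CRYPTO_ALG_ASYNC in the mask); it is wiped again with
 * skcipher_request_zero() before the function returns.
 */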
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

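/*
 * The hash path wraps a synchronous shash behind the asynchronous ahash
 * interface; each request carries its own shash_desc, so the request size
 * must cover the descriptor header plus the child's descsize.
 */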
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

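/*
 * The parent tfm must be read out before the completion runs: once the
 * saved callback has been invoked, the request may already have been
 * freed by its owner. The child tfm is swapped into the request so the
 * wrapped algorithm does the real work.
 */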
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
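
/*
 * With the template registered, "cryptd(...)" instances can be created by
 * name through the normal crypto API. A minimal sketch, assuming the
 * sha256-generic shash driver is available (the name is illustrative, not
 * something this file provides):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256-generic)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	(requests submitted to tfm now execute asynchronously in kcrypto_wq)
 *
 * The cryptd_alloc_*() helpers below perform the same lookup but return
 * cryptd-typed handles with the refcnt set to 1, enabling the
 * deferred-free behaviour described near the top of this file.
 */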

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

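/*
 * The *_queued() helpers report whether any requests for this transform
 * are still sitting in a cryptd queue: the refcnt starts at 1 for tfms
 * from cryptd_alloc_*(), so any value above 1 means work is pending.
 */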
bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
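
/*
 * Typical consumer pattern, sketched with an illustrative driver name
 * (the "__xts-aes-hypothetical" string below is an assumption for the
 * example, not an algorithm defined in this file): a SIMD cipher driver
 * wraps its internal synchronous implementation in cryptd, calls the
 * child directly when the FPU is usable, and falls back to the queued
 * asynchronous path otherwise.
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__xts-aes-hypothetical",
 *				     CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	if (irq_fpu_usable())
 *		(use cryptd_skcipher_child(ctfm) synchronously)
 *	else
 *		(submit the request against &ctfm->base instead)
 *
 *	cryptd_free_skcipher(ctfm);
 */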

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;

	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Software async crypto daemon.
4 *
5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
6 *
7 * Added AEAD support to cryptd.
8 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
9 * Adrian Hoban <adrian.hoban@intel.com>
10 * Gabriele Paoloni <gabriele.paoloni@intel.com>
11 * Aidan O'Mahony (aidan.o.mahony@intel.com)
12 * Copyright (c) 2010, Intel Corporation.
13 */
14
15#include <crypto/internal/hash.h>
16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h>
18#include <crypto/cryptd.h>
19#include <linux/refcount.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/list.h>
24#include <linux/module.h>
25#include <linux/scatterlist.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/workqueue.h>
29
30static unsigned int cryptd_max_cpu_qlen = 1000;
31module_param(cryptd_max_cpu_qlen, uint, 0);
32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
33
34static struct workqueue_struct *cryptd_wq;
35
36struct cryptd_cpu_queue {
37 struct crypto_queue queue;
38 struct work_struct work;
39};
40
41struct cryptd_queue {
42 /*
43 * Protected by disabling BH to allow enqueueing from softinterrupt and
44 * dequeuing from kworker (cryptd_queue_worker()).
45 */
46 struct cryptd_cpu_queue __percpu *cpu_queue;
47};
48
49struct cryptd_instance_ctx {
50 struct crypto_spawn spawn;
51 struct cryptd_queue *queue;
52};
53
54struct skcipherd_instance_ctx {
55 struct crypto_skcipher_spawn spawn;
56 struct cryptd_queue *queue;
57};
58
59struct hashd_instance_ctx {
60 struct crypto_shash_spawn spawn;
61 struct cryptd_queue *queue;
62};
63
64struct aead_instance_ctx {
65 struct crypto_aead_spawn aead_spawn;
66 struct cryptd_queue *queue;
67};
68
69struct cryptd_skcipher_ctx {
70 refcount_t refcnt;
71 struct crypto_skcipher *child;
72};
73
74struct cryptd_skcipher_request_ctx {
75 struct skcipher_request req;
76};
77
78struct cryptd_hash_ctx {
79 refcount_t refcnt;
80 struct crypto_shash *child;
81};
82
83struct cryptd_hash_request_ctx {
84 crypto_completion_t complete;
85 void *data;
86 struct shash_desc desc;
87};
88
89struct cryptd_aead_ctx {
90 refcount_t refcnt;
91 struct crypto_aead *child;
92};
93
94struct cryptd_aead_request_ctx {
95 struct aead_request req;
96};
97
98static void cryptd_queue_worker(struct work_struct *work);
99
100static int cryptd_init_queue(struct cryptd_queue *queue,
101 unsigned int max_cpu_qlen)
102{
103 int cpu;
104 struct cryptd_cpu_queue *cpu_queue;
105
106 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
107 if (!queue->cpu_queue)
108 return -ENOMEM;
109 for_each_possible_cpu(cpu) {
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
112 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
113 }
114 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
115 return 0;
116}
117
118static void cryptd_fini_queue(struct cryptd_queue *queue)
119{
120 int cpu;
121 struct cryptd_cpu_queue *cpu_queue;
122
123 for_each_possible_cpu(cpu) {
124 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
125 BUG_ON(cpu_queue->queue.qlen);
126 }
127 free_percpu(queue->cpu_queue);
128}
129
130static int cryptd_enqueue_request(struct cryptd_queue *queue,
131 struct crypto_async_request *request)
132{
133 int err;
134 struct cryptd_cpu_queue *cpu_queue;
135 refcount_t *refcnt;
136
137 local_bh_disable();
138 cpu_queue = this_cpu_ptr(queue->cpu_queue);
139 err = crypto_enqueue_request(&cpu_queue->queue, request);
140
141 refcnt = crypto_tfm_ctx(request->tfm);
142
143 if (err == -ENOSPC)
144 goto out;
145
146 queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
147
148 if (!refcount_read(refcnt))
149 goto out;
150
151 refcount_inc(refcnt);
152
153out:
154 local_bh_enable();
155
156 return err;
157}
158
159/* Called in workqueue context, do one real cryption work (via
160 * req->complete) and reschedule itself if there are more work to
161 * do. */
162static void cryptd_queue_worker(struct work_struct *work)
163{
164 struct cryptd_cpu_queue *cpu_queue;
165 struct crypto_async_request *req, *backlog;
166
167 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
168 /*
169 * Only handle one request at a time to avoid hogging crypto workqueue.
170 */
171 local_bh_disable();
172 backlog = crypto_get_backlog(&cpu_queue->queue);
173 req = crypto_dequeue_request(&cpu_queue->queue);
174 local_bh_enable();
175
176 if (!req)
177 return;
178
179 if (backlog)
180 crypto_request_complete(backlog, -EINPROGRESS);
181 crypto_request_complete(req, 0);
182
183 if (cpu_queue->queue.qlen)
184 queue_work(cryptd_wq, &cpu_queue->work);
185}
186
187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
188{
189 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
190 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
191 return ictx->queue;
192}
193
194static void cryptd_type_and_mask(struct crypto_attr_type *algt,
195 u32 *type, u32 *mask)
196{
197 /*
198 * cryptd is allowed to wrap internal algorithms, but in that case the
199 * resulting cryptd instance will be marked as internal as well.
200 */
201 *type = algt->type & CRYPTO_ALG_INTERNAL;
202 *mask = algt->mask & CRYPTO_ALG_INTERNAL;
203
204 /* No point in cryptd wrapping an algorithm that's already async. */
205 *mask |= CRYPTO_ALG_ASYNC;
206
207 *mask |= crypto_algt_inherited_mask(algt);
208}
209
210static int cryptd_init_instance(struct crypto_instance *inst,
211 struct crypto_alg *alg)
212{
213 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
214 "cryptd(%s)",
215 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
216 return -ENAMETOOLONG;
217
218 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
219
220 inst->alg.cra_priority = alg->cra_priority + 50;
221 inst->alg.cra_blocksize = alg->cra_blocksize;
222 inst->alg.cra_alignmask = alg->cra_alignmask;
223
224 return 0;
225}
226
227static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
228 const u8 *key, unsigned int keylen)
229{
230 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
231 struct crypto_skcipher *child = ctx->child;
232
233 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
234 crypto_skcipher_set_flags(child,
235 crypto_skcipher_get_flags(parent) &
236 CRYPTO_TFM_REQ_MASK);
237 return crypto_skcipher_setkey(child, key, keylen);
238}
239
240static struct skcipher_request *cryptd_skcipher_prepare(
241 struct skcipher_request *req, int err)
242{
243 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
244 struct skcipher_request *subreq = &rctx->req;
245 struct cryptd_skcipher_ctx *ctx;
246 struct crypto_skcipher *child;
247
248 req->base.complete = subreq->base.complete;
249 req->base.data = subreq->base.data;
250
251 if (unlikely(err == -EINPROGRESS))
252 return NULL;
253
254 ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
255 child = ctx->child;
256
257 skcipher_request_set_tfm(subreq, child);
258 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
259 NULL, NULL);
260 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
261 req->iv);
262
263 return subreq;
264}
265
266static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
267 crypto_completion_t complete)
268{
269 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
270 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
271 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
272 struct skcipher_request *subreq = &rctx->req;
273 int refcnt = refcount_read(&ctx->refcnt);
274
275 local_bh_disable();
276 skcipher_request_complete(req, err);
277 local_bh_enable();
278
279 if (unlikely(err == -EINPROGRESS)) {
280 subreq->base.complete = req->base.complete;
281 subreq->base.data = req->base.data;
282 req->base.complete = complete;
283 req->base.data = req;
284 } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
285 crypto_free_skcipher(tfm);
286}
287
288static void cryptd_skcipher_encrypt(void *data, int err)
289{
290 struct skcipher_request *req = data;
291 struct skcipher_request *subreq;
292
293 subreq = cryptd_skcipher_prepare(req, err);
294 if (likely(subreq))
295 err = crypto_skcipher_encrypt(subreq);
296
297 cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
298}
299
300static void cryptd_skcipher_decrypt(void *data, int err)
301{
302 struct skcipher_request *req = data;
303 struct skcipher_request *subreq;
304
305 subreq = cryptd_skcipher_prepare(req, err);
306 if (likely(subreq))
307 err = crypto_skcipher_decrypt(subreq);
308
309 cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
310}
311
312static int cryptd_skcipher_enqueue(struct skcipher_request *req,
313 crypto_completion_t compl)
314{
315 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
316 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
317 struct skcipher_request *subreq = &rctx->req;
318 struct cryptd_queue *queue;
319
320 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
321 subreq->base.complete = req->base.complete;
322 subreq->base.data = req->base.data;
323 req->base.complete = compl;
324 req->base.data = req;
325
326 return cryptd_enqueue_request(queue, &req->base);
327}
328
329static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
330{
331 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
332}
333
334static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
335{
336 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
337}
338
339static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
340{
341 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
342 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
343 struct crypto_skcipher_spawn *spawn = &ictx->spawn;
344 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
345 struct crypto_skcipher *cipher;
346
347 cipher = crypto_spawn_skcipher(spawn);
348 if (IS_ERR(cipher))
349 return PTR_ERR(cipher);
350
351 ctx->child = cipher;
352 crypto_skcipher_set_reqsize(
353 tfm, sizeof(struct cryptd_skcipher_request_ctx) +
354 crypto_skcipher_reqsize(cipher));
355 return 0;
356}
357
358static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
359{
360 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
361
362 crypto_free_skcipher(ctx->child);
363}
364
365static void cryptd_skcipher_free(struct skcipher_instance *inst)
366{
367 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
368
369 crypto_drop_skcipher(&ctx->spawn);
370 kfree(inst);
371}
372
373static int cryptd_create_skcipher(struct crypto_template *tmpl,
374 struct rtattr **tb,
375 struct crypto_attr_type *algt,
376 struct cryptd_queue *queue)
377{
378 struct skcipherd_instance_ctx *ctx;
379 struct skcipher_instance *inst;
380 struct skcipher_alg_common *alg;
381 u32 type;
382 u32 mask;
383 int err;
384
385 cryptd_type_and_mask(algt, &type, &mask);
386
387 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
388 if (!inst)
389 return -ENOMEM;
390
391 ctx = skcipher_instance_ctx(inst);
392 ctx->queue = queue;
393
394 err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
395 crypto_attr_alg_name(tb[1]), type, mask);
396 if (err)
397 goto err_free_inst;
398
399 alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
400 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
401 if (err)
402 goto err_free_inst;
403
404 inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
405 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
406 inst->alg.ivsize = alg->ivsize;
407 inst->alg.chunksize = alg->chunksize;
408 inst->alg.min_keysize = alg->min_keysize;
409 inst->alg.max_keysize = alg->max_keysize;
410
411 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
412
413 inst->alg.init = cryptd_skcipher_init_tfm;
414 inst->alg.exit = cryptd_skcipher_exit_tfm;
415
416 inst->alg.setkey = cryptd_skcipher_setkey;
417 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
418 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
419
420 inst->free = cryptd_skcipher_free;
421
422 err = skcipher_register_instance(tmpl, inst);
423 if (err) {
424err_free_inst:
425 cryptd_skcipher_free(inst);
426 }
427 return err;
428}
429
430static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
431{
432 struct ahash_instance *inst = ahash_alg_instance(tfm);
433 struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
434 struct crypto_shash_spawn *spawn = &ictx->spawn;
435 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
436 struct crypto_shash *hash;
437
438 hash = crypto_spawn_shash(spawn);
439 if (IS_ERR(hash))
440 return PTR_ERR(hash);
441
442 ctx->child = hash;
443 crypto_ahash_set_reqsize(tfm,
444 sizeof(struct cryptd_hash_request_ctx) +
445 crypto_shash_descsize(hash));
446 return 0;
447}
448
449static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
450 struct crypto_ahash *tfm)
451{
452 struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
453 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
454 struct crypto_shash *hash;
455
456 hash = crypto_clone_shash(ctx->child);
457 if (IS_ERR(hash))
458 return PTR_ERR(hash);
459
460 nctx->child = hash;
461 return 0;
462}
463
464static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
465{
466 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
467
468 crypto_free_shash(ctx->child);
469}
470
471static int cryptd_hash_setkey(struct crypto_ahash *parent,
472 const u8 *key, unsigned int keylen)
473{
474 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
475 struct crypto_shash *child = ctx->child;
476
477 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
478 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
479 CRYPTO_TFM_REQ_MASK);
480 return crypto_shash_setkey(child, key, keylen);
481}
482
483static int cryptd_hash_enqueue(struct ahash_request *req,
484 crypto_completion_t compl)
485{
486 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
487 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
488 struct cryptd_queue *queue =
489 cryptd_get_queue(crypto_ahash_tfm(tfm));
490
491 rctx->complete = req->base.complete;
492 rctx->data = req->base.data;
493 req->base.complete = compl;
494 req->base.data = req;
495
496 return cryptd_enqueue_request(queue, &req->base);
497}
498
499static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
500 int err)
501{
502 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
503
504 req->base.complete = rctx->complete;
505 req->base.data = rctx->data;
506
507 if (unlikely(err == -EINPROGRESS))
508 return NULL;
509
510 return &rctx->desc;
511}
512
513static void cryptd_hash_complete(struct ahash_request *req, int err,
514 crypto_completion_t complete)
515{
516 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
517 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
518 int refcnt = refcount_read(&ctx->refcnt);
519
520 local_bh_disable();
521 ahash_request_complete(req, err);
522 local_bh_enable();
523
524 if (err == -EINPROGRESS) {
525 req->base.complete = complete;
526 req->base.data = req;
527 } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
528 crypto_free_ahash(tfm);
529}
530
static void cryptd_hash_init(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_update(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = crypto_shash_final(desc, req->result);

	cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_finup(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

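/*
 * Build a "cryptd(...)" ahash instance around a synchronous shash
 * algorithm: grab the shash spawn, copy the digest and state sizes, and
 * wire every ahash operation to its enqueueing wrapper above.
 */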
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.init_tfm = cryptd_hash_init_tfm;
	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
	inst->alg.exit_tfm = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

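/*
 * Common worker for AEAD encrypt/decrypt: restore the caller's
 * completion callback, run the child cipher synchronously via the
 * subrequest embedded in the request context, then complete.  If the
 * child returns -EINPROGRESS, the cryptd callback is re-armed so this
 * function runs again on final completion.
 */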
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req),
			      crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx;
	struct aead_request *subreq;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	subreq = &rctx->req;
	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	err = crypt(subreq);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	aead_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = compl;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
			  cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
			  cryptd_aead_decrypt);
}

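/*
 * Save the caller's completion callback in the embedded subrequest,
 * substitute the cryptd handler, and queue the request for the
 * workqueue.
 */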
static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
	struct aead_request *subreq = &rctx->req;

	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

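/*
 * Build a "cryptd(...)" aead instance: grab the child AEAD, inherit its
 * IV and maximum auth tag sizes, and route encrypt/decrypt through the
 * queue.
 */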
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

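/*
 * Template entry point: dispatch on the requested algorithm type so a
 * single "cryptd" template can wrap skciphers, hashes and AEADs.
 */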
static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

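/**
 * cryptd_alloc_skcipher - allocate a "cryptd(...)" skcipher handle
 * @alg_name: name of the algorithm to wrap
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * Instantiates "cryptd(alg_name)" and verifies that the resulting tfm
 * really belongs to this module.  Illustrative sketch of use ("cbc(aes)"
 * is only an example name; error handling elided):
 *
 *	tfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	child = cryptd_skcipher_child(tfm);
 *	...
 *	cryptd_free_skcipher(tfm);
 */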
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

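/**
 * cryptd_alloc_ahash - allocate a "cryptd(...)" ahash handle
 * @alg_name: name of the hash algorithm to wrap
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * Callers typically use this to drive a synchronous shash from async
 * context.  Illustrative sketch ("sha256" is only an example name;
 * error handling elided):
 *
 *	hash = cryptd_alloc_ahash("sha256", 0, 0);
 *	...
 *	cryptd_free_ahash(hash);
 */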
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

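/**
 * cryptd_alloc_aead - allocate a "cryptd(...)" AEAD handle
 * @alg_name: name of the AEAD algorithm to wrap
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * Illustrative sketch of use ("gcm(aes)" is only an example name;
 * error handling elided):
 *
 *	aead = cryptd_alloc_aead("gcm(aes)", 0, 0);
 *	child = cryptd_aead_child(aead);
 *	...
 *	cryptd_free_aead(aead);
 */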
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

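/*
 * Module init: create the cryptd workqueue, set up the per-CPU request
 * queues, then register the template.  Unwind in reverse on failure.
 */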
static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");