// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
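/*
 * With a permission argument of 0 the parameter is not exposed in
 * sysfs, so the depth can only be chosen at load time, e.g.
 * (illustrative) "modprobe cryptd cryptd_max_cpu_qlen=2000", or
 * "cryptd.cryptd_max_cpu_qlen=2000" on the kernel command line when
 * cryptd is built in.
 */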

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

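/*
 * The reference counting below relies on the refcount_t being the first
 * member of every cryptd context struct (cryptd_skcipher_ctx,
 * cryptd_hash_ctx and cryptd_aead_ctx), which is why crypto_tfm_ctx()
 * can be treated as a bare refcount_t pointer.  A count of zero means
 * the tfm was instantiated through the template rather than one of the
 * cryptd_alloc_*() helpers and is not reference counted here.
 */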
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out_put_cpu;

	refcount_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().  local_bh_disable/enable
	 * is used to prevent cryptd_enqueue_request() from being reentered
	 * from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

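/*
 * The instance keeps the wrapped algorithm's cra_name and only mangles
 * the driver name, e.g. (illustrative) cra_name "cbc(aes)" with
 * cra_driver_name "cryptd(cbc-aes-driver)".  The +50 priority boost
 * lets the async instance win a lookup by cra_name over the
 * synchronous implementation it wraps.
 */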
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

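/*
 * The encrypt/decrypt handlers below run in workqueue context with BHs
 * enabled and feed the request synchronously to the child transform.
 * An err of -EINPROGRESS means the request was backlogged and is only
 * being notified here, not processed.
 */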
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

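/*
 * rctx->complete has to be read before the request is handed to the
 * child: the cryptd request context shares its memory with the child's
 * (the reqsize set in cryptd_aead_init_tfm() is the max of the two), so
 * the crypt() call below may clobber it.  As elsewhere, -EINPROGRESS
 * only signals a backlogged request being notified, not processed.
 */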
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};
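/*
 * Besides the helpers below, the template can be instantiated through
 * the normal lookup path, e.g. (illustrative):
 *
 *	tfm = crypto_alloc_skcipher("cryptd(cbc(aes))", 0, 0);
 *
 * which wraps the best matching synchronous "cbc(aes)" implementation
 * in an asynchronous cryptd instance.
 */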

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
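/*
 * Typical caller pattern, as an illustrative sketch only (the
 * "__cbc-aes-driver" name is made up and error handling is trimmed):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__cbc-aes-driver",
 *				     CRYPTO_ALG_INTERNAL,
 *				     CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 * Requests are then submitted against &ctfm->base; the synchronous
 * child from cryptd_skcipher_child() can be used directly on fast
 * paths, and the reference is dropped with cryptd_free_skcipher().
 */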

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
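/*
 * Illustrative sketch of the ahash helper (the "__ghash-driver" name is
 * made up):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__ghash-driver", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *
 * Requests go to &chash->base; cryptd_ahash_child() and
 * cryptd_shash_desc() expose the synchronous child for callers that can
 * run it directly.  Release with cryptd_free_ahash().
 */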

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
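/*
 * Illustrative sketch for AEADs, again with a made-up driver name:
 *
 *	struct cryptd_aead *caead;
 *
 *	caead = cryptd_alloc_aead("__gcm-aes-driver", CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *
 * A caller that also drives the child directly can use
 * cryptd_aead_queued() to check for still-queued requests first; the
 * reference is dropped with cryptd_free_aead().
 */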

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
1/*
2 * Software async crypto daemon.
3 *
4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * Added AEAD support to cryptd.
7 * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
8 * Adrian Hoban <adrian.hoban@intel.com>
9 * Gabriele Paoloni <gabriele.paoloni@intel.com>
10 * Aidan O'Mahony (aidan.o.mahony@intel.com)
11 * Copyright (c) 2010, Intel Corporation.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the Free
15 * Software Foundation; either version 2 of the License, or (at your option)
16 * any later version.
17 *
18 */
19
20#include <crypto/internal/hash.h>
21#include <crypto/internal/aead.h>
22#include <crypto/internal/skcipher.h>
23#include <crypto/cryptd.h>
24#include <crypto/crypto_wq.h>
25#include <linux/atomic.h>
26#include <linux/err.h>
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/list.h>
30#include <linux/module.h>
31#include <linux/scatterlist.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34
35static unsigned int cryptd_max_cpu_qlen = 1000;
36module_param(cryptd_max_cpu_qlen, uint, 0);
37MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
38
39struct cryptd_cpu_queue {
40 struct crypto_queue queue;
41 struct work_struct work;
42};
43
44struct cryptd_queue {
45 struct cryptd_cpu_queue __percpu *cpu_queue;
46};
47
48struct cryptd_instance_ctx {
49 struct crypto_spawn spawn;
50 struct cryptd_queue *queue;
51};
52
53struct skcipherd_instance_ctx {
54 struct crypto_skcipher_spawn spawn;
55 struct cryptd_queue *queue;
56};
57
58struct hashd_instance_ctx {
59 struct crypto_shash_spawn spawn;
60 struct cryptd_queue *queue;
61};
62
63struct aead_instance_ctx {
64 struct crypto_aead_spawn aead_spawn;
65 struct cryptd_queue *queue;
66};
67
68struct cryptd_blkcipher_ctx {
69 atomic_t refcnt;
70 struct crypto_blkcipher *child;
71};
72
73struct cryptd_blkcipher_request_ctx {
74 crypto_completion_t complete;
75};
76
77struct cryptd_skcipher_ctx {
78 atomic_t refcnt;
79 struct crypto_skcipher *child;
80};
81
82struct cryptd_skcipher_request_ctx {
83 crypto_completion_t complete;
84};
85
86struct cryptd_hash_ctx {
87 atomic_t refcnt;
88 struct crypto_shash *child;
89};
90
91struct cryptd_hash_request_ctx {
92 crypto_completion_t complete;
93 struct shash_desc desc;
94};
95
96struct cryptd_aead_ctx {
97 atomic_t refcnt;
98 struct crypto_aead *child;
99};
100
101struct cryptd_aead_request_ctx {
102 crypto_completion_t complete;
103};
104
105static void cryptd_queue_worker(struct work_struct *work);
106
107static int cryptd_init_queue(struct cryptd_queue *queue,
108 unsigned int max_cpu_qlen)
109{
110 int cpu;
111 struct cryptd_cpu_queue *cpu_queue;
112
113 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
114 if (!queue->cpu_queue)
115 return -ENOMEM;
116 for_each_possible_cpu(cpu) {
117 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
118 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
119 INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
120 }
121 pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
122 return 0;
123}
124
125static void cryptd_fini_queue(struct cryptd_queue *queue)
126{
127 int cpu;
128 struct cryptd_cpu_queue *cpu_queue;
129
130 for_each_possible_cpu(cpu) {
131 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
132 BUG_ON(cpu_queue->queue.qlen);
133 }
134 free_percpu(queue->cpu_queue);
135}
136
137static int cryptd_enqueue_request(struct cryptd_queue *queue,
138 struct crypto_async_request *request)
139{
140 int cpu, err;
141 struct cryptd_cpu_queue *cpu_queue;
142 atomic_t *refcnt;
143
144 cpu = get_cpu();
145 cpu_queue = this_cpu_ptr(queue->cpu_queue);
146 err = crypto_enqueue_request(&cpu_queue->queue, request);
147
148 refcnt = crypto_tfm_ctx(request->tfm);
149
150 if (err == -ENOSPC)
151 goto out_put_cpu;
152
153 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
154
155 if (!atomic_read(refcnt))
156 goto out_put_cpu;
157
158 atomic_inc(refcnt);
159
160out_put_cpu:
161 put_cpu();
162
163 return err;
164}
165
166/* Called in workqueue context, do one real cryption work (via
167 * req->complete) and reschedule itself if there are more work to
168 * do. */
169static void cryptd_queue_worker(struct work_struct *work)
170{
171 struct cryptd_cpu_queue *cpu_queue;
172 struct crypto_async_request *req, *backlog;
173
174 cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
175 /*
176 * Only handle one request at a time to avoid hogging crypto workqueue.
177 * preempt_disable/enable is used to prevent being preempted by
178 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
179 * cryptd_enqueue_request() being accessed from software interrupts.
180 */
181 local_bh_disable();
182 preempt_disable();
183 backlog = crypto_get_backlog(&cpu_queue->queue);
184 req = crypto_dequeue_request(&cpu_queue->queue);
185 preempt_enable();
186 local_bh_enable();
187
188 if (!req)
189 return;
190
191 if (backlog)
192 backlog->complete(backlog, -EINPROGRESS);
193 req->complete(req, 0);
194
195 if (cpu_queue->queue.qlen)
196 queue_work(kcrypto_wq, &cpu_queue->work);
197}
198
199static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
200{
201 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
202 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
203 return ictx->queue;
204}
205
206static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
207 u32 *mask)
208{
209 struct crypto_attr_type *algt;
210
211 algt = crypto_get_attr_type(tb);
212 if (IS_ERR(algt))
213 return;
214
215 *type |= algt->type & CRYPTO_ALG_INTERNAL;
216 *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
217}
218
219static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
220 const u8 *key, unsigned int keylen)
221{
222 struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
223 struct crypto_blkcipher *child = ctx->child;
224 int err;
225
226 crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
227 crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
228 CRYPTO_TFM_REQ_MASK);
229 err = crypto_blkcipher_setkey(child, key, keylen);
230 crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
231 CRYPTO_TFM_RES_MASK);
232 return err;
233}
234
235static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
236 struct crypto_blkcipher *child,
237 int err,
238 int (*crypt)(struct blkcipher_desc *desc,
239 struct scatterlist *dst,
240 struct scatterlist *src,
241 unsigned int len))
242{
243 struct cryptd_blkcipher_request_ctx *rctx;
244 struct cryptd_blkcipher_ctx *ctx;
245 struct crypto_ablkcipher *tfm;
246 struct blkcipher_desc desc;
247 int refcnt;
248
249 rctx = ablkcipher_request_ctx(req);
250
251 if (unlikely(err == -EINPROGRESS))
252 goto out;
253
254 desc.tfm = child;
255 desc.info = req->info;
256 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
257
258 err = crypt(&desc, req->dst, req->src, req->nbytes);
259
260 req->base.complete = rctx->complete;
261
262out:
263 tfm = crypto_ablkcipher_reqtfm(req);
264 ctx = crypto_ablkcipher_ctx(tfm);
265 refcnt = atomic_read(&ctx->refcnt);
266
267 local_bh_disable();
268 rctx->complete(&req->base, err);
269 local_bh_enable();
270
271 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
272 crypto_free_ablkcipher(tfm);
273}
274
275static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
276{
277 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
278 struct crypto_blkcipher *child = ctx->child;
279
280 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
281 crypto_blkcipher_crt(child)->encrypt);
282}
283
284static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
285{
286 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
287 struct crypto_blkcipher *child = ctx->child;
288
289 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
290 crypto_blkcipher_crt(child)->decrypt);
291}
292
293static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
294 crypto_completion_t compl)
295{
296 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
297 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
298 struct cryptd_queue *queue;
299
300 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
301 rctx->complete = req->base.complete;
302 req->base.complete = compl;
303
304 return cryptd_enqueue_request(queue, &req->base);
305}
306
307static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
308{
309 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
310}
311
312static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
313{
314 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
315}
316
317static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
318{
319 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
320 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
321 struct crypto_spawn *spawn = &ictx->spawn;
322 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
323 struct crypto_blkcipher *cipher;
324
325 cipher = crypto_spawn_blkcipher(spawn);
326 if (IS_ERR(cipher))
327 return PTR_ERR(cipher);
328
329 ctx->child = cipher;
330 tfm->crt_ablkcipher.reqsize =
331 sizeof(struct cryptd_blkcipher_request_ctx);
332 return 0;
333}
334
335static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
336{
337 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
338
339 crypto_free_blkcipher(ctx->child);
340}
341
342static int cryptd_init_instance(struct crypto_instance *inst,
343 struct crypto_alg *alg)
344{
345 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
346 "cryptd(%s)",
347 alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
348 return -ENAMETOOLONG;
349
350 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
351
352 inst->alg.cra_priority = alg->cra_priority + 50;
353 inst->alg.cra_blocksize = alg->cra_blocksize;
354 inst->alg.cra_alignmask = alg->cra_alignmask;
355
356 return 0;
357}
358
359static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
360 unsigned int tail)
361{
362 char *p;
363 struct crypto_instance *inst;
364 int err;
365
366 p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
367 if (!p)
368 return ERR_PTR(-ENOMEM);
369
370 inst = (void *)(p + head);
371
372 err = cryptd_init_instance(inst, alg);
373 if (err)
374 goto out_free_inst;
375
376out:
377 return p;
378
379out_free_inst:
380 kfree(p);
381 p = ERR_PTR(err);
382 goto out;
383}
384
385static int cryptd_create_blkcipher(struct crypto_template *tmpl,
386 struct rtattr **tb,
387 struct cryptd_queue *queue)
388{
389 struct cryptd_instance_ctx *ctx;
390 struct crypto_instance *inst;
391 struct crypto_alg *alg;
392 u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
393 u32 mask = CRYPTO_ALG_TYPE_MASK;
394 int err;
395
396 cryptd_check_internal(tb, &type, &mask);
397
398 alg = crypto_get_attr_alg(tb, type, mask);
399 if (IS_ERR(alg))
400 return PTR_ERR(alg);
401
402 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
403 err = PTR_ERR(inst);
404 if (IS_ERR(inst))
405 goto out_put_alg;
406
407 ctx = crypto_instance_ctx(inst);
408 ctx->queue = queue;
409
410 err = crypto_init_spawn(&ctx->spawn, alg, inst,
411 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
412 if (err)
413 goto out_free_inst;
414
415 type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
416 if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
417 type |= CRYPTO_ALG_INTERNAL;
418 inst->alg.cra_flags = type;
419 inst->alg.cra_type = &crypto_ablkcipher_type;
420
421 inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
422 inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
423 inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
424
425 inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
426
427 inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
428
429 inst->alg.cra_init = cryptd_blkcipher_init_tfm;
430 inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
431
432 inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
433 inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
434 inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
435
436 err = crypto_register_instance(tmpl, inst);
437 if (err) {
438 crypto_drop_spawn(&ctx->spawn);
439out_free_inst:
440 kfree(inst);
441 }
442
443out_put_alg:
444 crypto_mod_put(alg);
445 return err;
446}
447
448static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
449 const u8 *key, unsigned int keylen)
450{
451 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
452 struct crypto_skcipher *child = ctx->child;
453 int err;
454
455 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
456 crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
457 CRYPTO_TFM_REQ_MASK);
458 err = crypto_skcipher_setkey(child, key, keylen);
459 crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
460 CRYPTO_TFM_RES_MASK);
461 return err;
462}
463
464static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
465{
466 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
467 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
468 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
469 int refcnt = atomic_read(&ctx->refcnt);
470
471 local_bh_disable();
472 rctx->complete(&req->base, err);
473 local_bh_enable();
474
475 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
476 crypto_free_skcipher(tfm);
477}
478
479static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
480 int err)
481{
482 struct skcipher_request *req = skcipher_request_cast(base);
483 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
484 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
485 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
486 struct crypto_skcipher *child = ctx->child;
487 SKCIPHER_REQUEST_ON_STACK(subreq, child);
488
489 if (unlikely(err == -EINPROGRESS))
490 goto out;
491
492 skcipher_request_set_tfm(subreq, child);
493 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
494 NULL, NULL);
495 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
496 req->iv);
497
498 err = crypto_skcipher_encrypt(subreq);
499 skcipher_request_zero(subreq);
500
501 req->base.complete = rctx->complete;
502
503out:
504 cryptd_skcipher_complete(req, err);
505}
506
507static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
508 int err)
509{
510 struct skcipher_request *req = skcipher_request_cast(base);
511 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
512 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
513 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
514 struct crypto_skcipher *child = ctx->child;
515 SKCIPHER_REQUEST_ON_STACK(subreq, child);
516
517 if (unlikely(err == -EINPROGRESS))
518 goto out;
519
520 skcipher_request_set_tfm(subreq, child);
521 skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
522 NULL, NULL);
523 skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
524 req->iv);
525
526 err = crypto_skcipher_decrypt(subreq);
527 skcipher_request_zero(subreq);
528
529 req->base.complete = rctx->complete;
530
531out:
532 cryptd_skcipher_complete(req, err);
533}
534
535static int cryptd_skcipher_enqueue(struct skcipher_request *req,
536 crypto_completion_t compl)
537{
538 struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
539 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
540 struct cryptd_queue *queue;
541
542 queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
543 rctx->complete = req->base.complete;
544 req->base.complete = compl;
545
546 return cryptd_enqueue_request(queue, &req->base);
547}
548
549static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
550{
551 return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
552}
553
554static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
555{
556 return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
557}
558
559static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
560{
561 struct skcipher_instance *inst = skcipher_alg_instance(tfm);
562 struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
563 struct crypto_skcipher_spawn *spawn = &ictx->spawn;
564 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
565 struct crypto_skcipher *cipher;
566
567 cipher = crypto_spawn_skcipher(spawn);
568 if (IS_ERR(cipher))
569 return PTR_ERR(cipher);
570
571 ctx->child = cipher;
572 crypto_skcipher_set_reqsize(
573 tfm, sizeof(struct cryptd_skcipher_request_ctx));
574 return 0;
575}
576
577static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
578{
579 struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
580
581 crypto_free_skcipher(ctx->child);
582}
583
584static void cryptd_skcipher_free(struct skcipher_instance *inst)
585{
586 struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
587
588 crypto_drop_skcipher(&ctx->spawn);
589}
590
591static int cryptd_create_skcipher(struct crypto_template *tmpl,
592 struct rtattr **tb,
593 struct cryptd_queue *queue)
594{
595 struct skcipherd_instance_ctx *ctx;
596 struct skcipher_instance *inst;
597 struct skcipher_alg *alg;
598 const char *name;
599 u32 type;
600 u32 mask;
601 int err;
602
603 type = 0;
604 mask = CRYPTO_ALG_ASYNC;
605
606 cryptd_check_internal(tb, &type, &mask);
607
608 name = crypto_attr_alg_name(tb[1]);
609 if (IS_ERR(name))
610 return PTR_ERR(name);
611
612 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
613 if (!inst)
614 return -ENOMEM;
615
616 ctx = skcipher_instance_ctx(inst);
617 ctx->queue = queue;
618
619 crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
620 err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
621 if (err)
622 goto out_free_inst;
623
624 alg = crypto_spawn_skcipher_alg(&ctx->spawn);
625 err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
626 if (err)
627 goto out_drop_skcipher;
628
629 inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
630 (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
631
632 inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
633 inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
634 inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
635 inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
636
637 inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
638
639 inst->alg.init = cryptd_skcipher_init_tfm;
640 inst->alg.exit = cryptd_skcipher_exit_tfm;
641
642 inst->alg.setkey = cryptd_skcipher_setkey;
643 inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
644 inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
645
646 inst->free = cryptd_skcipher_free;
647
648 err = skcipher_register_instance(tmpl, inst);
649 if (err) {
650out_drop_skcipher:
651 crypto_drop_skcipher(&ctx->spawn);
652out_free_inst:
653 kfree(inst);
654 }
655 return err;
656}
657
658static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
659{
660 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
661 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
662 struct crypto_shash_spawn *spawn = &ictx->spawn;
663 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
664 struct crypto_shash *hash;
665
666 hash = crypto_spawn_shash(spawn);
667 if (IS_ERR(hash))
668 return PTR_ERR(hash);
669
670 ctx->child = hash;
671 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
672 sizeof(struct cryptd_hash_request_ctx) +
673 crypto_shash_descsize(hash));
674 return 0;
675}
676
677static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
678{
679 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
680
681 crypto_free_shash(ctx->child);
682}
683
684static int cryptd_hash_setkey(struct crypto_ahash *parent,
685 const u8 *key, unsigned int keylen)
686{
687 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
688 struct crypto_shash *child = ctx->child;
689 int err;
690
691 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
692 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
693 CRYPTO_TFM_REQ_MASK);
694 err = crypto_shash_setkey(child, key, keylen);
695 crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
696 CRYPTO_TFM_RES_MASK);
697 return err;
698}
699
700static int cryptd_hash_enqueue(struct ahash_request *req,
701 crypto_completion_t compl)
702{
703 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
704 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
705 struct cryptd_queue *queue =
706 cryptd_get_queue(crypto_ahash_tfm(tfm));
707
708 rctx->complete = req->base.complete;
709 req->base.complete = compl;
710
711 return cryptd_enqueue_request(queue, &req->base);
712}
713
714static void cryptd_hash_complete(struct ahash_request *req, int err)
715{
716 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
717 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
718 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
719 int refcnt = atomic_read(&ctx->refcnt);
720
721 local_bh_disable();
722 rctx->complete(&req->base, err);
723 local_bh_enable();
724
725 if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
726 crypto_free_ahash(tfm);
727}
728
729static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
730{
731 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
732 struct crypto_shash *child = ctx->child;
733 struct ahash_request *req = ahash_request_cast(req_async);
734 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
735 struct shash_desc *desc = &rctx->desc;
736
737 if (unlikely(err == -EINPROGRESS))
738 goto out;
739
740 desc->tfm = child;
741 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
742
743 err = crypto_shash_init(desc);
744
745 req->base.complete = rctx->complete;
746
747out:
748 cryptd_hash_complete(req, err);
749}
750
751static int cryptd_hash_init_enqueue(struct ahash_request *req)
752{
753 return cryptd_hash_enqueue(req, cryptd_hash_init);
754}
755
756static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
757{
758 struct ahash_request *req = ahash_request_cast(req_async);
759 struct cryptd_hash_request_ctx *rctx;
760
761 rctx = ahash_request_ctx(req);
762
763 if (unlikely(err == -EINPROGRESS))
764 goto out;
765
766 err = shash_ahash_update(req, &rctx->desc);
767
768 req->base.complete = rctx->complete;
769
770out:
771 cryptd_hash_complete(req, err);
772}
773
774static int cryptd_hash_update_enqueue(struct ahash_request *req)
775{
776 return cryptd_hash_enqueue(req, cryptd_hash_update);
777}
778
779static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
780{
781 struct ahash_request *req = ahash_request_cast(req_async);
782 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
783
784 if (unlikely(err == -EINPROGRESS))
785 goto out;
786
787 err = crypto_shash_final(&rctx->desc, req->result);
788
789 req->base.complete = rctx->complete;
790
791out:
792 cryptd_hash_complete(req, err);
793}
794
795static int cryptd_hash_final_enqueue(struct ahash_request *req)
796{
797 return cryptd_hash_enqueue(req, cryptd_hash_final);
798}
799
static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

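/*
 * export and import run synchronously in the caller's context; they just
 * serialize or restore the partial shash state held in the request
 * context, so no trip through the workqueue is needed.
 */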
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

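/*
 * Run the AEAD operation on the synchronous child transform from
 * workqueue context, then deliver the original completion with BHs
 * disabled.  If the last reference was dropped while the request was in
 * flight, the transform is freed here as well.
 */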
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

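/*
 * Save the caller's completion in the request context, substitute the
 * worker callback and push the request onto the per-CPU cryptd queue.
 */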
static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}
static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

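/*
 * Template entry point: dispatch on the requested algorithm type.
 * Legacy blkcipher requests and skcipher requests both arrive as
 * CRYPTO_ALG_TYPE_BLKCIPHER and are told apart by the exact type bits;
 * hashes and AEADs go to their own constructors.
 */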
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};
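
/*
 * The template is instantiated by name like any other crypto template.
 * A minimal caller sketch (hypothetical code, not part of this file;
 * assumes a "sha256" shash is registered):
 *
 *	struct crypto_ahash *hash;
 *
 *	hash = crypto_alloc_ahash("cryptd(sha256)", 0, 0);
 *	if (IS_ERR(hash))
 *		return PTR_ERR(hash);
 *	...
 *	crypto_free_ahash(hash);
 *
 * The cryptd_alloc_* helpers below do the same, but also verify that the
 * transform really came from this module and return a cryptd-typed handle.
 */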

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
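
/*
 * Typical lifecycle for the skcipher helpers above (a hedged sketch, not
 * code from this file; names and the chosen algorithm are illustrative):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 * Requests are then issued against &ctfm->base; synchronous fast paths
 * may use cryptd_skcipher_child(ctfm) directly, and
 * cryptd_skcipher_queued(ctfm) reports whether asynchronous requests are
 * still in flight.  When done:
 *
 *	cryptd_free_skcipher(ctfm);
 */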

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
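
/*
 * The ahash helpers follow the same pattern (again an illustrative
 * sketch, assuming a "sha256" shash is available):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *
 * Asynchronous requests go to &chash->base; cryptd_ahash_child() and
 * cryptd_shash_desc() expose the underlying shash and its per-request
 * state for callers that can hash synchronously, and
 * cryptd_ahash_queued() tells them whether they must keep using the
 * async path.  Finally:
 *
 *	cryptd_free_ahash(chash);
 */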

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
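
/*
 * And likewise for AEADs (illustrative sketch; "gcm(aes)" is just an
 * example of an AEAD the system may provide):
 *
 *	struct cryptd_aead *caead;
 *
 *	caead = cryptd_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *
 * Requests are set up against &caead->base, or against
 * cryptd_aead_child(caead) when the caller can run synchronously;
 * cryptd_aead_queued(caead) reports in-flight requests.  When done:
 *
 *	cryptd_free_aead(caead);
 */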

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");