v3.15
  1/*
  2 * Software async crypto daemon.
  3 *
  4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  5 *
  6 * Added AEAD support to cryptd.
  7 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
  8 *             Adrian Hoban <adrian.hoban@intel.com>
  9 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 10 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 11 *    Copyright (c) 2010, Intel Corporation.
 12 *
 13 * This program is free software; you can redistribute it and/or modify it
 14 * under the terms of the GNU General Public License as published by the Free
 15 * Software Foundation; either version 2 of the License, or (at your option)
 16 * any later version.
 17 *
 18 */
 19
 20#include <crypto/algapi.h>
 21#include <crypto/internal/hash.h>
 22#include <crypto/internal/aead.h>
 23#include <crypto/cryptd.h>
 24#include <crypto/crypto_wq.h>
 25#include <linux/err.h>
 26#include <linux/init.h>
 27#include <linux/kernel.h>
 28#include <linux/list.h>
 29#include <linux/module.h>
 30#include <linux/scatterlist.h>
 31#include <linux/sched.h>
 32#include <linux/slab.h>
 33
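/* Maximum number of pending requests per CPU, handed to crypto_init_queue(). */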
 34#define CRYPTD_MAX_CPU_QLEN 100
 35
 36struct cryptd_cpu_queue {
 37	struct crypto_queue queue;
 38	struct work_struct work;
 39};
 40
 41struct cryptd_queue {
 42	struct cryptd_cpu_queue __percpu *cpu_queue;
 43};
 44
 45struct cryptd_instance_ctx {
 46	struct crypto_spawn spawn;
 47	struct cryptd_queue *queue;
 48};
 49
 50struct hashd_instance_ctx {
 51	struct crypto_shash_spawn spawn;
 52	struct cryptd_queue *queue;
 53};
 54
 55struct aead_instance_ctx {
 56	struct crypto_aead_spawn aead_spawn;
 57	struct cryptd_queue *queue;
 58};
 59
 60struct cryptd_blkcipher_ctx {
 61	struct crypto_blkcipher *child;
 62};
 63
 64struct cryptd_blkcipher_request_ctx {
 65	crypto_completion_t complete;
 66};
 67
 68struct cryptd_hash_ctx {
 69	struct crypto_shash *child;
 70};
 71
 72struct cryptd_hash_request_ctx {
 73	crypto_completion_t complete;
 74	struct shash_desc desc;
 75};
 76
 77struct cryptd_aead_ctx {
 78	struct crypto_aead *child;
 79};
 80
 81struct cryptd_aead_request_ctx {
 82	crypto_completion_t complete;
 83};
 84
 85static void cryptd_queue_worker(struct work_struct *work);
 86
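/*
 * Set up one crypto_queue and one work item for every possible CPU, so
 * each CPU's requests are processed by its own worker.
 */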
 87static int cryptd_init_queue(struct cryptd_queue *queue,
 88			     unsigned int max_cpu_qlen)
 89{
 90	int cpu;
 91	struct cryptd_cpu_queue *cpu_queue;
 92
 93	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
 94	if (!queue->cpu_queue)
 95		return -ENOMEM;
 96	for_each_possible_cpu(cpu) {
 97		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 98		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 99		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
100	}
101	return 0;
102}
103
104static void cryptd_fini_queue(struct cryptd_queue *queue)
105{
106	int cpu;
107	struct cryptd_cpu_queue *cpu_queue;
108
109	for_each_possible_cpu(cpu) {
110		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111		BUG_ON(cpu_queue->queue.qlen);
112	}
113	free_percpu(queue->cpu_queue);
114}
115
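/*
 * Enqueue the request on the current CPU's queue and schedule that
 * CPU's worker on the shared kcrypto_wq workqueue.
 */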
116static int cryptd_enqueue_request(struct cryptd_queue *queue,
117				  struct crypto_async_request *request)
118{
119	int cpu, err;
120	struct cryptd_cpu_queue *cpu_queue;
121
122	cpu = get_cpu();
123	cpu_queue = this_cpu_ptr(queue->cpu_queue);
124	err = crypto_enqueue_request(&cpu_queue->queue, request);
125	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
126	put_cpu();
127
128	return err;
129}
130
131/* Called in workqueue context; performs one unit of crypto work (via
132 * req->complete) and reschedules itself if there is more work to
133 * do. */
134static void cryptd_queue_worker(struct work_struct *work)
135{
136	struct cryptd_cpu_queue *cpu_queue;
137	struct crypto_async_request *req, *backlog;
138
139	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
140	/*
141	 * Only handle one request at a time to avoid hogging crypto workqueue.
142	 * preempt_disable/enable is used to prevent being preempted by
143	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
144	 * cryptd_enqueue_request() from being called from software interrupts.
145	 */
146	local_bh_disable();
147	preempt_disable();
148	backlog = crypto_get_backlog(&cpu_queue->queue);
149	req = crypto_dequeue_request(&cpu_queue->queue);
150	preempt_enable();
151	local_bh_enable();
152
153	if (!req)
154		return;
155
156	if (backlog)
157		backlog->complete(backlog, -EINPROGRESS);
158	req->complete(req, 0);
159
160	if (cpu_queue->queue.qlen)
161		queue_work(kcrypto_wq, &cpu_queue->work);
162}
163
164static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
165{
166	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
167	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
168	return ictx->queue;
169}
170
171static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
172				   const u8 *key, unsigned int keylen)
173{
174	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
175	struct crypto_blkcipher *child = ctx->child;
176	int err;
177
178	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
179	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
180					  CRYPTO_TFM_REQ_MASK);
181	err = crypto_blkcipher_setkey(child, key, keylen);
182	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
183					    CRYPTO_TFM_RES_MASK);
184	return err;
185}
186
187static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
188				   struct crypto_blkcipher *child,
189				   int err,
190				   int (*crypt)(struct blkcipher_desc *desc,
191						struct scatterlist *dst,
192						struct scatterlist *src,
193						unsigned int len))
194{
195	struct cryptd_blkcipher_request_ctx *rctx;
196	struct blkcipher_desc desc;
197
198	rctx = ablkcipher_request_ctx(req);
199
200	if (unlikely(err == -EINPROGRESS))
201		goto out;
202
203	desc.tfm = child;
204	desc.info = req->info;
205	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
206
207	err = crypt(&desc, req->dst, req->src, req->nbytes);
208
209	req->base.complete = rctx->complete;
210
211out:
212	local_bh_disable();
213	rctx->complete(&req->base, err);
214	local_bh_enable();
215}
216
217static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
218{
219	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
220	struct crypto_blkcipher *child = ctx->child;
221
222	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
223			       crypto_blkcipher_crt(child)->encrypt);
224}
225
226static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
227{
228	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
229	struct crypto_blkcipher *child = ctx->child;
230
231	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
232			       crypto_blkcipher_crt(child)->decrypt);
233}
234
235static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
236				    crypto_completion_t complete)
237{
238	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
239	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
240	struct cryptd_queue *queue;
241
242	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
243	rctx->complete = req->base.complete;
244	req->base.complete = complete;
245
246	return cryptd_enqueue_request(queue, &req->base);
247}
248
249static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
250{
251	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
252}
253
254static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
255{
256	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
257}
258
259static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
260{
261	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
262	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
263	struct crypto_spawn *spawn = &ictx->spawn;
264	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
265	struct crypto_blkcipher *cipher;
266
267	cipher = crypto_spawn_blkcipher(spawn);
268	if (IS_ERR(cipher))
269		return PTR_ERR(cipher);
270
271	ctx->child = cipher;
272	tfm->crt_ablkcipher.reqsize =
273		sizeof(struct cryptd_blkcipher_request_ctx);
274	return 0;
275}
276
277static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
278{
279	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
280
281	crypto_free_blkcipher(ctx->child);
282}
283
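/*
 * Allocate "head" bytes, a crypto_instance and "tail" bytes as a single
 * block, then fill in the "cryptd(...)" names and base parameters from
 * the wrapped algorithm.
 */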
284static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
285				   unsigned int tail)
286{
287	char *p;
288	struct crypto_instance *inst;
289	int err;
290
291	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
292	if (!p)
293		return ERR_PTR(-ENOMEM);
294
295	inst = (void *)(p + head);
296
297	err = -ENAMETOOLONG;
298	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
299		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
300		goto out_free_inst;
301
302	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
303
304	inst->alg.cra_priority = alg->cra_priority + 50;
305	inst->alg.cra_blocksize = alg->cra_blocksize;
306	inst->alg.cra_alignmask = alg->cra_alignmask;
307
308out:
309	return p;
310
311out_free_inst:
312	kfree(p);
313	p = ERR_PTR(err);
314	goto out;
315}
316
317static int cryptd_create_blkcipher(struct crypto_template *tmpl,
318				   struct rtattr **tb,
319				   struct cryptd_queue *queue)
320{
321	struct cryptd_instance_ctx *ctx;
322	struct crypto_instance *inst;
323	struct crypto_alg *alg;
324	int err;
325
326	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
327				  CRYPTO_ALG_TYPE_MASK);
328	if (IS_ERR(alg))
329		return PTR_ERR(alg);
330
331	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
332	err = PTR_ERR(inst);
333	if (IS_ERR(inst))
334		goto out_put_alg;
335
336	ctx = crypto_instance_ctx(inst);
337	ctx->queue = queue;
338
339	err = crypto_init_spawn(&ctx->spawn, alg, inst,
340				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
341	if (err)
342		goto out_free_inst;
343
344	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
345	inst->alg.cra_type = &crypto_ablkcipher_type;
346
347	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
348	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
349	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
350
351	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
352
353	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
354
355	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
356	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
357
358	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
359	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
360	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
361
362	err = crypto_register_instance(tmpl, inst);
363	if (err) {
364		crypto_drop_spawn(&ctx->spawn);
365out_free_inst:
366		kfree(inst);
367	}
368
369out_put_alg:
370	crypto_mod_put(alg);
371	return err;
372}
373
374static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
375{
376	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
377	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
378	struct crypto_shash_spawn *spawn = &ictx->spawn;
379	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
380	struct crypto_shash *hash;
381
382	hash = crypto_spawn_shash(spawn);
383	if (IS_ERR(hash))
384		return PTR_ERR(hash);
385
386	ctx->child = hash;
387	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
388				 sizeof(struct cryptd_hash_request_ctx) +
389				 crypto_shash_descsize(hash));
390	return 0;
391}
392
393static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
394{
395	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
396
397	crypto_free_shash(ctx->child);
398}
399
400static int cryptd_hash_setkey(struct crypto_ahash *parent,
401				   const u8 *key, unsigned int keylen)
402{
403	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
404	struct crypto_shash *child = ctx->child;
405	int err;
406
407	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
408	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
409				      CRYPTO_TFM_REQ_MASK);
410	err = crypto_shash_setkey(child, key, keylen);
411	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
412				       CRYPTO_TFM_RES_MASK);
413	return err;
414}
415
416static int cryptd_hash_enqueue(struct ahash_request *req,
417				crypto_completion_t complete)
418{
419	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
420	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
421	struct cryptd_queue *queue =
422		cryptd_get_queue(crypto_ahash_tfm(tfm));
423
424	rctx->complete = req->base.complete;
425	req->base.complete = complete;
426
427	return cryptd_enqueue_request(queue, &req->base);
428}
429
430static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
431{
432	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
433	struct crypto_shash *child = ctx->child;
434	struct ahash_request *req = ahash_request_cast(req_async);
435	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
436	struct shash_desc *desc = &rctx->desc;
437
438	if (unlikely(err == -EINPROGRESS))
439		goto out;
440
441	desc->tfm = child;
442	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
443
444	err = crypto_shash_init(desc);
445
446	req->base.complete = rctx->complete;
447
448out:
449	local_bh_disable();
450	rctx->complete(&req->base, err);
451	local_bh_enable();
452}
453
454static int cryptd_hash_init_enqueue(struct ahash_request *req)
455{
456	return cryptd_hash_enqueue(req, cryptd_hash_init);
457}
458
459static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
460{
461	struct ahash_request *req = ahash_request_cast(req_async);
462	struct cryptd_hash_request_ctx *rctx;
463
464	rctx = ahash_request_ctx(req);
465
466	if (unlikely(err == -EINPROGRESS))
467		goto out;
468
469	err = shash_ahash_update(req, &rctx->desc);
470
471	req->base.complete = rctx->complete;
472
473out:
474	local_bh_disable();
475	rctx->complete(&req->base, err);
476	local_bh_enable();
477}
478
479static int cryptd_hash_update_enqueue(struct ahash_request *req)
480{
481	return cryptd_hash_enqueue(req, cryptd_hash_update);
482}
483
484static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
485{
486	struct ahash_request *req = ahash_request_cast(req_async);
487	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
488
489	if (unlikely(err == -EINPROGRESS))
490		goto out;
491
492	err = crypto_shash_final(&rctx->desc, req->result);
493
494	req->base.complete = rctx->complete;
495
496out:
497	local_bh_disable();
498	rctx->complete(&req->base, err);
499	local_bh_enable();
500}
501
502static int cryptd_hash_final_enqueue(struct ahash_request *req)
503{
504	return cryptd_hash_enqueue(req, cryptd_hash_final);
505}
506
507static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
508{
509	struct ahash_request *req = ahash_request_cast(req_async);
510	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
511
512	if (unlikely(err == -EINPROGRESS))
513		goto out;
514
515	err = shash_ahash_finup(req, &rctx->desc);
516
517	req->base.complete = rctx->complete;
518
519out:
520	local_bh_disable();
521	rctx->complete(&req->base, err);
522	local_bh_enable();
523}
524
525static int cryptd_hash_finup_enqueue(struct ahash_request *req)
526{
527	return cryptd_hash_enqueue(req, cryptd_hash_finup);
528}
529
530static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
531{
532	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
533	struct crypto_shash *child = ctx->child;
534	struct ahash_request *req = ahash_request_cast(req_async);
535	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
536	struct shash_desc *desc = &rctx->desc;
537
538	if (unlikely(err == -EINPROGRESS))
539		goto out;
540
541	desc->tfm = child;
542	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
543
544	err = shash_ahash_digest(req, desc);
545
546	req->base.complete = rctx->complete;
547
548out:
549	local_bh_disable();
550	rctx->complete(&req->base, err);
551	local_bh_enable();
552}
553
554static int cryptd_hash_digest_enqueue(struct ahash_request *req)
555{
556	return cryptd_hash_enqueue(req, cryptd_hash_digest);
557}
558
559static int cryptd_hash_export(struct ahash_request *req, void *out)
560{
561	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
562
563	return crypto_shash_export(&rctx->desc, out);
564}
565
566static int cryptd_hash_import(struct ahash_request *req, const void *in)
567{
568	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
569
570	return crypto_shash_import(&rctx->desc, in);
571}
572
573static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
574			      struct cryptd_queue *queue)
575{
576	struct hashd_instance_ctx *ctx;
577	struct ahash_instance *inst;
578	struct shash_alg *salg;
579	struct crypto_alg *alg;
580	int err;
581
582	salg = shash_attr_alg(tb[1], 0, 0);
583	if (IS_ERR(salg))
584		return PTR_ERR(salg);
585
586	alg = &salg->base;
587	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
588				     sizeof(*ctx));
589	err = PTR_ERR(inst);
590	if (IS_ERR(inst))
591		goto out_put_alg;
592
593	ctx = ahash_instance_ctx(inst);
594	ctx->queue = queue;
595
596	err = crypto_init_shash_spawn(&ctx->spawn, salg,
597				      ahash_crypto_instance(inst));
598	if (err)
599		goto out_free_inst;
600
601	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
602
603	inst->alg.halg.digestsize = salg->digestsize;
604	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
605
606	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
607	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
608
609	inst->alg.init   = cryptd_hash_init_enqueue;
610	inst->alg.update = cryptd_hash_update_enqueue;
611	inst->alg.final  = cryptd_hash_final_enqueue;
612	inst->alg.finup  = cryptd_hash_finup_enqueue;
613	inst->alg.export = cryptd_hash_export;
614	inst->alg.import = cryptd_hash_import;
615	inst->alg.setkey = cryptd_hash_setkey;
616	inst->alg.digest = cryptd_hash_digest_enqueue;
617
618	err = ahash_register_instance(tmpl, inst);
619	if (err) {
620		crypto_drop_shash(&ctx->spawn);
621out_free_inst:
622		kfree(inst);
623	}
624
625out_put_alg:
626	crypto_mod_put(alg);
627	return err;
628}
629
630static void cryptd_aead_crypt(struct aead_request *req,
631			struct crypto_aead *child,
632			int err,
633			int (*crypt)(struct aead_request *req))
634{
635	struct cryptd_aead_request_ctx *rctx;
636	rctx = aead_request_ctx(req);
637
638	if (unlikely(err == -EINPROGRESS))
639		goto out;
640	aead_request_set_tfm(req, child);
641	err = crypt(req);
642	req->base.complete = rctx->complete;
643out:
644	local_bh_disable();
645	rctx->complete(&req->base, err);
646	local_bh_enable();
647}
648
649static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
650{
651	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
652	struct crypto_aead *child = ctx->child;
653	struct aead_request *req;
654
655	req = container_of(areq, struct aead_request, base);
656	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
657}
658
659static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
660{
661	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
662	struct crypto_aead *child = ctx->child;
663	struct aead_request *req;
664
665	req = container_of(areq, struct aead_request, base);
666	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
667}
668
669static int cryptd_aead_enqueue(struct aead_request *req,
670				    crypto_completion_t complete)
671{
672	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
673	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
674	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
675
676	rctx->complete = req->base.complete;
677	req->base.complete = complete;
678	return cryptd_enqueue_request(queue, &req->base);
679}
680
681static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
682{
683	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
684}
685
686static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
687{
688	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
689}
690
691static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
692{
693	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
694	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
695	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
696	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
697	struct crypto_aead *cipher;
698
699	cipher = crypto_spawn_aead(spawn);
700	if (IS_ERR(cipher))
701		return PTR_ERR(cipher);
702
703	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
704	ctx->child = cipher;
705	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
706	return 0;
707}
708
709static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
710{
711	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
712	crypto_free_aead(ctx->child);
713}
714
715static int cryptd_create_aead(struct crypto_template *tmpl,
716		              struct rtattr **tb,
717			      struct cryptd_queue *queue)
718{
719	struct aead_instance_ctx *ctx;
720	struct crypto_instance *inst;
721	struct crypto_alg *alg;
722	int err;
723
724	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
725				CRYPTO_ALG_TYPE_MASK);
726	if (IS_ERR(alg))
727		return PTR_ERR(alg);
728
729	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
730	err = PTR_ERR(inst);
731	if (IS_ERR(inst))
732		goto out_put_alg;
733
734	ctx = crypto_instance_ctx(inst);
735	ctx->queue = queue;
736
737	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
738			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
739	if (err)
740		goto out_free_inst;
741
742	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
743	inst->alg.cra_type = alg->cra_type;
744	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
745	inst->alg.cra_init = cryptd_aead_init_tfm;
746	inst->alg.cra_exit = cryptd_aead_exit_tfm;
747	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
748	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
749	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
750	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
751	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
752	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
753	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
754	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
755	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;
756
757	err = crypto_register_instance(tmpl, inst);
758	if (err) {
759		crypto_drop_spawn(&ctx->aead_spawn.base);
760out_free_inst:
761		kfree(inst);
762	}
763out_put_alg:
764	crypto_mod_put(alg);
765	return err;
766}
767
768static struct cryptd_queue queue;
769
770static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
771{
772	struct crypto_attr_type *algt;
773
774	algt = crypto_get_attr_type(tb);
775	if (IS_ERR(algt))
776		return PTR_ERR(algt);
777
778	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
779	case CRYPTO_ALG_TYPE_BLKCIPHER:
780		return cryptd_create_blkcipher(tmpl, tb, &queue);
781	case CRYPTO_ALG_TYPE_DIGEST:
782		return cryptd_create_hash(tmpl, tb, &queue);
783	case CRYPTO_ALG_TYPE_AEAD:
784		return cryptd_create_aead(tmpl, tb, &queue);
785	}
786
787	return -EINVAL;
788}
789
790static void cryptd_free(struct crypto_instance *inst)
791{
792	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
793	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
794	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
795
796	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
797	case CRYPTO_ALG_TYPE_AHASH:
798		crypto_drop_shash(&hctx->spawn);
799		kfree(ahash_instance(inst));
800		return;
801	case CRYPTO_ALG_TYPE_AEAD:
802		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
803		kfree(inst);
804		return;
805	default:
806		crypto_drop_spawn(&ctx->spawn);
807		kfree(inst);
808	}
809}
810
811static struct crypto_template cryptd_tmpl = {
812	.name = "cryptd",
813	.create = cryptd_create,
814	.free = cryptd_free,
815	.module = THIS_MODULE,
816};
817
818struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
819						  u32 type, u32 mask)
820{
821	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
822	struct crypto_tfm *tfm;
823
824	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
825		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
826		return ERR_PTR(-EINVAL);
827	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
828	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
829	mask &= ~CRYPTO_ALG_TYPE_MASK;
830	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
831	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
832	if (IS_ERR(tfm))
833		return ERR_CAST(tfm);
834	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
835		crypto_free_tfm(tfm);
836		return ERR_PTR(-EINVAL);
837	}
838
839	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
840}
841EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
842
843struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
844{
845	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
846	return ctx->child;
847}
848EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
849
850void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
851{
852	crypto_free_ablkcipher(&tfm->base);
853}
854EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
855
856struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
857					u32 type, u32 mask)
858{
859	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
860	struct crypto_ahash *tfm;
861
862	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
863		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
864		return ERR_PTR(-EINVAL);
865	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
866	if (IS_ERR(tfm))
867		return ERR_CAST(tfm);
868	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
869		crypto_free_ahash(tfm);
870		return ERR_PTR(-EINVAL);
871	}
872
873	return __cryptd_ahash_cast(tfm);
874}
875EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
876
877struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
878{
879	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
880
881	return ctx->child;
882}
883EXPORT_SYMBOL_GPL(cryptd_ahash_child);
884
885struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
886{
887	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
888	return &rctx->desc;
889}
890EXPORT_SYMBOL_GPL(cryptd_shash_desc);
891
892void cryptd_free_ahash(struct cryptd_ahash *tfm)
893{
894	crypto_free_ahash(&tfm->base);
895}
896EXPORT_SYMBOL_GPL(cryptd_free_ahash);
897
898struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
899						  u32 type, u32 mask)
900{
901	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
902	struct crypto_aead *tfm;
903
904	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
905		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
906		return ERR_PTR(-EINVAL);
907	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
908	if (IS_ERR(tfm))
909		return ERR_CAST(tfm);
910	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
911		crypto_free_aead(tfm);
912		return ERR_PTR(-EINVAL);
913	}
914	return __cryptd_aead_cast(tfm);
915}
916EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
917
918struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
919{
920	struct cryptd_aead_ctx *ctx;
921	ctx = crypto_aead_ctx(&tfm->base);
922	return ctx->child;
923}
924EXPORT_SYMBOL_GPL(cryptd_aead_child);
925
926void cryptd_free_aead(struct cryptd_aead *tfm)
927{
928	crypto_free_aead(&tfm->base);
929}
930EXPORT_SYMBOL_GPL(cryptd_free_aead);
931
932static int __init cryptd_init(void)
933{
934	int err;
935
936	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
937	if (err)
938		return err;
939
940	err = crypto_register_template(&cryptd_tmpl);
941	if (err)
942		cryptd_fini_queue(&queue);
943
944	return err;
945}
946
947static void __exit cryptd_exit(void)
948{
949	cryptd_fini_queue(&queue);
950	crypto_unregister_template(&cryptd_tmpl);
951}
952
953subsys_initcall(cryptd_init);
954module_exit(cryptd_exit);
955
956MODULE_LICENSE("GPL");
957MODULE_DESCRIPTION("Software async crypto daemon");
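A brief usage sketch before the v5.4 version of the file below: roughly how a consumer would obtain and release a cryptd-wrapped hash through the API exported above. The surrounding function and the "sha256-generic" algorithm name are illustrative assumptions, not taken from cryptd.c, and error handling is condensed.

#include <linux/err.h>
#include <crypto/cryptd.h>

static int example_cryptd_ahash_user(void)
{
	struct cryptd_ahash *tfm;
	struct crypto_shash *child;

	/* Instantiates (or reuses) a "cryptd(sha256-generic)" instance. */
	tfm = cryptd_alloc_ahash("sha256-generic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Synchronous child transform, usable directly where sleeping is allowed. */
	child = cryptd_ahash_child(tfm);

	/* ... submit ahash requests against &tfm->base; they are processed
	 * asynchronously by the per-CPU workers on kcrypto_wq ... */

	cryptd_free_ahash(tfm);
	return 0;
}

Because cryptd_alloc_instance() raises cra_priority by 50, the cryptd wrapper outranks the underlying synchronous algorithm once it has been instantiated.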
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Software async crypto daemon.
   4 *
   5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   6 *
   7 * Added AEAD support to cryptd.
   8 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
   9 *             Adrian Hoban <adrian.hoban@intel.com>
  10 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  11 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  12 *    Copyright (c) 2010, Intel Corporation.
  13 */
  14
  15#include <crypto/internal/hash.h>
  16#include <crypto/internal/aead.h>
  17#include <crypto/internal/skcipher.h>
  18#include <crypto/cryptd.h>
  19#include <linux/refcount.h>
  20#include <linux/err.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/list.h>
  24#include <linux/module.h>
  25#include <linux/scatterlist.h>
  26#include <linux/sched.h>
  27#include <linux/slab.h>
  28#include <linux/workqueue.h>
  29
  30static unsigned int cryptd_max_cpu_qlen = 1000;
  31module_param(cryptd_max_cpu_qlen, uint, 0);
  32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
  33
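/* cryptd's dedicated workqueue, used here in place of the shared kcrypto_wq seen in the v3.15 code above. */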
  34static struct workqueue_struct *cryptd_wq;
  35
  36struct cryptd_cpu_queue {
  37	struct crypto_queue queue;
  38	struct work_struct work;
  39};
  40
  41struct cryptd_queue {
  42	struct cryptd_cpu_queue __percpu *cpu_queue;
  43};
  44
  45struct cryptd_instance_ctx {
  46	struct crypto_spawn spawn;
  47	struct cryptd_queue *queue;
  48};
  49
  50struct skcipherd_instance_ctx {
  51	struct crypto_skcipher_spawn spawn;
  52	struct cryptd_queue *queue;
  53};
  54
  55struct hashd_instance_ctx {
  56	struct crypto_shash_spawn spawn;
  57	struct cryptd_queue *queue;
  58};
  59
  60struct aead_instance_ctx {
  61	struct crypto_aead_spawn aead_spawn;
  62	struct cryptd_queue *queue;
  63};
  64
  65struct cryptd_skcipher_ctx {
  66	refcount_t refcnt;
  67	struct crypto_sync_skcipher *child;
  68};
  69
  70struct cryptd_skcipher_request_ctx {
  71	crypto_completion_t complete;
  72};
  73
  74struct cryptd_hash_ctx {
  75	refcount_t refcnt;
  76	struct crypto_shash *child;
  77};
  78
  79struct cryptd_hash_request_ctx {
  80	crypto_completion_t complete;
  81	struct shash_desc desc;
  82};
  83
  84struct cryptd_aead_ctx {
  85	refcount_t refcnt;
  86	struct crypto_aead *child;
  87};
  88
  89struct cryptd_aead_request_ctx {
  90	crypto_completion_t complete;
  91};
  92
  93static void cryptd_queue_worker(struct work_struct *work);
  94
  95static int cryptd_init_queue(struct cryptd_queue *queue,
  96			     unsigned int max_cpu_qlen)
  97{
  98	int cpu;
  99	struct cryptd_cpu_queue *cpu_queue;
 100
 101	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
 102	if (!queue->cpu_queue)
 103		return -ENOMEM;
 104	for_each_possible_cpu(cpu) {
 105		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 106		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 107		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
 108	}
 109	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 110	return 0;
 111}
 112
 113static void cryptd_fini_queue(struct cryptd_queue *queue)
 114{
 115	int cpu;
 116	struct cryptd_cpu_queue *cpu_queue;
 117
 118	for_each_possible_cpu(cpu) {
 119		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 120		BUG_ON(cpu_queue->queue.qlen);
 121	}
 122	free_percpu(queue->cpu_queue);
 123}
 124
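/*
 * As in v3.15, queue on the local CPU; in addition, if the tfm context is
 * refcounted (i.e. allocated through a cryptd_alloc_* helper), take a
 * reference so the context outlives the queued request.
 */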
 125static int cryptd_enqueue_request(struct cryptd_queue *queue,
 126				  struct crypto_async_request *request)
 127{
 128	int cpu, err;
 129	struct cryptd_cpu_queue *cpu_queue;
 130	refcount_t *refcnt;
 131
 132	cpu = get_cpu();
 133	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 134	err = crypto_enqueue_request(&cpu_queue->queue, request);
 135
 136	refcnt = crypto_tfm_ctx(request->tfm);
 137
 138	if (err == -ENOSPC)
 139		goto out_put_cpu;
 140
 141	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
 142
 143	if (!refcount_read(refcnt))
 144		goto out_put_cpu;
 145
 146	refcount_inc(refcnt);
 147
 148out_put_cpu:
 149	put_cpu();
 150
 151	return err;
 152}
 153
 154/* Called in workqueue context; performs one unit of crypto work (via
 155 * req->complete) and reschedules itself if there is more work to
 156 * do. */
 157static void cryptd_queue_worker(struct work_struct *work)
 158{
 159	struct cryptd_cpu_queue *cpu_queue;
 160	struct crypto_async_request *req, *backlog;
 161
 162	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 163	/*
 164	 * Only handle one request at a time to avoid hogging crypto workqueue.
 165	 * preempt_disable/enable is used to prevent being preempted by
 166	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
 167	 * cryptd_enqueue_request() from being called from software interrupts.
 168	 */
 169	local_bh_disable();
 170	preempt_disable();
 171	backlog = crypto_get_backlog(&cpu_queue->queue);
 172	req = crypto_dequeue_request(&cpu_queue->queue);
 173	preempt_enable();
 174	local_bh_enable();
 175
 176	if (!req)
 177		return;
 178
 179	if (backlog)
 180		backlog->complete(backlog, -EINPROGRESS);
 181	req->complete(req, 0);
 182
 183	if (cpu_queue->queue.qlen)
 184		queue_work(cryptd_wq, &cpu_queue->work);
 185}
 186
 187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 188{
 189	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 190	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 191	return ictx->queue;
 192}
 193
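/*
 * Propagate the CRYPTO_ALG_INTERNAL bits from the template parameters so
 * that wrapping an internal-only algorithm yields an internal-only instance.
 */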
 194static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
 195					 u32 *mask)
 196{
 197	struct crypto_attr_type *algt;
 198
 199	algt = crypto_get_attr_type(tb);
 200	if (IS_ERR(algt))
 201		return;
 202
 203	*type |= algt->type & CRYPTO_ALG_INTERNAL;
 204	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 205}
 206
 207static int cryptd_init_instance(struct crypto_instance *inst,
 208				struct crypto_alg *alg)
 209{
 210	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 211		     "cryptd(%s)",
 212		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 213		return -ENAMETOOLONG;
 214
 215	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 216
 217	inst->alg.cra_priority = alg->cra_priority + 50;
 218	inst->alg.cra_blocksize = alg->cra_blocksize;
 219	inst->alg.cra_alignmask = alg->cra_alignmask;
 220
 221	return 0;
 222}
 223
 224static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
 225				   unsigned int tail)
 226{
 227	char *p;
 228	struct crypto_instance *inst;
 229	int err;
 230
 231	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
 232	if (!p)
 233		return ERR_PTR(-ENOMEM);
 234
 235	inst = (void *)(p + head);
 236
 237	err = cryptd_init_instance(inst, alg);
 238	if (err)
 239		goto out_free_inst;
 240
 241out:
 242	return p;
 243
 244out_free_inst:
 245	kfree(p);
 246	p = ERR_PTR(err);
 247	goto out;
 248}
 249
 250static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 251				  const u8 *key, unsigned int keylen)
 252{
 253	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
 254	struct crypto_sync_skcipher *child = ctx->child;
 255	int err;
 256
 257	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 258	crypto_sync_skcipher_set_flags(child,
 259				       crypto_skcipher_get_flags(parent) &
 260					 CRYPTO_TFM_REQ_MASK);
 261	err = crypto_sync_skcipher_setkey(child, key, keylen);
 262	crypto_skcipher_set_flags(parent,
 263				  crypto_sync_skcipher_get_flags(child) &
 264					  CRYPTO_TFM_RES_MASK);
 265	return err;
 266}
 267
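/*
 * Run the caller's completion with BHs disabled, then drop the reference
 * taken at enqueue time; the tfm is freed here if cryptd_free_skcipher()
 * has already released the allocation reference.
 */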
 268static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
 269{
 270	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 271	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 272	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 273	int refcnt = refcount_read(&ctx->refcnt);
 274
 275	local_bh_disable();
 276	rctx->complete(&req->base, err);
 277	local_bh_enable();
 278
 279	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 280		crypto_free_skcipher(tfm);
 281}
 282
 283static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
 284				    int err)
 285{
 286	struct skcipher_request *req = skcipher_request_cast(base);
 287	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 288	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 289	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 290	struct crypto_sync_skcipher *child = ctx->child;
 291	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 292
 293	if (unlikely(err == -EINPROGRESS))
 294		goto out;
 295
 296	skcipher_request_set_sync_tfm(subreq, child);
 297	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 298				      NULL, NULL);
 299	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 300				   req->iv);
 301
 302	err = crypto_skcipher_encrypt(subreq);
 303	skcipher_request_zero(subreq);
 304
 305	req->base.complete = rctx->complete;
 306
 307out:
 308	cryptd_skcipher_complete(req, err);
 309}
 310
 311static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
 312				    int err)
 313{
 314	struct skcipher_request *req = skcipher_request_cast(base);
 315	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 316	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 317	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 318	struct crypto_sync_skcipher *child = ctx->child;
 319	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 320
 321	if (unlikely(err == -EINPROGRESS))
 322		goto out;
 323
 324	skcipher_request_set_sync_tfm(subreq, child);
 325	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 326				      NULL, NULL);
 327	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 328				   req->iv);
 329
 330	err = crypto_skcipher_decrypt(subreq);
 331	skcipher_request_zero(subreq);
 332
 333	req->base.complete = rctx->complete;
 334
 335out:
 336	cryptd_skcipher_complete(req, err);
 337}
 338
 339static int cryptd_skcipher_enqueue(struct skcipher_request *req,
 340				   crypto_completion_t compl)
 341{
 342	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 343	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 344	struct cryptd_queue *queue;
 345
 346	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
 347	rctx->complete = req->base.complete;
 348	req->base.complete = compl;
 349
 350	return cryptd_enqueue_request(queue, &req->base);
 351}
 352
 353static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
 354{
 355	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
 356}
 357
 358static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
 359{
 360	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
 361}
 362
 363static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 364{
 365	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 366	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
 367	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
 368	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 369	struct crypto_skcipher *cipher;
 370
 371	cipher = crypto_spawn_skcipher(spawn);
 372	if (IS_ERR(cipher))
 373		return PTR_ERR(cipher);
 374
 375	ctx->child = (struct crypto_sync_skcipher *)cipher;
 376	crypto_skcipher_set_reqsize(
 377		tfm, sizeof(struct cryptd_skcipher_request_ctx));
 378	return 0;
 379}
 380
 381static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
 382{
 383	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 384
 385	crypto_free_sync_skcipher(ctx->child);
 386}
 387
 388static void cryptd_skcipher_free(struct skcipher_instance *inst)
 389{
 390	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
 391
 392	crypto_drop_skcipher(&ctx->spawn);
 393	kfree(inst);
 394}
 395
 396static int cryptd_create_skcipher(struct crypto_template *tmpl,
 397				  struct rtattr **tb,
 398				  struct cryptd_queue *queue)
 399{
 400	struct skcipherd_instance_ctx *ctx;
 401	struct skcipher_instance *inst;
 402	struct skcipher_alg *alg;
 403	const char *name;
 404	u32 type;
 405	u32 mask;
 406	int err;
 407
 408	type = 0;
 409	mask = CRYPTO_ALG_ASYNC;
 410
 411	cryptd_check_internal(tb, &type, &mask);
 412
 413	name = crypto_attr_alg_name(tb[1]);
 414	if (IS_ERR(name))
 415		return PTR_ERR(name);
 416
 417	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 418	if (!inst)
 419		return -ENOMEM;
 420
 421	ctx = skcipher_instance_ctx(inst);
 422	ctx->queue = queue;
 423
 424	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
 425	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
 426	if (err)
 427		goto out_free_inst;
 428
 429	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
 430	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
 431	if (err)
 432		goto out_drop_skcipher;
 433
 434	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
 435				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 436
 437	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
 438	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
 439	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
 440	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
 441
 442	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
 443
 444	inst->alg.init = cryptd_skcipher_init_tfm;
 445	inst->alg.exit = cryptd_skcipher_exit_tfm;
 446
 447	inst->alg.setkey = cryptd_skcipher_setkey;
 448	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
 449	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
 450
 451	inst->free = cryptd_skcipher_free;
 452
 453	err = skcipher_register_instance(tmpl, inst);
 454	if (err) {
 455out_drop_skcipher:
 456		crypto_drop_skcipher(&ctx->spawn);
 457out_free_inst:
 458		kfree(inst);
 459	}
 460	return err;
 461}
 462
 463static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 464{
 465	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 466	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
 467	struct crypto_shash_spawn *spawn = &ictx->spawn;
 468	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 469	struct crypto_shash *hash;
 470
 471	hash = crypto_spawn_shash(spawn);
 472	if (IS_ERR(hash))
 473		return PTR_ERR(hash);
 474
 475	ctx->child = hash;
 476	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 477				 sizeof(struct cryptd_hash_request_ctx) +
 478				 crypto_shash_descsize(hash));
 479	return 0;
 480}
 481
 482static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 483{
 484	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 485
 486	crypto_free_shash(ctx->child);
 487}
 488
 489static int cryptd_hash_setkey(struct crypto_ahash *parent,
 490				   const u8 *key, unsigned int keylen)
 491{
 492	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
 493	struct crypto_shash *child = ctx->child;
 494	int err;
 495
 496	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 497	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
 498				      CRYPTO_TFM_REQ_MASK);
 499	err = crypto_shash_setkey(child, key, keylen);
 500	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
 501				       CRYPTO_TFM_RES_MASK);
 502	return err;
 503}
 504
 505static int cryptd_hash_enqueue(struct ahash_request *req,
 506				crypto_completion_t compl)
 507{
 508	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 509	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 510	struct cryptd_queue *queue =
 511		cryptd_get_queue(crypto_ahash_tfm(tfm));
 512
 513	rctx->complete = req->base.complete;
 514	req->base.complete = compl;
 515
 516	return cryptd_enqueue_request(queue, &req->base);
 517}
 518
 519static void cryptd_hash_complete(struct ahash_request *req, int err)
 520{
 521	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 522	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 523	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 524	int refcnt = refcount_read(&ctx->refcnt);
 525
 526	local_bh_disable();
 527	rctx->complete(&req->base, err);
 528	local_bh_enable();
 529
 530	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 531		crypto_free_ahash(tfm);
 532}
 533
 534static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 535{
 536	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 537	struct crypto_shash *child = ctx->child;
 538	struct ahash_request *req = ahash_request_cast(req_async);
 539	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 540	struct shash_desc *desc = &rctx->desc;
 541
 542	if (unlikely(err == -EINPROGRESS))
 543		goto out;
 544
 545	desc->tfm = child;
 546
 547	err = crypto_shash_init(desc);
 548
 549	req->base.complete = rctx->complete;
 550
 551out:
 552	cryptd_hash_complete(req, err);
 553}
 554
 555static int cryptd_hash_init_enqueue(struct ahash_request *req)
 556{
 557	return cryptd_hash_enqueue(req, cryptd_hash_init);
 558}
 559
 560static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
 561{
 562	struct ahash_request *req = ahash_request_cast(req_async);
 563	struct cryptd_hash_request_ctx *rctx;
 564
 565	rctx = ahash_request_ctx(req);
 566
 567	if (unlikely(err == -EINPROGRESS))
 568		goto out;
 569
 570	err = shash_ahash_update(req, &rctx->desc);
 571
 572	req->base.complete = rctx->complete;
 573
 574out:
 575	cryptd_hash_complete(req, err);
 576}
 577
 578static int cryptd_hash_update_enqueue(struct ahash_request *req)
 579{
 580	return cryptd_hash_enqueue(req, cryptd_hash_update);
 581}
 582
 583static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
 584{
 585	struct ahash_request *req = ahash_request_cast(req_async);
 586	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 587
 588	if (unlikely(err == -EINPROGRESS))
 589		goto out;
 590
 591	err = crypto_shash_final(&rctx->desc, req->result);
 592
 593	req->base.complete = rctx->complete;
 594
 595out:
 596	cryptd_hash_complete(req, err);
 597}
 598
 599static int cryptd_hash_final_enqueue(struct ahash_request *req)
 600{
 601	return cryptd_hash_enqueue(req, cryptd_hash_final);
 602}
 603
 604static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
 605{
 606	struct ahash_request *req = ahash_request_cast(req_async);
 607	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 608
 609	if (unlikely(err == -EINPROGRESS))
 610		goto out;
 611
 612	err = shash_ahash_finup(req, &rctx->desc);
 613
 614	req->base.complete = rctx->complete;
 615
 616out:
 617	cryptd_hash_complete(req, err);
 618}
 619
 620static int cryptd_hash_finup_enqueue(struct ahash_request *req)
 621{
 622	return cryptd_hash_enqueue(req, cryptd_hash_finup);
 623}
 624
 625static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 626{
 627	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 628	struct crypto_shash *child = ctx->child;
 629	struct ahash_request *req = ahash_request_cast(req_async);
 630	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 631	struct shash_desc *desc = &rctx->desc;
 632
 633	if (unlikely(err == -EINPROGRESS))
 634		goto out;
 635
 636	desc->tfm = child;
 637
 638	err = shash_ahash_digest(req, desc);
 639
 640	req->base.complete = rctx->complete;
 641
 642out:
 643	cryptd_hash_complete(req, err);
 644}
 645
 646static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 647{
 648	return cryptd_hash_enqueue(req, cryptd_hash_digest);
 649}
 650
 651static int cryptd_hash_export(struct ahash_request *req, void *out)
 652{
 653	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 654
 655	return crypto_shash_export(&rctx->desc, out);
 656}
 657
 658static int cryptd_hash_import(struct ahash_request *req, const void *in)
 659{
 660	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 661	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 662	struct shash_desc *desc = cryptd_shash_desc(req);
 663
 664	desc->tfm = ctx->child;
 665
 666	return crypto_shash_import(desc, in);
 667}
 668
 669static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 670			      struct cryptd_queue *queue)
 671{
 672	struct hashd_instance_ctx *ctx;
 673	struct ahash_instance *inst;
 674	struct shash_alg *salg;
 675	struct crypto_alg *alg;
 676	u32 type = 0;
 677	u32 mask = 0;
 678	int err;
 679
 680	cryptd_check_internal(tb, &type, &mask);
 681
 682	salg = shash_attr_alg(tb[1], type, mask);
 683	if (IS_ERR(salg))
 684		return PTR_ERR(salg);
 685
 686	alg = &salg->base;
 687	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
 688				     sizeof(*ctx));
 689	err = PTR_ERR(inst);
 690	if (IS_ERR(inst))
 691		goto out_put_alg;
 692
 693	ctx = ahash_instance_ctx(inst);
 694	ctx->queue = queue;
 695
 696	err = crypto_init_shash_spawn(&ctx->spawn, salg,
 697				      ahash_crypto_instance(inst));
 698	if (err)
 699		goto out_free_inst;
 700
 701	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
 702		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
 703				   CRYPTO_ALG_OPTIONAL_KEY));
 704
 705	inst->alg.halg.digestsize = salg->digestsize;
 706	inst->alg.halg.statesize = salg->statesize;
 707	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 708
 709	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
 710	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
 711
 712	inst->alg.init   = cryptd_hash_init_enqueue;
 713	inst->alg.update = cryptd_hash_update_enqueue;
 714	inst->alg.final  = cryptd_hash_final_enqueue;
 715	inst->alg.finup  = cryptd_hash_finup_enqueue;
 716	inst->alg.export = cryptd_hash_export;
 717	inst->alg.import = cryptd_hash_import;
 718	if (crypto_shash_alg_has_setkey(salg))
 719		inst->alg.setkey = cryptd_hash_setkey;
 720	inst->alg.digest = cryptd_hash_digest_enqueue;
 721
 722	err = ahash_register_instance(tmpl, inst);
 723	if (err) {
 724		crypto_drop_shash(&ctx->spawn);
 725out_free_inst:
 726		kfree(inst);
 727	}
 728
 729out_put_alg:
 730	crypto_mod_put(alg);
 731	return err;
 732}
 733
 734static int cryptd_aead_setkey(struct crypto_aead *parent,
 735			      const u8 *key, unsigned int keylen)
 736{
 737	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 738	struct crypto_aead *child = ctx->child;
 739
 740	return crypto_aead_setkey(child, key, keylen);
 741}
 742
 743static int cryptd_aead_setauthsize(struct crypto_aead *parent,
 744				   unsigned int authsize)
 745{
 746	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 747	struct crypto_aead *child = ctx->child;
 748
 749	return crypto_aead_setauthsize(child, authsize);
 750}
 751
 752static void cryptd_aead_crypt(struct aead_request *req,
 753			struct crypto_aead *child,
 754			int err,
 755			int (*crypt)(struct aead_request *req))
 756{
 757	struct cryptd_aead_request_ctx *rctx;
 758	struct cryptd_aead_ctx *ctx;
 759	crypto_completion_t compl;
 760	struct crypto_aead *tfm;
 761	int refcnt;
 762
 763	rctx = aead_request_ctx(req);
 764	compl = rctx->complete;
 765
 766	tfm = crypto_aead_reqtfm(req);
 767
 768	if (unlikely(err == -EINPROGRESS))
 769		goto out;
 770	aead_request_set_tfm(req, child);
 771	err = crypt(req);
 772
 773out:
 774	ctx = crypto_aead_ctx(tfm);
 775	refcnt = refcount_read(&ctx->refcnt);
 776
 777	local_bh_disable();
 778	compl(&req->base, err);
 779	local_bh_enable();
 780
 781	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 782		crypto_free_aead(tfm);
 783}
 784
 785static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
 786{
 787	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 788	struct crypto_aead *child = ctx->child;
 789	struct aead_request *req;
 790
 791	req = container_of(areq, struct aead_request, base);
 792	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 793}
 794
 795static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 796{
 797	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 798	struct crypto_aead *child = ctx->child;
 799	struct aead_request *req;
 800
 801	req = container_of(areq, struct aead_request, base);
 802	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 803}
 804
 805static int cryptd_aead_enqueue(struct aead_request *req,
 806				    crypto_completion_t compl)
 807{
 808	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
 809	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 810	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 811
 812	rctx->complete = req->base.complete;
 813	req->base.complete = compl;
 814	return cryptd_enqueue_request(queue, &req->base);
 815}
 816
 817static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
 818{
 819	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
 820}
 821
 822static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
 823{
 824	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
 825}
 826
 827static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
 828{
 829	struct aead_instance *inst = aead_alg_instance(tfm);
 830	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
 831	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
 832	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
 833	struct crypto_aead *cipher;
 834
 835	cipher = crypto_spawn_aead(spawn);
 836	if (IS_ERR(cipher))
 837		return PTR_ERR(cipher);
 838
 839	ctx->child = cipher;
 840	crypto_aead_set_reqsize(
 841		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
 842			 crypto_aead_reqsize(cipher)));
 843	return 0;
 844}
 845
 846static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
 847{
 848	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
 849	crypto_free_aead(ctx->child);
 850}
 851
 852static int cryptd_create_aead(struct crypto_template *tmpl,
 853		              struct rtattr **tb,
 854			      struct cryptd_queue *queue)
 855{
 856	struct aead_instance_ctx *ctx;
 857	struct aead_instance *inst;
 858	struct aead_alg *alg;
 859	const char *name;
 860	u32 type = 0;
 861	u32 mask = CRYPTO_ALG_ASYNC;
 862	int err;
 863
 864	cryptd_check_internal(tb, &type, &mask);
 865
 866	name = crypto_attr_alg_name(tb[1]);
 867	if (IS_ERR(name))
 868		return PTR_ERR(name);
 869
 870	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 871	if (!inst)
 872		return -ENOMEM;
 873
 874	ctx = aead_instance_ctx(inst);
 875	ctx->queue = queue;
 876
 877	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
 878	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
 879	if (err)
 880		goto out_free_inst;
 881
 882	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
 883	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
 884	if (err)
 885		goto out_drop_aead;
 886
 887	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
 888				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 889	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 890
 891	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 892	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 893
 894	inst->alg.init = cryptd_aead_init_tfm;
 895	inst->alg.exit = cryptd_aead_exit_tfm;
 896	inst->alg.setkey = cryptd_aead_setkey;
 897	inst->alg.setauthsize = cryptd_aead_setauthsize;
 898	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
 899	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
 900
 901	err = aead_register_instance(tmpl, inst);
 902	if (err) {
 903out_drop_aead:
 904		crypto_drop_aead(&ctx->aead_spawn);
 905out_free_inst:
 906		kfree(inst);
 907	}
 908	return err;
 909}
 910
 911static struct cryptd_queue queue;
 912
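/*
 * Template entry point: a single "cryptd" template serves ciphers,
 * hashes and AEADs, so dispatch on the algorithm type requested by
 * the instance parameters.
 */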
 913static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 914{
 915	struct crypto_attr_type *algt;
 916
 917	algt = crypto_get_attr_type(tb);
 918	if (IS_ERR(algt))
 919		return PTR_ERR(algt);
 920
 921	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 922	case CRYPTO_ALG_TYPE_BLKCIPHER:
 923		return cryptd_create_skcipher(tmpl, tb, &queue);
 924	case CRYPTO_ALG_TYPE_HASH:
 925		return cryptd_create_hash(tmpl, tb, &queue);
 926	case CRYPTO_ALG_TYPE_AEAD:
 927		return cryptd_create_aead(tmpl, tb, &queue);
 928	}
 929
 930	return -EINVAL;
 931}
 932
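/*
 * The three context pointers below all alias the same instance
 * context memory; the algorithm type tells us which layout is
 * actually in place, and therefore which spawn type must be dropped
 * and which wrapper structure must be freed.
 */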
 933static void cryptd_free(struct crypto_instance *inst)
 934{
 935	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
 936	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
 937	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 938
 939	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 940	case CRYPTO_ALG_TYPE_AHASH:
 941		crypto_drop_shash(&hctx->spawn);
 942		kfree(ahash_instance(inst));
 943		return;
 944	case CRYPTO_ALG_TYPE_AEAD:
 945		crypto_drop_aead(&aead_ctx->aead_spawn);
 946		kfree(aead_instance(inst));
 947		return;
 948	default:
 949		crypto_drop_spawn(&ctx->spawn);
 950		kfree(inst);
 951	}
 952}
 953
 954static struct crypto_template cryptd_tmpl = {
 955	.name = "cryptd",
 956	.create = cryptd_create,
 957	.free = cryptd_free,
 958	.module = THIS_MODULE,
 959};
 960
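/*
 * Allocate a cryptd-wrapped skcipher by name.  The refcount starts at
 * one (the allocator's reference) and is bumped for every request in
 * flight, so the tfm cannot disappear underneath the worker.
 *
 * Usage sketch (illustrative only -- "cbc(aes)" stands in for whatever
 * synchronous cipher a caller wants pushed to the workqueue):
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...
 *	cryptd_free_skcipher(ctfm);
 */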
 961struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
 962					      u32 type, u32 mask)
 963{
 964	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 965	struct cryptd_skcipher_ctx *ctx;
 966	struct crypto_skcipher *tfm;
 967
 968	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 969		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 970		return ERR_PTR(-EINVAL);
 971
 972	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
 973	if (IS_ERR(tfm))
 974		return ERR_CAST(tfm);
 975
 976	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
 977		crypto_free_skcipher(tfm);
 978		return ERR_PTR(-EINVAL);
 979	}
 980
 981	ctx = crypto_skcipher_ctx(tfm);
 982	refcount_set(&ctx->refcnt, 1);
 983
 984	return container_of(tfm, struct cryptd_skcipher, base);
 985}
 986EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
 987
 988struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
 989{
 990	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 991
 992	return &ctx->child->base;
 993}
 994EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
 995
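/*
 * True if requests are still queued or in flight on this tfm: the
 * refcount is one for the allocator plus one per outstanding request,
 * so anything above one means work is pending.  The same convention
 * is used by the ahash and aead variants further down.
 */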
 996bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
 997{
 998	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 999
1000	return refcount_read(&ctx->refcnt) - 1;
1001}
1002EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
1003
1004void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
1005{
1006	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1007
1008	if (refcount_dec_and_test(&ctx->refcnt))
1009		crypto_free_skcipher(&tfm->base);
1010}
1011EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
1012
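/*
 * As with the skcipher variant: build the "cryptd(...)" name, allocate
 * it, and reject anything not provided by this module.  A different
 * provider could otherwise satisfy the name, and the context layout
 * assumed below would not match.
 */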
1013struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1014					u32 type, u32 mask)
1015{
1016	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1017	struct cryptd_hash_ctx *ctx;
1018	struct crypto_ahash *tfm;
1019
1020	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1021		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1022		return ERR_PTR(-EINVAL);
1023	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
1024	if (IS_ERR(tfm))
1025		return ERR_CAST(tfm);
1026	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1027		crypto_free_ahash(tfm);
1028		return ERR_PTR(-EINVAL);
1029	}
1030
1031	ctx = crypto_ahash_ctx(tfm);
1032	refcount_set(&ctx->refcnt, 1);
1033
1034	return __cryptd_ahash_cast(tfm);
1035}
1036EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
1037
1038struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
1039{
1040	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1041
1042	return ctx->child;
1043}
1044EXPORT_SYMBOL_GPL(cryptd_ahash_child);
1045
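/*
 * Expose the shash descriptor embedded in the request context, so
 * callers that complete a request synchronously can drive the child
 * shash directly instead of going through the queue.
 */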
1046struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
1047{
1048	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

1049	return &rctx->desc;
1050}
1051EXPORT_SYMBOL_GPL(cryptd_shash_desc);
1052
1053bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1054{
1055	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1056
1057	return refcount_read(&ctx->refcnt) - 1;
1058}
1059EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1060
1061void cryptd_free_ahash(struct cryptd_ahash *tfm)
1062{
1063	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1064
1065	if (refcount_dec_and_test(&ctx->refcnt))
1066		crypto_free_ahash(&tfm->base);
1067}
1068EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1069
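/*
 * AEAD flavour of the allocator above.  Usage sketch (illustrative;
 * the name and flags are examples, not taken from this file -- callers
 * wrapping an internal-only implementation would pass
 * CRYPTO_ALG_INTERNAL as both type and mask):
 *
 *	struct cryptd_aead *ctd;
 *
 *	ctd = cryptd_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(ctd))
 *		return PTR_ERR(ctd);
 *	...
 *	cryptd_free_aead(ctd);
 */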
1070	struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1071				      u32 type, u32 mask)
1072{
1073	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1074	struct cryptd_aead_ctx *ctx;
1075	struct crypto_aead *tfm;
1076
1077	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1078		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1079		return ERR_PTR(-EINVAL);
1080	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1081	if (IS_ERR(tfm))
1082		return ERR_CAST(tfm);
1083	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1084		crypto_free_aead(tfm);
1085		return ERR_PTR(-EINVAL);
1086	}
1087
1088	ctx = crypto_aead_ctx(tfm);
1089	refcount_set(&ctx->refcnt, 1);
1090
1091	return __cryptd_aead_cast(tfm);
1092}
1093EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1094
1095struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1096{
1097	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1098
1099	return ctx->child;
1100}
1101EXPORT_SYMBOL_GPL(cryptd_aead_child);
1102
1103bool cryptd_aead_queued(struct cryptd_aead *tfm)
1104{
1105	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1106
1107	return refcount_read(&ctx->refcnt) - 1;
1108}
1109EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1110
1111void cryptd_free_aead(struct cryptd_aead *tfm)
1112{
1113	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1114
1115	if (refcount_dec_and_test(&ctx->refcnt))
1116		crypto_free_aead(&tfm->base);
1117}
1118EXPORT_SYMBOL_GPL(cryptd_free_aead);
1119
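/*
 * Module init: bring up the dedicated workqueue first, then the
 * per-CPU request queues, and only then register the template, so no
 * instance can be created before the machinery behind it exists.
 * Each error path unwinds exactly the steps already taken.
 */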
1120static int __init cryptd_init(void)
1121{
1122	int err;
1123
1124	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1125				    1);
1126	if (!cryptd_wq)
1127		return -ENOMEM;
1128
1129	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1130	if (err)
1131		goto err_destroy_wq;
1132
1133	err = crypto_register_template(&cryptd_tmpl);
1134	if (err)
1135		goto err_fini_queue;
1136
1137	return 0;
1138
1139err_fini_queue:
1140	cryptd_fini_queue(&queue);
1141err_destroy_wq:
1142	destroy_workqueue(cryptd_wq);
1143	return err;
1144}
1145
1146static void __exit cryptd_exit(void)
1147{
1148	destroy_workqueue(cryptd_wq);
1149	cryptd_fini_queue(&queue);
1150	crypto_unregister_template(&cryptd_tmpl);
1151}
1152
1153subsys_initcall(cryptd_init);
1154module_exit(cryptd_exit);
1155
1156MODULE_LICENSE("GPL");
1157MODULE_DESCRIPTION("Software async crypto daemon");
1158MODULE_ALIAS_CRYPTO("cryptd");