/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

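/*
 * MCRYPTD_MAX_CPU_QLEN bounds the length of each per-CPU request
 * queue; MCRYPTD_BATCH bounds how many requests the queue worker
 * handles per invocation before rescheduling itself (unless it is
 * the only task running on the CPU).
 */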
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

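/*
 * Put a per-CPU multi-buffer state on this CPU's flush list (unless it
 * is already there) and schedule delayed flush work, so that partially
 * filled job lanes are completed after at most @delay jiffies.
 */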
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
			&cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

static int mcryptd_init_queue(struct mcryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

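/*
 * Enqueue a request on the current CPU's queue and kick that CPU's
 * worker.  The submitting CPU is recorded in the request context's
 * tag.
 */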
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				  struct crypto_async_request *request,
				  struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: do one piece of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

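/*
 * Delayed-work handler: if this CPU's multi-buffer state is still on
 * the flush list when the timer fires, remove it and invoke the
 * algorithm's flusher to complete partially filled job lanes.
 */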
void mcryptd_flusher(struct work_struct *__work)
{
	struct	mcryptd_alg_cstate	*alg_cpu_state;
	struct	mcryptd_alg_state	*alg_state;
	struct	mcryptd_flush_list	*flist;
	int	cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
				cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

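/* Map a transform back to the mcryptd queue in its instance context. */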
static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

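/*
 * Allocate a crypto instance with @head bytes of headroom before it
 * and @tail bytes of context after it, deriving the "mcryptd(...)"
 * names and the basic algorithm properties from @alg.
 */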
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		    "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

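/*
 * Carry the CRYPTO_ALG_INTERNAL bits over from the template attributes
 * into @type and @mask, so internal-only multi-buffer algorithms can
 * be looked up and wrapped.
 */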
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;
	if ((algt->type & CRYPTO_ALG_INTERNAL))
		*type |= CRYPTO_ALG_INTERNAL;
	if ((algt->mask & CRYPTO_ALG_INTERNAL))
		*mask |= CRYPTO_ALG_INTERNAL;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

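/*
 * Save the caller's completion callback in the request context, swap
 * in @complete, and queue the request on the current CPU.
 */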
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

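/*
 * Build the mcryptd ahash instance around an shash algorithm, wiring
 * the enqueueing entry points above into the instance's hash ops.
 */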
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	mcryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
					sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	break;
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

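/*
 * Allocate an mcryptd-wrapped ahash by name ("foo" is looked up as
 * "mcryptd(foo)") and verify that the resulting transform really
 * belongs to this module.
 */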
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

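/*
 * The shash_ahash_mcryptd_* helpers drive the wrapped shash directly.
 * Data passing and any alignment handling are left to the multi-buffer
 * algorithm itself, hence the NULL/0 data arguments below.
 */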
int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");
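
/*
 * Example (illustration only, not part of mcryptd.c): a minimal sketch
 * of how a client could allocate and release an mcryptd-wrapped hash.
 * The provider name "sha1_mb" and the function example_mcryptd_use()
 * are hypothetical; the wrapped algorithm must be a multi-buffer shash
 * registered with CRYPTO_ALG_INTERNAL for this lookup to succeed.
 */
static int __maybe_unused example_mcryptd_use(void)
{
	struct mcryptd_ahash *mtfm;
	struct crypto_shash *child;

	/*
	 * Looks up "mcryptd(sha1_mb)"; both type and mask carry the
	 * internal flag so the hidden provider can be matched.
	 */
	mtfm = mcryptd_alloc_ahash("sha1_mb", CRYPTO_ALG_INTERNAL,
				   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mtfm))
		return PTR_ERR(mtfm);

	/* The inner multi-buffer shash remains reachable for direct use. */
	child = mcryptd_ahash_child(mtfm);
	pr_info("mcryptd child: %s\n",
		crypto_tfm_alg_driver_name(crypto_shash_tfm(child)));

	mcryptd_free_ahash(mtfm);
	return 0;
}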