Linux v4.17: crypto/cryptd.c
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
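/*
 * Note: with a permission argument of 0 the parameter gets no sysfs
 * entry, so it can only be set at load time, e.g.
 * "modprobe cryptd cryptd_max_cpu_qlen=2000", or via
 * "cryptd.cryptd_max_cpu_qlen=2000" on the kernel command line when
 * cryptd is built in.
 */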

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	atomic_t refcnt;
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
	atomic_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	atomic_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	atomic_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

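/*
 * Every cryptd context structure above (blkcipher, skcipher, hash and
 * aead) begins with an atomic_t refcnt, so cryptd_enqueue_request() can
 * treat the tfm context as a bare refcount without knowing which
 * algorithm type it is dealing with.
 */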
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;
	atomic_t *refcnt;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out_put_cpu;

	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	if (!atomic_read(refcnt))
		goto out_put_cpu;

	atomic_inc(refcnt);

out_put_cpu:
	put_cpu();

	return err;
}

 165
 166/* Called in workqueue context, do one real cryption work (via
 167 * req->complete) and reschedule itself if there are more work to
 168 * do. */
 169static void cryptd_queue_worker(struct work_struct *work)
 170{
 171	struct cryptd_cpu_queue *cpu_queue;
 172	struct crypto_async_request *req, *backlog;
 173
 174	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 175	/*
 176	 * Only handle one request at a time to avoid hogging crypto workqueue.
 177	 * preempt_disable/enable is used to prevent being preempted by
 178	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
 179	 * cryptd_enqueue_request() being accessed from software interrupts.
 180	 */
 181	local_bh_disable();
 182	preempt_disable();
 183	backlog = crypto_get_backlog(&cpu_queue->queue);
 184	req = crypto_dequeue_request(&cpu_queue->queue);
 185	preempt_enable();
 186	local_bh_enable();
 187
 188	if (!req)
 189		return;
 190
 191	if (backlog)
 192		backlog->complete(backlog, -EINPROGRESS);
 193	req->complete(req, 0);
 194
 195	if (cpu_queue->queue.qlen)
 196		queue_work(kcrypto_wq, &cpu_queue->work);
 197}
 198
 199static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 200{
 201	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 202	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 203	return ictx->queue;
 204}
 205
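/*
 * If the caller asked for a CRYPTO_ALG_INTERNAL algorithm, propagate
 * both the type bit and the mask bit, so that the cryptd instance
 * wrapping it is itself marked internal and never exposed to general
 * users of the crypto API.
 */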
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_ablkcipher *tfm;
	struct blkcipher_desc desc;
	int refcnt;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	tfm = crypto_ablkcipher_reqtfm(req);
	ctx = crypto_ablkcipher_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

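/*
 * Instances are allocated as a single block laid out as
 * [head | crypto_instance | ctx tail]: "head" leaves room for larger
 * wrapper types (e.g. ahash_instance in cryptd_create_hash()), "tail"
 * for the per-instance context.
 */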
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	const char *name;
	u32 type;
	u32 mask;
	int err;

	type = 0;
	mask = CRYPTO_ALG_ASYNC;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_skcipher;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_drop_skcipher:
		crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

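/*
 * Unlike the init/update/final/finup/digest paths above, export and
 * import run synchronously in the caller's context: they only copy
 * shash state and never go through the per-CPU queue.
 */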
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;
	desc->flags = req->base.flags;

	return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
		(alg->cra_flags & (CRYPTO_ALG_INTERNAL |
				   CRYPTO_ALG_OPTIONAL_KEY));

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(salg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

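/*
 * The completion callback and the parent tfm are saved up front:
 * aead_request_set_tfm() below rewrites the request's tfm pointer for
 * the child, so neither could be recovered from the request afterwards.
 */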
static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = atomic_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

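/*
 * The request context must cover both cryptd's own bookkeeping and the
 * child's request context, because cryptd_aead_crypt() hands the very
 * same aead_request to the child via aead_request_set_tfm().
 */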
static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	u32 type = 0;
	u32 mask = CRYPTO_ALG_ASYNC;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	err = aead_register_instance(tmpl, inst);
	if (err) {
out_drop_aead:
		crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
		kfree(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

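	/*
	 * CRYPTO_ALG_TYPE_BLKCIPHER covers the whole legacy blkcipher
	 * type group under the usual type masks: only requests that ask
	 * for the old synchronous blkcipher type explicitly take the
	 * legacy path; everything else in the group is served by the
	 * skcipher implementation.
	 */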
	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
		    CRYPTO_ALG_TYPE_BLKCIPHER)
			return cryptd_create_blkcipher(tmpl, tb, &queue);

		return cryptd_create_skcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_aead(&aead_ctx->aead_spawn);
		kfree(aead_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

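/*
 * Users normally do not instantiate "cryptd(...)" by hand; they use the
 * cryptd_alloc_*() helpers below, which build the template name and
 * check that the resulting tfm really was created by this module.
 */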
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_blkcipher_ctx *ctx;
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type = crypto_skcipher_type(type);
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_tfm_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
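/*
 * Example (illustrative sketch only; "some-internal-shash" is a
 * placeholder driver name, and sg/digest/nbytes stand for caller-owned
 * data): wrapping an internal shash in cryptd and driving one digest
 * through the ahash API with a crypto_wait.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct cryptd_ahash *ctfm;
 *	struct ahash_request *req;
 *	int err;
 *
 *	ctfm = cryptd_alloc_ahash("some-internal-shash",
 *				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	req = ahash_request_alloc(&ctfm->base, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	cryptd_free_ahash(ctfm);
 */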

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	atomic_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (atomic_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
v4.10.11
   1/*
   2 * Software async crypto daemon.
   3 *
   4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   5 *
   6 * Added AEAD support to cryptd.
   7 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
   8 *             Adrian Hoban <adrian.hoban@intel.com>
   9 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  10 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  11 *    Copyright (c) 2010, Intel Corporation.
  12 *
  13 * This program is free software; you can redistribute it and/or modify it
  14 * under the terms of the GNU General Public License as published by the Free
  15 * Software Foundation; either version 2 of the License, or (at your option)
  16 * any later version.
  17 *
  18 */
  19
  20#include <crypto/internal/hash.h>
  21#include <crypto/internal/aead.h>
  22#include <crypto/internal/skcipher.h>
  23#include <crypto/cryptd.h>
  24#include <crypto/crypto_wq.h>
  25#include <linux/atomic.h>
  26#include <linux/err.h>
  27#include <linux/init.h>
  28#include <linux/kernel.h>
  29#include <linux/list.h>
  30#include <linux/module.h>
  31#include <linux/scatterlist.h>
  32#include <linux/sched.h>
  33#include <linux/slab.h>
  34
  35#define CRYPTD_MAX_CPU_QLEN 1000
 
 
  36
  37struct cryptd_cpu_queue {
  38	struct crypto_queue queue;
  39	struct work_struct work;
  40};
  41
  42struct cryptd_queue {
  43	struct cryptd_cpu_queue __percpu *cpu_queue;
  44};
  45
  46struct cryptd_instance_ctx {
  47	struct crypto_spawn spawn;
  48	struct cryptd_queue *queue;
  49};
  50
  51struct skcipherd_instance_ctx {
  52	struct crypto_skcipher_spawn spawn;
  53	struct cryptd_queue *queue;
  54};
  55
  56struct hashd_instance_ctx {
  57	struct crypto_shash_spawn spawn;
  58	struct cryptd_queue *queue;
  59};
  60
  61struct aead_instance_ctx {
  62	struct crypto_aead_spawn aead_spawn;
  63	struct cryptd_queue *queue;
  64};
  65
  66struct cryptd_blkcipher_ctx {
  67	atomic_t refcnt;
  68	struct crypto_blkcipher *child;
  69};
  70
  71struct cryptd_blkcipher_request_ctx {
  72	crypto_completion_t complete;
  73};
  74
  75struct cryptd_skcipher_ctx {
  76	atomic_t refcnt;
  77	struct crypto_skcipher *child;
  78};
  79
  80struct cryptd_skcipher_request_ctx {
  81	crypto_completion_t complete;
  82};
  83
  84struct cryptd_hash_ctx {
  85	atomic_t refcnt;
  86	struct crypto_shash *child;
  87};
  88
  89struct cryptd_hash_request_ctx {
  90	crypto_completion_t complete;
  91	struct shash_desc desc;
  92};
  93
  94struct cryptd_aead_ctx {
  95	atomic_t refcnt;
  96	struct crypto_aead *child;
  97};
  98
  99struct cryptd_aead_request_ctx {
 100	crypto_completion_t complete;
 101};
 102
 103static void cryptd_queue_worker(struct work_struct *work);
 104
 105static int cryptd_init_queue(struct cryptd_queue *queue,
 106			     unsigned int max_cpu_qlen)
 107{
 108	int cpu;
 109	struct cryptd_cpu_queue *cpu_queue;
 110
 111	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
 112	if (!queue->cpu_queue)
 113		return -ENOMEM;
 114	for_each_possible_cpu(cpu) {
 115		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 116		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 117		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
 118	}
 
 119	return 0;
 120}
 121
 122static void cryptd_fini_queue(struct cryptd_queue *queue)
 123{
 124	int cpu;
 125	struct cryptd_cpu_queue *cpu_queue;
 126
 127	for_each_possible_cpu(cpu) {
 128		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 129		BUG_ON(cpu_queue->queue.qlen);
 130	}
 131	free_percpu(queue->cpu_queue);
 132}
 133
 134static int cryptd_enqueue_request(struct cryptd_queue *queue,
 135				  struct crypto_async_request *request)
 136{
 137	int cpu, err;
 138	struct cryptd_cpu_queue *cpu_queue;
 139	atomic_t *refcnt;
 140	bool may_backlog;
 141
 142	cpu = get_cpu();
 143	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 144	err = crypto_enqueue_request(&cpu_queue->queue, request);
 145
 146	refcnt = crypto_tfm_ctx(request->tfm);
 147	may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 148
 149	if (err == -EBUSY && !may_backlog)
 150		goto out_put_cpu;
 151
 152	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
 153
 154	if (!atomic_read(refcnt))
 155		goto out_put_cpu;
 156
 157	atomic_inc(refcnt);
 158
 159out_put_cpu:
 160	put_cpu();
 161
 162	return err;
 163}
 164
 165/* Called in workqueue context, do one real cryption work (via
 166 * req->complete) and reschedule itself if there are more work to
 167 * do. */
 168static void cryptd_queue_worker(struct work_struct *work)
 169{
 170	struct cryptd_cpu_queue *cpu_queue;
 171	struct crypto_async_request *req, *backlog;
 172
 173	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 174	/*
 175	 * Only handle one request at a time to avoid hogging crypto workqueue.
 176	 * preempt_disable/enable is used to prevent being preempted by
 177	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
 178	 * cryptd_enqueue_request() being accessed from software interrupts.
 179	 */
 180	local_bh_disable();
 181	preempt_disable();
 182	backlog = crypto_get_backlog(&cpu_queue->queue);
 183	req = crypto_dequeue_request(&cpu_queue->queue);
 184	preempt_enable();
 185	local_bh_enable();
 186
 187	if (!req)
 188		return;
 189
 190	if (backlog)
 191		backlog->complete(backlog, -EINPROGRESS);
 192	req->complete(req, 0);
 193
 194	if (cpu_queue->queue.qlen)
 195		queue_work(kcrypto_wq, &cpu_queue->work);
 196}
 197
 198static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 199{
 200	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 201	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 202	return ictx->queue;
 203}
 204
 205static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
 206					 u32 *mask)
 207{
 208	struct crypto_attr_type *algt;
 209
 210	algt = crypto_get_attr_type(tb);
 211	if (IS_ERR(algt))
 212		return;
 213
 214	*type |= algt->type & CRYPTO_ALG_INTERNAL;
 215	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 216}
 217
 218static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
 219				   const u8 *key, unsigned int keylen)
 220{
 221	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
 222	struct crypto_blkcipher *child = ctx->child;
 223	int err;
 224
 225	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 226	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
 227					  CRYPTO_TFM_REQ_MASK);
 228	err = crypto_blkcipher_setkey(child, key, keylen);
 229	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
 230					    CRYPTO_TFM_RES_MASK);
 231	return err;
 232}
 233
 234static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
 235				   struct crypto_blkcipher *child,
 236				   int err,
 237				   int (*crypt)(struct blkcipher_desc *desc,
 238						struct scatterlist *dst,
 239						struct scatterlist *src,
 240						unsigned int len))
 241{
 242	struct cryptd_blkcipher_request_ctx *rctx;
 243	struct cryptd_blkcipher_ctx *ctx;
 244	struct crypto_ablkcipher *tfm;
 245	struct blkcipher_desc desc;
 246	int refcnt;
 247
 248	rctx = ablkcipher_request_ctx(req);
 249
 250	if (unlikely(err == -EINPROGRESS))
 251		goto out;
 252
 253	desc.tfm = child;
 254	desc.info = req->info;
 255	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 256
 257	err = crypt(&desc, req->dst, req->src, req->nbytes);
 258
 259	req->base.complete = rctx->complete;
 260
 261out:
 262	tfm = crypto_ablkcipher_reqtfm(req);
 263	ctx = crypto_ablkcipher_ctx(tfm);
 264	refcnt = atomic_read(&ctx->refcnt);
 265
 266	local_bh_disable();
 267	rctx->complete(&req->base, err);
 268	local_bh_enable();
 269
 270	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
 271		crypto_free_ablkcipher(tfm);
 272}
 273
 274static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
 275{
 276	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
 277	struct crypto_blkcipher *child = ctx->child;
 278
 279	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
 280			       crypto_blkcipher_crt(child)->encrypt);
 281}
 282
 283static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
 284{
 285	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
 286	struct crypto_blkcipher *child = ctx->child;
 287
 288	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
 289			       crypto_blkcipher_crt(child)->decrypt);
 290}
 291
 292static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 293				    crypto_completion_t compl)
 294{
 295	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
 296	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 297	struct cryptd_queue *queue;
 298
 299	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
 300	rctx->complete = req->base.complete;
 301	req->base.complete = compl;
 302
 303	return cryptd_enqueue_request(queue, &req->base);
 304}
 305
 306static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
 307{
 308	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
 309}
 310
 311static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
 312{
 313	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
 314}
 315
 316static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
 317{
 318	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 319	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 320	struct crypto_spawn *spawn = &ictx->spawn;
 321	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 322	struct crypto_blkcipher *cipher;
 323
 324	cipher = crypto_spawn_blkcipher(spawn);
 325	if (IS_ERR(cipher))
 326		return PTR_ERR(cipher);
 327
 328	ctx->child = cipher;
 329	tfm->crt_ablkcipher.reqsize =
 330		sizeof(struct cryptd_blkcipher_request_ctx);
 331	return 0;
 332}
 333
 334static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
 335{
 336	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 337
 338	crypto_free_blkcipher(ctx->child);
 339}
 340
 341static int cryptd_init_instance(struct crypto_instance *inst,
 342				struct crypto_alg *alg)
 343{
 344	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 345		     "cryptd(%s)",
 346		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 347		return -ENAMETOOLONG;
 348
 349	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 350
 351	inst->alg.cra_priority = alg->cra_priority + 50;
 352	inst->alg.cra_blocksize = alg->cra_blocksize;
 353	inst->alg.cra_alignmask = alg->cra_alignmask;
 354
 355	return 0;
 356}
 357
 358static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
 359				   unsigned int tail)
 360{
 361	char *p;
 362	struct crypto_instance *inst;
 363	int err;
 364
 365	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
 366	if (!p)
 367		return ERR_PTR(-ENOMEM);
 368
 369	inst = (void *)(p + head);
 370
 371	err = cryptd_init_instance(inst, alg);
 372	if (err)
 373		goto out_free_inst;
 374
 375out:
 376	return p;
 377
 378out_free_inst:
 379	kfree(p);
 380	p = ERR_PTR(err);
 381	goto out;
 382}
 383
 384static int cryptd_create_blkcipher(struct crypto_template *tmpl,
 385				   struct rtattr **tb,
 386				   struct cryptd_queue *queue)
 387{
 388	struct cryptd_instance_ctx *ctx;
 389	struct crypto_instance *inst;
 390	struct crypto_alg *alg;
 391	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
 392	u32 mask = CRYPTO_ALG_TYPE_MASK;
 393	int err;
 394
 395	cryptd_check_internal(tb, &type, &mask);
 396
 397	alg = crypto_get_attr_alg(tb, type, mask);
 398	if (IS_ERR(alg))
 399		return PTR_ERR(alg);
 400
 401	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
 402	err = PTR_ERR(inst);
 403	if (IS_ERR(inst))
 404		goto out_put_alg;
 405
 406	ctx = crypto_instance_ctx(inst);
 407	ctx->queue = queue;
 408
 409	err = crypto_init_spawn(&ctx->spawn, alg, inst,
 410				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
 411	if (err)
 412		goto out_free_inst;
 413
 414	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 415	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
 416		type |= CRYPTO_ALG_INTERNAL;
 417	inst->alg.cra_flags = type;
 418	inst->alg.cra_type = &crypto_ablkcipher_type;
 419
 420	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
 421	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
 422	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
 423
 424	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
 425
 426	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
 427
 428	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
 429	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
 430
 431	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
 432	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
 433	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
 434
 435	err = crypto_register_instance(tmpl, inst);
 436	if (err) {
 437		crypto_drop_spawn(&ctx->spawn);
 438out_free_inst:
 439		kfree(inst);
 440	}
 441
 442out_put_alg:
 443	crypto_mod_put(alg);
 444	return err;
 445}
 446
 447static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 448				  const u8 *key, unsigned int keylen)
 449{
 450	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
 451	struct crypto_skcipher *child = ctx->child;
 452	int err;
 453
 454	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 455	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 456					 CRYPTO_TFM_REQ_MASK);
 457	err = crypto_skcipher_setkey(child, key, keylen);
 458	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
 459					  CRYPTO_TFM_RES_MASK);
 460	return err;
 461}
 462
 463static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
 464{
 465	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 466	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 467	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 468	int refcnt = atomic_read(&ctx->refcnt);
 469
 470	local_bh_disable();
 471	rctx->complete(&req->base, err);
 472	local_bh_enable();
 473
 474	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
 475		crypto_free_skcipher(tfm);
 476}
 477
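/*
 * Worker-context encrypt step: run the synchronous child cipher via a
 * subrequest on the stack, then report the result through the user's
 * original completion.  err == -EINPROGRESS is treated as the backlog
 * notification, so the actual crypto is skipped in that case.
 */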
 478static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
 479				    int err)
 480{
 481	struct skcipher_request *req = skcipher_request_cast(base);
 482	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 483	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 484	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 485	struct crypto_skcipher *child = ctx->child;
 486	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 487
 488	if (unlikely(err == -EINPROGRESS))
 489		goto out;
 490
 491	skcipher_request_set_tfm(subreq, child);
 492	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 493				      NULL, NULL);
 494	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 495				   req->iv);
 496
 497	err = crypto_skcipher_encrypt(subreq);
 498	skcipher_request_zero(subreq);
 499
 500	req->base.complete = rctx->complete;
 501
 502out:
 503	cryptd_skcipher_complete(req, err);
 504}
 505
 506static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
 507				    int err)
 508{
 509	struct skcipher_request *req = skcipher_request_cast(base);
 510	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 511	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 512	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 513	struct crypto_skcipher *child = ctx->child;
 514	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 515
 516	if (unlikely(err == -EINPROGRESS))
 517		goto out;
 518
 519	skcipher_request_set_tfm(subreq, child);
 520	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 521				      NULL, NULL);
 522	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 523				   req->iv);
 524
 525	err = crypto_skcipher_decrypt(subreq);
 526	skcipher_request_zero(subreq);
 527
 528	req->base.complete = rctx->complete;
 529
 530out:
 531	cryptd_skcipher_complete(req, err);
 532}
 533
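/*
 * Queue a request for deferred processing: stash the caller's
 * completion in the request context and substitute the worker entry
 * point (encrypt or decrypt) as the completion that the cryptd queue
 * will invoke.
 */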
 534static int cryptd_skcipher_enqueue(struct skcipher_request *req,
 535				   crypto_completion_t compl)
 536{
 537	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 538	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 539	struct cryptd_queue *queue;
 540
 541	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
 542	rctx->complete = req->base.complete;
 543	req->base.complete = compl;
 544
 545	return cryptd_enqueue_request(queue, &req->base);
 546}
 547
 548static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
 549{
 550	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
 551}
 552
 553static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
 554{
 555	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
 556}
 557
 558static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 559{
 560	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 561	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
 562	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
 563	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 564	struct crypto_skcipher *cipher;
 565
 566	cipher = crypto_spawn_skcipher(spawn);
 567	if (IS_ERR(cipher))
 568		return PTR_ERR(cipher);
 569
 570	ctx->child = cipher;
 571	crypto_skcipher_set_reqsize(
 572		tfm, sizeof(struct cryptd_skcipher_request_ctx));
 573	return 0;
 574}
 575
 576static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
 577{
 578	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 579
 580	crypto_free_skcipher(ctx->child);
 581}
 582
 583static void cryptd_skcipher_free(struct skcipher_instance *inst)
 584{
 585	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
 586
 587	crypto_drop_skcipher(&ctx->spawn);
 588}
 589
 590static int cryptd_create_skcipher(struct crypto_template *tmpl,
 591				  struct rtattr **tb,
 592				  struct cryptd_queue *queue)
 593{
 594	struct skcipherd_instance_ctx *ctx;
 595	struct skcipher_instance *inst;
 596	struct skcipher_alg *alg;
 597	const char *name;
 598	u32 type;
 599	u32 mask;
 600	int err;
 601
 602	type = 0;
 603	mask = CRYPTO_ALG_ASYNC;
 604
 605	cryptd_check_internal(tb, &type, &mask);
 606
 607	name = crypto_attr_alg_name(tb[1]);
 608	if (IS_ERR(name))
 609		return PTR_ERR(name);
 610
 611	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 612	if (!inst)
 613		return -ENOMEM;
 614
 615	ctx = skcipher_instance_ctx(inst);
 616	ctx->queue = queue;
 617
 618	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
 619	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
 620	if (err)
 621		goto out_free_inst;
 622
 623	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
 624	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
 625	if (err)
 626		goto out_drop_skcipher;
 627
 628	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
 629				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 630
 631	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
 632	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
 633	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
 634	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
 635
 636	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
 637
 638	inst->alg.init = cryptd_skcipher_init_tfm;
 639	inst->alg.exit = cryptd_skcipher_exit_tfm;
 640
 641	inst->alg.setkey = cryptd_skcipher_setkey;
 642	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
 643	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
 644
 645	inst->free = cryptd_skcipher_free;
 646
 647	err = skcipher_register_instance(tmpl, inst);
 648	if (err) {
 649out_drop_skcipher:
 650		crypto_drop_skcipher(&ctx->spawn);
 651out_free_inst:
 652		kfree(inst);
 653	}
 654	return err;
 655}
 656
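/*
 * The cryptd hash instance wraps a synchronous shash.  The request
 * size covers both the cryptd request context and the child's shash
 * descriptor, so every request carries its own hash state.
 */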
 657static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 658{
 659	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 660	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
 661	struct crypto_shash_spawn *spawn = &ictx->spawn;
 662	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 663	struct crypto_shash *hash;
 664
 665	hash = crypto_spawn_shash(spawn);
 666	if (IS_ERR(hash))
 667		return PTR_ERR(hash);
 668
 669	ctx->child = hash;
 670	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 671				 sizeof(struct cryptd_hash_request_ctx) +
 672				 crypto_shash_descsize(hash));
 673	return 0;
 674}
 675
 676static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 677{
 678	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 679
 680	crypto_free_shash(ctx->child);
 681}
 682
 683static int cryptd_hash_setkey(struct crypto_ahash *parent,
 684				   const u8 *key, unsigned int keylen)
 685{
 686	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
 687	struct crypto_shash *child = ctx->child;
 688	int err;
 689
 690	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 691	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
 692				      CRYPTO_TFM_REQ_MASK);
 693	err = crypto_shash_setkey(child, key, keylen);
 694	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
 695				       CRYPTO_TFM_RES_MASK);
 696	return err;
 697}
 698
 699static int cryptd_hash_enqueue(struct ahash_request *req,
 700				crypto_completion_t compl)
 701{
 702	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 703	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 704	struct cryptd_queue *queue =
 705		cryptd_get_queue(crypto_ahash_tfm(tfm));
 706
 707	rctx->complete = req->base.complete;
 708	req->base.complete = compl;
 709
 710	return cryptd_enqueue_request(queue, &req->base);
 711}
 712
 713static void cryptd_hash_complete(struct ahash_request *req, int err)
 714{
 715	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 716	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 717	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 718	int refcnt = atomic_read(&ctx->refcnt);
 719
 720	local_bh_disable();
 721	rctx->complete(&req->base, err);
 722	local_bh_enable();
 723
 724	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
 725		crypto_free_ahash(tfm);
 726}
 727
 728static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 729{
 730	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 731	struct crypto_shash *child = ctx->child;
 732	struct ahash_request *req = ahash_request_cast(req_async);
 733	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 734	struct shash_desc *desc = &rctx->desc;
 735
 736	if (unlikely(err == -EINPROGRESS))
 737		goto out;
 738
 739	desc->tfm = child;
 740	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 741
 742	err = crypto_shash_init(desc);
 743
 744	req->base.complete = rctx->complete;
 745
 746out:
 747	cryptd_hash_complete(req, err);
 748}
 749
 750static int cryptd_hash_init_enqueue(struct ahash_request *req)
 751{
 752	return cryptd_hash_enqueue(req, cryptd_hash_init);
 753}
 754
 755static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
 756{
 757	struct ahash_request *req = ahash_request_cast(req_async);
 758	struct cryptd_hash_request_ctx *rctx;
 759
 760	rctx = ahash_request_ctx(req);
 761
 762	if (unlikely(err == -EINPROGRESS))
 763		goto out;
 764
 765	err = shash_ahash_update(req, &rctx->desc);
 766
 767	req->base.complete = rctx->complete;
 768
 769out:
 770	cryptd_hash_complete(req, err);
 771}
 772
 773static int cryptd_hash_update_enqueue(struct ahash_request *req)
 774{
 775	return cryptd_hash_enqueue(req, cryptd_hash_update);
 776}
 777
 778static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
 779{
 780	struct ahash_request *req = ahash_request_cast(req_async);
 781	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 782
 783	if (unlikely(err == -EINPROGRESS))
 784		goto out;
 785
 786	err = crypto_shash_final(&rctx->desc, req->result);
 787
 788	req->base.complete = rctx->complete;
 789
 790out:
 791	cryptd_hash_complete(req, err);
 792}
 793
 794static int cryptd_hash_final_enqueue(struct ahash_request *req)
 795{
 796	return cryptd_hash_enqueue(req, cryptd_hash_final);
 797}
 798
 799static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
 800{
 801	struct ahash_request *req = ahash_request_cast(req_async);
 802	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 803
 804	if (unlikely(err == -EINPROGRESS))
 805		goto out;
 806
 807	err = shash_ahash_finup(req, &rctx->desc);
 808
 809	req->base.complete = rctx->complete;
 810
 811out:
 812	cryptd_hash_complete(req, err);
 813}
 814
 815static int cryptd_hash_finup_enqueue(struct ahash_request *req)
 816{
 817	return cryptd_hash_enqueue(req, cryptd_hash_finup);
 818}
 819
 820static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 821{
 822	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 823	struct crypto_shash *child = ctx->child;
 824	struct ahash_request *req = ahash_request_cast(req_async);
 825	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 826	struct shash_desc *desc = &rctx->desc;
 827
 828	if (unlikely(err == -EINPROGRESS))
 829		goto out;
 830
 831	desc->tfm = child;
 832	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 833
 834	err = shash_ahash_digest(req, desc);
 835
 836	req->base.complete = rctx->complete;
 837
 838out:
 839	cryptd_hash_complete(req, err);
 840}
 841
 842static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 843{
 844	return cryptd_hash_enqueue(req, cryptd_hash_digest);
 845}
 846
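/*
 * export/import operate directly on the shash state embedded in the
 * request context and are not routed through the queue.
 */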
 847static int cryptd_hash_export(struct ahash_request *req, void *out)
 848{
 849	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 850
 851	return crypto_shash_export(&rctx->desc, out);
 852}
 853
 854static int cryptd_hash_import(struct ahash_request *req, const void *in)
 855{
 856	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 857	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 858	struct shash_desc *desc = cryptd_shash_desc(req);
 859
 860	desc->tfm = ctx->child;
 861	desc->flags = req->base.flags;
 862
 863	return crypto_shash_import(desc, in);
 864}
 865
 866static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 867			      struct cryptd_queue *queue)
 868{
 869	struct hashd_instance_ctx *ctx;
 870	struct ahash_instance *inst;
 871	struct shash_alg *salg;
 872	struct crypto_alg *alg;
 873	u32 type = 0;
 874	u32 mask = 0;
 875	int err;
 876
 877	cryptd_check_internal(tb, &type, &mask);
 878
 879	salg = shash_attr_alg(tb[1], type, mask);
 880	if (IS_ERR(salg))
 881		return PTR_ERR(salg);
 882
 883	alg = &salg->base;
 884	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
 885				     sizeof(*ctx));
 886	err = PTR_ERR(inst);
 887	if (IS_ERR(inst))
 888		goto out_put_alg;
 889
 890	ctx = ahash_instance_ctx(inst);
 891	ctx->queue = queue;
 892
 893	err = crypto_init_shash_spawn(&ctx->spawn, salg,
 894				      ahash_crypto_instance(inst));
 895	if (err)
 896		goto out_free_inst;
 897
 898	type = CRYPTO_ALG_ASYNC;
 899	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
 900		type |= CRYPTO_ALG_INTERNAL;
 901	inst->alg.halg.base.cra_flags = type;
 902
 903	inst->alg.halg.digestsize = salg->digestsize;
 904	inst->alg.halg.statesize = salg->statesize;
 905	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 906
 907	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
 908	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
 909
 910	inst->alg.init   = cryptd_hash_init_enqueue;
 911	inst->alg.update = cryptd_hash_update_enqueue;
 912	inst->alg.final  = cryptd_hash_final_enqueue;
 913	inst->alg.finup  = cryptd_hash_finup_enqueue;
 914	inst->alg.export = cryptd_hash_export;
 915	inst->alg.import = cryptd_hash_import;
 916	inst->alg.setkey = cryptd_hash_setkey;
 917	inst->alg.digest = cryptd_hash_digest_enqueue;
 918
 919	err = ahash_register_instance(tmpl, inst);
 920	if (err) {
 921		crypto_drop_shash(&ctx->spawn);
 922out_free_inst:
 923		kfree(inst);
 924	}
 925
 926out_put_alg:
 927	crypto_mod_put(alg);
 928	return err;
 929}
 930
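/*
 * AEAD setkey and setauthsize are forwarded straight to the child
 * transform; unlike the skcipher and hash paths, no flag propagation
 * is done here.
 */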
 931static int cryptd_aead_setkey(struct crypto_aead *parent,
 932			      const u8 *key, unsigned int keylen)
 933{
 934	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 935	struct crypto_aead *child = ctx->child;
 936
 937	return crypto_aead_setkey(child, key, keylen);
 938}
 939
 940static int cryptd_aead_setauthsize(struct crypto_aead *parent,
 941				   unsigned int authsize)
 942{
 943	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 944	struct crypto_aead *child = ctx->child;
 945
 946	return crypto_aead_setauthsize(child, authsize);
 947}
 948
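/*
 * Common worker-context step for AEAD encrypt and decrypt.  Note that,
 * unlike the skcipher path, the request is re-targeted at the child
 * transform in place via aead_request_set_tfm(); this works because
 * cryptd_aead_init_tfm() sized the request area to the maximum of the
 * cryptd and child request sizes.
 */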
 949static void cryptd_aead_crypt(struct aead_request *req,
 950			struct crypto_aead *child,
 951			int err,
 952			int (*crypt)(struct aead_request *req))
 953{
 954	struct cryptd_aead_request_ctx *rctx;
 955	struct cryptd_aead_ctx *ctx;
 956	crypto_completion_t compl;
 957	struct crypto_aead *tfm;
 958	int refcnt;
 959
 960	rctx = aead_request_ctx(req);
 961	compl = rctx->complete;
 962
 963	tfm = crypto_aead_reqtfm(req);
 964
 965	if (unlikely(err == -EINPROGRESS))
 966		goto out;
 967	aead_request_set_tfm(req, child);
 968	err = crypt(req);
 969
 970out:
 971	ctx = crypto_aead_ctx(tfm);
 972	refcnt = atomic_read(&ctx->refcnt);
 973
 974	local_bh_disable();
 975	compl(&req->base, err);
 976	local_bh_enable();
 977
 978	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
 979		crypto_free_aead(tfm);
 980}
 981
 982static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
 983{
 984	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 985	struct crypto_aead *child = ctx->child;
 986	struct aead_request *req;
 987
 988	req = container_of(areq, struct aead_request, base);
 989	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 990}
 991
 992static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 993{
 994	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 995	struct crypto_aead *child = ctx->child;
 996	struct aead_request *req;
 997
 998	req = container_of(areq, struct aead_request, base);
 999	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
1000}
1001
1002static int cryptd_aead_enqueue(struct aead_request *req,
1003				    crypto_completion_t compl)
1004{
1005	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
1006	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1007	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
1008
1009	rctx->complete = req->base.complete;
1010	req->base.complete = compl;
1011	return cryptd_enqueue_request(queue, &req->base);
1012}
1013
1014static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
1015{
1016	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
1017}
1018
1019static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
1020{
1021	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
1022}
1023
1024static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
1025{
1026	struct aead_instance *inst = aead_alg_instance(tfm);
1027	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
1028	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
1029	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
1030	struct crypto_aead *cipher;
1031
1032	cipher = crypto_spawn_aead(spawn);
1033	if (IS_ERR(cipher))
1034		return PTR_ERR(cipher);
1035
1036	ctx->child = cipher;
1037	crypto_aead_set_reqsize(
1038		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
1039			 crypto_aead_reqsize(cipher)));
1040	return 0;
1041}
1042
1043static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
1044{
1045	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
1046	crypto_free_aead(ctx->child);
1047}
1048
1049static int cryptd_create_aead(struct crypto_template *tmpl,
1050			      struct rtattr **tb,
1051			      struct cryptd_queue *queue)
1052{
1053	struct aead_instance_ctx *ctx;
1054	struct aead_instance *inst;
1055	struct aead_alg *alg;
1056	const char *name;
1057	u32 type = 0;
1058	u32 mask = CRYPTO_ALG_ASYNC;
1059	int err;
1060
1061	cryptd_check_internal(tb, &type, &mask);
1062
1063	name = crypto_attr_alg_name(tb[1]);
1064	if (IS_ERR(name))
1065		return PTR_ERR(name);
1066
1067	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1068	if (!inst)
1069		return -ENOMEM;
1070
1071	ctx = aead_instance_ctx(inst);
1072	ctx->queue = queue;
1073
1074	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
1075	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
1076	if (err)
1077		goto out_free_inst;
1078
1079	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
1080	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
1081	if (err)
1082		goto out_drop_aead;
1083
1084	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
1085				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
1086	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
1087
1088	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
1089	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
1090
1091	inst->alg.init = cryptd_aead_init_tfm;
1092	inst->alg.exit = cryptd_aead_exit_tfm;
1093	inst->alg.setkey = cryptd_aead_setkey;
1094	inst->alg.setauthsize = cryptd_aead_setauthsize;
1095	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
1096	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
1097
1098	err = aead_register_instance(tmpl, inst);
1099	if (err) {
1100out_drop_aead:
1101		crypto_drop_aead(&ctx->aead_spawn);
1102out_free_inst:
1103		kfree(inst);
1104	}
1105	return err;
1106}
1107
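/*
 * A single "cryptd" template serves all algorithm types: when an
 * instance such as "cryptd(<alg>)" is requested, dispatch on the type
 * attribute to the blkcipher/skcipher, hash or AEAD constructor, all
 * of which share the one set of per-cpu queues declared below.
 */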
1108static struct cryptd_queue queue;
1109
1110static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
1111{
1112	struct crypto_attr_type *algt;
1113
1114	algt = crypto_get_attr_type(tb);
1115	if (IS_ERR(algt))
1116		return PTR_ERR(algt);
1117
1118	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
1119	case CRYPTO_ALG_TYPE_BLKCIPHER:
1120		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
1121		    CRYPTO_ALG_TYPE_BLKCIPHER)
1122			return cryptd_create_blkcipher(tmpl, tb, &queue);
1123
1124		return cryptd_create_skcipher(tmpl, tb, &queue);
1125	case CRYPTO_ALG_TYPE_DIGEST:
1126		return cryptd_create_hash(tmpl, tb, &queue);
1127	case CRYPTO_ALG_TYPE_AEAD:
1128		return cryptd_create_aead(tmpl, tb, &queue);
1129	}
1130
1131	return -EINVAL;
1132}
1133
1134static void cryptd_free(struct crypto_instance *inst)
1135{
1136	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
1137	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
1138	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
1139
1140	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
1141	case CRYPTO_ALG_TYPE_AHASH:
1142		crypto_drop_shash(&hctx->spawn);
1143		kfree(ahash_instance(inst));
1144		return;
1145	case CRYPTO_ALG_TYPE_AEAD:
1146		crypto_drop_aead(&aead_ctx->aead_spawn);
1147		kfree(aead_instance(inst));
1148		return;
1149	default:
1150		crypto_drop_spawn(&ctx->spawn);
1151		kfree(inst);
1152	}
1153}
1154
1155static struct crypto_template cryptd_tmpl = {
1156	.name = "cryptd",
1157	.create = cryptd_create,
1158	.free = cryptd_free,
1159	.module = THIS_MODULE,
1160};
1161
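/*
 * Public helpers for drivers that want an explicit handle on a cryptd
 * transform.  They build the "cryptd(<alg>)" name, verify that the
 * resulting tfm really was created by this module, and initialise the
 * refcount to 1 (the caller's reference).
 *
 * A typical caller looks roughly like this (illustrative sketch, not
 * taken from any specific driver):
 *
 *	struct cryptd_ahash *cryptd_tfm;
 *
 *	cryptd_tfm = cryptd_alloc_ahash("__ghash", CRYPTO_ALG_INTERNAL,
 *					CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(cryptd_tfm))
 *		return PTR_ERR(cryptd_tfm);
 *	...
 *	cryptd_free_ahash(cryptd_tfm);
 */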
1162struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
1163						  u32 type, u32 mask)
1164{
1165	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1166	struct cryptd_blkcipher_ctx *ctx;
1167	struct crypto_tfm *tfm;
1168
1169	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1170		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1171		return ERR_PTR(-EINVAL);
1172	type = crypto_skcipher_type(type);
1173	mask &= ~CRYPTO_ALG_TYPE_MASK;
1174	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
1175	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
1176	if (IS_ERR(tfm))
1177		return ERR_CAST(tfm);
1178	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
1179		crypto_free_tfm(tfm);
1180		return ERR_PTR(-EINVAL);
1181	}
1182
1183	ctx = crypto_tfm_ctx(tfm);
1184	atomic_set(&ctx->refcnt, 1);
1185
1186	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
1187}
1188EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
1189
1190struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
1191{
1192	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1193	return ctx->child;
1194}
1195EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
1196
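/*
 * The refcount is 1 while the transform is idle; the enqueue path
 * takes an extra reference per outstanding request, so anything above
 * 1 means requests are still queued or in flight.  The same convention
 * applies to the skcipher, ahash and AEAD variants below.
 */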
1197bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
1198{
1199	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1200
1201	return atomic_read(&ctx->refcnt) - 1;
1202}
1203EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
1204
1205void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
1206{
1207	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
1208
1209	if (atomic_dec_and_test(&ctx->refcnt))
1210		crypto_free_ablkcipher(&tfm->base);
1211}
1212EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
1213
1214struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
1215					      u32 type, u32 mask)
1216{
1217	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1218	struct cryptd_skcipher_ctx *ctx;
1219	struct crypto_skcipher *tfm;
1220
1221	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1222		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1223		return ERR_PTR(-EINVAL);
1224
1225	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
1226	if (IS_ERR(tfm))
1227		return ERR_CAST(tfm);
1228
1229	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1230		crypto_free_skcipher(tfm);
1231		return ERR_PTR(-EINVAL);
1232	}
1233
1234	ctx = crypto_skcipher_ctx(tfm);
1235	atomic_set(&ctx->refcnt, 1);
1236
1237	return container_of(tfm, struct cryptd_skcipher, base);
1238}
1239EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
1240
1241struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
1242{
1243	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1244
1245	return ctx->child;
1246}
1247EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
1248
1249bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
1250{
1251	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1252
1253	return atomic_read(&ctx->refcnt) - 1;
1254}
1255EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
1256
1257void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
1258{
1259	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
1260
1261	if (atomic_dec_and_test(&ctx->refcnt))
1262		crypto_free_skcipher(&tfm->base);
1263}
1264EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
1265
1266struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
1267					u32 type, u32 mask)
1268{
1269	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1270	struct cryptd_hash_ctx *ctx;
1271	struct crypto_ahash *tfm;
1272
1273	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1274		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1275		return ERR_PTR(-EINVAL);
1276	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
1277	if (IS_ERR(tfm))
1278		return ERR_CAST(tfm);
1279	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1280		crypto_free_ahash(tfm);
1281		return ERR_PTR(-EINVAL);
1282	}
1283
1284	ctx = crypto_ahash_ctx(tfm);
1285	atomic_set(&ctx->refcnt, 1);
1286
1287	return __cryptd_ahash_cast(tfm);
1288}
1289EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
1290
1291struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
1292{
1293	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1294
1295	return ctx->child;
1296}
1297EXPORT_SYMBOL_GPL(cryptd_ahash_child);
1298
1299struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
1300{
1301	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
1302	return &rctx->desc;
1303}
1304EXPORT_SYMBOL_GPL(cryptd_shash_desc);
1305
1306bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
1307{
1308	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1309
1310	return atomic_read(&ctx->refcnt) - 1;
1311}
1312EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1313
1314void cryptd_free_ahash(struct cryptd_ahash *tfm)
1315{
1316	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1317
1318	if (atomic_dec_and_test(&ctx->refcnt))
1319		crypto_free_ahash(&tfm->base);
1320}
1321EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1322
1323struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1324						  u32 type, u32 mask)
1325{
1326	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1327	struct cryptd_aead_ctx *ctx;
1328	struct crypto_aead *tfm;
1329
1330	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1331		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1332		return ERR_PTR(-EINVAL);
1333	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1334	if (IS_ERR(tfm))
1335		return ERR_CAST(tfm);
1336	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1337		crypto_free_aead(tfm);
1338		return ERR_PTR(-EINVAL);
1339	}
1340
1341	ctx = crypto_aead_ctx(tfm);
1342	atomic_set(&ctx->refcnt, 1);
1343
1344	return __cryptd_aead_cast(tfm);
1345}
1346EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
1347
1348struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1349{
1350	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1351
1352	return ctx->child;
1353}
1354EXPORT_SYMBOL_GPL(cryptd_aead_child);
1355
1356bool cryptd_aead_queued(struct cryptd_aead *tfm)
1357{
1358	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1359
1360	return atomic_read(&ctx->refcnt) - 1;
1361}
1362EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1363
1364void cryptd_free_aead(struct cryptd_aead *tfm)
1365{
1366	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1367
1368	if (atomic_dec_and_test(&ctx->refcnt))
1369		crypto_free_aead(&tfm->base);
1370}
1371EXPORT_SYMBOL_GPL(cryptd_free_aead);
1372
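/*
 * Module init: set up the per-cpu queues first, then register the
 * template; if registration fails, tear the queues back down.  On
 * exit, the queues are drained and freed before the template is
 * unregistered.
 */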
1373static int __init cryptd_init(void)
1374{
1375	int err;
1376
1377	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1378	if (err)
1379		return err;
1380
1381	err = crypto_register_template(&cryptd_tmpl);
1382	if (err)
1383		cryptd_fini_queue(&queue);
1384
1385	return err;
1386}
1387
1388static void __exit cryptd_exit(void)
1389{
1390	cryptd_fini_queue(&queue);
1391	crypto_unregister_template(&cryptd_tmpl);
1392}
1393
1394subsys_initcall(cryptd_init);
1395module_exit(cryptd_exit);
1396
1397MODULE_LICENSE("GPL");
1398MODULE_DESCRIPTION("Software async crypto daemon");
1399MODULE_ALIAS_CRYPTO("cryptd");