crypto/cryptd.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Software async crypto daemon.
   4 *
   5 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   6 *
   7 * Added AEAD support to cryptd.
   8 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
   9 *             Adrian Hoban <adrian.hoban@intel.com>
  10 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
  11 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
  12 *    Copyright (c) 2010, Intel Corporation.
  13 */
  14
  15#include <crypto/internal/hash.h>
  16#include <crypto/internal/aead.h>
  17#include <crypto/internal/skcipher.h>
  18#include <crypto/cryptd.h>
  19#include <linux/refcount.h>
  20#include <linux/err.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/list.h>
  24#include <linux/module.h>
  25#include <linux/scatterlist.h>
  26#include <linux/sched.h>
  27#include <linux/slab.h>
  28#include <linux/workqueue.h>
  29
  30static unsigned int cryptd_max_cpu_qlen = 1000;
  31module_param(cryptd_max_cpu_qlen, uint, 0);
  32MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
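/*
 * Example (assuming cryptd is built into the kernel, the common case):
 * the per-CPU queue depth can be raised on the kernel command line with
 * the module-prefixed parameter form, e.g.
 *
 *	cryptd.cryptd_max_cpu_qlen=500
 *
 * where 500 is an arbitrary illustrative depth.
 */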
  33
  34static struct workqueue_struct *cryptd_wq;
  35
  36struct cryptd_cpu_queue {
  37	struct crypto_queue queue;
  38	struct work_struct work;
  39};
  40
  41struct cryptd_queue {
  42	struct cryptd_cpu_queue __percpu *cpu_queue;
  43};
  44
  45struct cryptd_instance_ctx {
  46	struct crypto_spawn spawn;
  47	struct cryptd_queue *queue;
  48};
  49
  50struct skcipherd_instance_ctx {
  51	struct crypto_skcipher_spawn spawn;
  52	struct cryptd_queue *queue;
  53};
  54
  55struct hashd_instance_ctx {
  56	struct crypto_shash_spawn spawn;
  57	struct cryptd_queue *queue;
  58};
  59
  60struct aead_instance_ctx {
  61	struct crypto_aead_spawn aead_spawn;
  62	struct cryptd_queue *queue;
  63};
  64
  65struct cryptd_skcipher_ctx {
  66	refcount_t refcnt;
  67	struct crypto_sync_skcipher *child;
  68};
  69
  70struct cryptd_skcipher_request_ctx {
  71	crypto_completion_t complete;
  72};
  73
  74struct cryptd_hash_ctx {
  75	refcount_t refcnt;
  76	struct crypto_shash *child;
  77};
  78
  79struct cryptd_hash_request_ctx {
  80	crypto_completion_t complete;
  81	struct shash_desc desc;
  82};
  83
  84struct cryptd_aead_ctx {
  85	refcount_t refcnt;
  86	struct crypto_aead *child;
  87};
  88
  89struct cryptd_aead_request_ctx {
  90	crypto_completion_t complete;
  91};
  92
  93static void cryptd_queue_worker(struct work_struct *work);
  94
  95static int cryptd_init_queue(struct cryptd_queue *queue,
  96			     unsigned int max_cpu_qlen)
  97{
  98	int cpu;
  99	struct cryptd_cpu_queue *cpu_queue;
 100
 101	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
 102	if (!queue->cpu_queue)
 103		return -ENOMEM;
 104	for_each_possible_cpu(cpu) {
 105		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 106		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 107		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
 108	}
 109	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 110	return 0;
 111}
 112
 113static void cryptd_fini_queue(struct cryptd_queue *queue)
 114{
 115	int cpu;
 116	struct cryptd_cpu_queue *cpu_queue;
 117
 118	for_each_possible_cpu(cpu) {
 119		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 120		BUG_ON(cpu_queue->queue.qlen);
 121	}
 122	free_percpu(queue->cpu_queue);
 123}
 124
 125static int cryptd_enqueue_request(struct cryptd_queue *queue,
 126				  struct crypto_async_request *request)
 127{
 128	int cpu, err;
 129	struct cryptd_cpu_queue *cpu_queue;
 130	refcount_t *refcnt;
 131
 132	cpu = get_cpu();
 133	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 134	err = crypto_enqueue_request(&cpu_queue->queue, request);
 135
 136	refcnt = crypto_tfm_ctx(request->tfm);
 137
 138	if (err == -ENOSPC)
 139		goto out_put_cpu;
 140
 141	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
 142
 143	if (!refcount_read(refcnt))
 144		goto out_put_cpu;
 145
 146	refcount_inc(refcnt);
 147
 148out_put_cpu:
 149	put_cpu();
 150
 151	return err;
 152}
 153
  154/* Called in workqueue context: do one unit of real crypto work (via
  155 * req->complete) and reschedule itself if there is more work to
  156 * do. */
 157static void cryptd_queue_worker(struct work_struct *work)
 158{
 159	struct cryptd_cpu_queue *cpu_queue;
 160	struct crypto_async_request *req, *backlog;
 161
 162	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 163	/*
  164	 * Only handle one request at a time to avoid hogging the crypto
  165	 * workqueue. preempt_disable/enable prevents this worker from being
  166	 * preempted by cryptd_enqueue_request(); local_bh_disable/enable
  167	 * prevents cryptd_enqueue_request() from running in softirq context.
 168	 */
 169	local_bh_disable();
 170	preempt_disable();
 171	backlog = crypto_get_backlog(&cpu_queue->queue);
 172	req = crypto_dequeue_request(&cpu_queue->queue);
 173	preempt_enable();
 174	local_bh_enable();
 175
 176	if (!req)
 177		return;
 178
 179	if (backlog)
 180		backlog->complete(backlog, -EINPROGRESS);
 181	req->complete(req, 0);
 182
 183	if (cpu_queue->queue.qlen)
 184		queue_work(cryptd_wq, &cpu_queue->work);
 185}
 186
 187static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
 188{
 189	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 190	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
 191	return ictx->queue;
 192}
 193
 194static void cryptd_type_and_mask(struct crypto_attr_type *algt,
 195				 u32 *type, u32 *mask)
 196{
 197	/*
 198	 * cryptd is allowed to wrap internal algorithms, but in that case the
 199	 * resulting cryptd instance will be marked as internal as well.
 200	 */
 201	*type = algt->type & CRYPTO_ALG_INTERNAL;
 202	*mask = algt->mask & CRYPTO_ALG_INTERNAL;
 203
 204	/* No point in cryptd wrapping an algorithm that's already async. */
 205	*mask |= CRYPTO_ALG_ASYNC;
 206
 207	*mask |= crypto_algt_inherited_mask(algt);
 208}
 209
 210static int cryptd_init_instance(struct crypto_instance *inst,
 211				struct crypto_alg *alg)
 212{
 213	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 214		     "cryptd(%s)",
 215		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 216		return -ENAMETOOLONG;
 217
 218	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
 219
 220	inst->alg.cra_priority = alg->cra_priority + 50;
 221	inst->alg.cra_blocksize = alg->cra_blocksize;
 222	inst->alg.cra_alignmask = alg->cra_alignmask;
 223
 224	return 0;
 225}
 226
 227static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
 228				  const u8 *key, unsigned int keylen)
 229{
 230	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
 231	struct crypto_sync_skcipher *child = ctx->child;
 232
 233	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 234	crypto_sync_skcipher_set_flags(child,
 235				       crypto_skcipher_get_flags(parent) &
 236					 CRYPTO_TFM_REQ_MASK);
 237	return crypto_sync_skcipher_setkey(child, key, keylen);
 238}
 239
 240static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
 241{
 242	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 243	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 244	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 245	int refcnt = refcount_read(&ctx->refcnt);
 246
 247	local_bh_disable();
 248	rctx->complete(&req->base, err);
 249	local_bh_enable();
 250
 251	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 252		crypto_free_skcipher(tfm);
 253}
 254
 255static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
 256				    int err)
 257{
 258	struct skcipher_request *req = skcipher_request_cast(base);
 259	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 260	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 261	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 262	struct crypto_sync_skcipher *child = ctx->child;
 263	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 264
 265	if (unlikely(err == -EINPROGRESS))
 266		goto out;
 267
 268	skcipher_request_set_sync_tfm(subreq, child);
 269	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 270				      NULL, NULL);
 271	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 272				   req->iv);
 273
 274	err = crypto_skcipher_encrypt(subreq);
 275	skcipher_request_zero(subreq);
 276
 277	req->base.complete = rctx->complete;
 278
 279out:
 280	cryptd_skcipher_complete(req, err);
 281}
 282
 283static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
 284				    int err)
 285{
 286	struct skcipher_request *req = skcipher_request_cast(base);
 287	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 288	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 289	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 290	struct crypto_sync_skcipher *child = ctx->child;
 291	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
 292
 293	if (unlikely(err == -EINPROGRESS))
 294		goto out;
 295
 296	skcipher_request_set_sync_tfm(subreq, child);
 297	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 298				      NULL, NULL);
 299	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 300				   req->iv);
 301
 302	err = crypto_skcipher_decrypt(subreq);
 303	skcipher_request_zero(subreq);
 304
 305	req->base.complete = rctx->complete;
 306
 307out:
 308	cryptd_skcipher_complete(req, err);
 309}
 310
 311static int cryptd_skcipher_enqueue(struct skcipher_request *req,
 312				   crypto_completion_t compl)
 313{
 314	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
 315	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 316	struct cryptd_queue *queue;
 317
 318	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
 319	rctx->complete = req->base.complete;
 320	req->base.complete = compl;
 321
 322	return cryptd_enqueue_request(queue, &req->base);
 323}
 324
 325static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
 326{
 327	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
 328}
 329
 330static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
 331{
 332	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
 333}
 334
 335static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
 336{
 337	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 338	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
 339	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
 340	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 341	struct crypto_skcipher *cipher;
 342
 343	cipher = crypto_spawn_skcipher(spawn);
 344	if (IS_ERR(cipher))
 345		return PTR_ERR(cipher);
 346
 347	ctx->child = (struct crypto_sync_skcipher *)cipher;
 348	crypto_skcipher_set_reqsize(
 349		tfm, sizeof(struct cryptd_skcipher_request_ctx));
 350	return 0;
 351}
 352
 353static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
 354{
 355	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 356
 357	crypto_free_sync_skcipher(ctx->child);
 358}
 359
 360static void cryptd_skcipher_free(struct skcipher_instance *inst)
 361{
 362	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
 363
 364	crypto_drop_skcipher(&ctx->spawn);
 365	kfree(inst);
 366}
 367
 368static int cryptd_create_skcipher(struct crypto_template *tmpl,
 369				  struct rtattr **tb,
 370				  struct crypto_attr_type *algt,
 371				  struct cryptd_queue *queue)
 372{
 373	struct skcipherd_instance_ctx *ctx;
 374	struct skcipher_instance *inst;
 375	struct skcipher_alg *alg;
 376	u32 type;
 377	u32 mask;
 378	int err;
 379
 380	cryptd_type_and_mask(algt, &type, &mask);
 381
 382	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 383	if (!inst)
 384		return -ENOMEM;
 385
 386	ctx = skcipher_instance_ctx(inst);
 387	ctx->queue = queue;
 388
 389	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
 390				   crypto_attr_alg_name(tb[1]), type, mask);
 391	if (err)
 392		goto err_free_inst;
 393
 394	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
 395	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
 396	if (err)
 397		goto err_free_inst;
 398
 399	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
 400		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 401	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
 402	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
 403	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
 404	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
 405
 406	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
 407
 408	inst->alg.init = cryptd_skcipher_init_tfm;
 409	inst->alg.exit = cryptd_skcipher_exit_tfm;
 410
 411	inst->alg.setkey = cryptd_skcipher_setkey;
 412	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
 413	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
 414
 415	inst->free = cryptd_skcipher_free;
 416
 417	err = skcipher_register_instance(tmpl, inst);
 418	if (err) {
 419err_free_inst:
 420		cryptd_skcipher_free(inst);
 421	}
 422	return err;
 423}
 424
 425static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 426{
 427	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
 428	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
 429	struct crypto_shash_spawn *spawn = &ictx->spawn;
 430	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 431	struct crypto_shash *hash;
 432
 433	hash = crypto_spawn_shash(spawn);
 434	if (IS_ERR(hash))
 435		return PTR_ERR(hash);
 436
 437	ctx->child = hash;
 438	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 439				 sizeof(struct cryptd_hash_request_ctx) +
 440				 crypto_shash_descsize(hash));
 441	return 0;
 442}
 443
 444static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
 445{
 446	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 447
 448	crypto_free_shash(ctx->child);
 449}
 450
 451static int cryptd_hash_setkey(struct crypto_ahash *parent,
 452				   const u8 *key, unsigned int keylen)
 453{
 454	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
 455	struct crypto_shash *child = ctx->child;
 456
 457	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 458	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
 459				      CRYPTO_TFM_REQ_MASK);
 460	return crypto_shash_setkey(child, key, keylen);
 461}
 462
 463static int cryptd_hash_enqueue(struct ahash_request *req,
 464				crypto_completion_t compl)
 465{
 466	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 467	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 468	struct cryptd_queue *queue =
 469		cryptd_get_queue(crypto_ahash_tfm(tfm));
 470
 471	rctx->complete = req->base.complete;
 472	req->base.complete = compl;
 473
 474	return cryptd_enqueue_request(queue, &req->base);
 475}
 476
 477static void cryptd_hash_complete(struct ahash_request *req, int err)
 478{
 479	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 480	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 481	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 482	int refcnt = refcount_read(&ctx->refcnt);
 483
 484	local_bh_disable();
 485	rctx->complete(&req->base, err);
 486	local_bh_enable();
 487
 488	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 489		crypto_free_ahash(tfm);
 490}
 491
 492static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
 493{
 494	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 495	struct crypto_shash *child = ctx->child;
 496	struct ahash_request *req = ahash_request_cast(req_async);
 497	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 498	struct shash_desc *desc = &rctx->desc;
 499
 500	if (unlikely(err == -EINPROGRESS))
 501		goto out;
 502
 503	desc->tfm = child;
 504
 505	err = crypto_shash_init(desc);
 506
 507	req->base.complete = rctx->complete;
 508
 509out:
 510	cryptd_hash_complete(req, err);
 511}
 512
 513static int cryptd_hash_init_enqueue(struct ahash_request *req)
 514{
 515	return cryptd_hash_enqueue(req, cryptd_hash_init);
 516}
 517
 518static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
 519{
 520	struct ahash_request *req = ahash_request_cast(req_async);
 521	struct cryptd_hash_request_ctx *rctx;
 522
 523	rctx = ahash_request_ctx(req);
 524
 525	if (unlikely(err == -EINPROGRESS))
 526		goto out;
 527
 528	err = shash_ahash_update(req, &rctx->desc);
 529
 530	req->base.complete = rctx->complete;
 531
 532out:
 533	cryptd_hash_complete(req, err);
 534}
 535
 536static int cryptd_hash_update_enqueue(struct ahash_request *req)
 537{
 538	return cryptd_hash_enqueue(req, cryptd_hash_update);
 539}
 540
 541static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
 542{
 543	struct ahash_request *req = ahash_request_cast(req_async);
 544	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 545
 546	if (unlikely(err == -EINPROGRESS))
 547		goto out;
 548
 549	err = crypto_shash_final(&rctx->desc, req->result);
 550
 551	req->base.complete = rctx->complete;
 552
 553out:
 554	cryptd_hash_complete(req, err);
 555}
 556
 557static int cryptd_hash_final_enqueue(struct ahash_request *req)
 558{
 559	return cryptd_hash_enqueue(req, cryptd_hash_final);
 560}
 561
 562static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
 563{
 564	struct ahash_request *req = ahash_request_cast(req_async);
 565	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 566
 567	if (unlikely(err == -EINPROGRESS))
 568		goto out;
 569
 570	err = shash_ahash_finup(req, &rctx->desc);
 571
 572	req->base.complete = rctx->complete;
 573
 574out:
 575	cryptd_hash_complete(req, err);
 576}
 577
 578static int cryptd_hash_finup_enqueue(struct ahash_request *req)
 579{
 580	return cryptd_hash_enqueue(req, cryptd_hash_finup);
 581}
 582
 583static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
 584{
 585	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
 586	struct crypto_shash *child = ctx->child;
 587	struct ahash_request *req = ahash_request_cast(req_async);
 588	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 589	struct shash_desc *desc = &rctx->desc;
 590
 591	if (unlikely(err == -EINPROGRESS))
 592		goto out;
 593
 594	desc->tfm = child;
 595
 596	err = shash_ahash_digest(req, desc);
 597
 598	req->base.complete = rctx->complete;
 599
 600out:
 601	cryptd_hash_complete(req, err);
 602}
 603
 604static int cryptd_hash_digest_enqueue(struct ahash_request *req)
 605{
 606	return cryptd_hash_enqueue(req, cryptd_hash_digest);
 607}
 608
 609static int cryptd_hash_export(struct ahash_request *req, void *out)
 610{
 611	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 612
 613	return crypto_shash_export(&rctx->desc, out);
 614}
 615
 616static int cryptd_hash_import(struct ahash_request *req, const void *in)
 617{
 618	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 619	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 620	struct shash_desc *desc = cryptd_shash_desc(req);
 621
 622	desc->tfm = ctx->child;
 623
 624	return crypto_shash_import(desc, in);
 625}
 626
 627static void cryptd_hash_free(struct ahash_instance *inst)
 628{
 629	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
 630
 631	crypto_drop_shash(&ctx->spawn);
 632	kfree(inst);
 633}
 634
 635static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 636			      struct crypto_attr_type *algt,
 637			      struct cryptd_queue *queue)
 638{
 639	struct hashd_instance_ctx *ctx;
 640	struct ahash_instance *inst;
 641	struct shash_alg *alg;
 642	u32 type;
 643	u32 mask;
 644	int err;
 645
 646	cryptd_type_and_mask(algt, &type, &mask);
 647
 648	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 649	if (!inst)
 650		return -ENOMEM;
 651
 652	ctx = ahash_instance_ctx(inst);
 653	ctx->queue = queue;
 654
 655	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
 656				crypto_attr_alg_name(tb[1]), type, mask);
 657	if (err)
 658		goto err_free_inst;
 659	alg = crypto_spawn_shash_alg(&ctx->spawn);
 660
 661	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
 662	if (err)
 663		goto err_free_inst;
 664
 665	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
 666		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
 667					CRYPTO_ALG_OPTIONAL_KEY));
 668	inst->alg.halg.digestsize = alg->digestsize;
 669	inst->alg.halg.statesize = alg->statesize;
 670	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
 671
 672	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
 673	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
 674
 675	inst->alg.init   = cryptd_hash_init_enqueue;
 676	inst->alg.update = cryptd_hash_update_enqueue;
 677	inst->alg.final  = cryptd_hash_final_enqueue;
 678	inst->alg.finup  = cryptd_hash_finup_enqueue;
 679	inst->alg.export = cryptd_hash_export;
 680	inst->alg.import = cryptd_hash_import;
 681	if (crypto_shash_alg_has_setkey(alg))
 682		inst->alg.setkey = cryptd_hash_setkey;
 683	inst->alg.digest = cryptd_hash_digest_enqueue;
 684
 685	inst->free = cryptd_hash_free;
 686
 687	err = ahash_register_instance(tmpl, inst);
 688	if (err) {
 689err_free_inst:
 690		cryptd_hash_free(inst);
 691	}
 692	return err;
 693}
 694
 695static int cryptd_aead_setkey(struct crypto_aead *parent,
 696			      const u8 *key, unsigned int keylen)
 697{
 698	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 699	struct crypto_aead *child = ctx->child;
 700
 701	return crypto_aead_setkey(child, key, keylen);
 702}
 703
 704static int cryptd_aead_setauthsize(struct crypto_aead *parent,
 705				   unsigned int authsize)
 706{
 707	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
 708	struct crypto_aead *child = ctx->child;
 709
 710	return crypto_aead_setauthsize(child, authsize);
 711}
 712
 713static void cryptd_aead_crypt(struct aead_request *req,
 714			struct crypto_aead *child,
 715			int err,
 716			int (*crypt)(struct aead_request *req))
 717{
 718	struct cryptd_aead_request_ctx *rctx;
 719	struct cryptd_aead_ctx *ctx;
 720	crypto_completion_t compl;
 721	struct crypto_aead *tfm;
 722	int refcnt;
 723
 724	rctx = aead_request_ctx(req);
 725	compl = rctx->complete;
 726
 727	tfm = crypto_aead_reqtfm(req);
 728
 729	if (unlikely(err == -EINPROGRESS))
 730		goto out;
 731	aead_request_set_tfm(req, child);
 732	err = crypt(req);
 733
 734out:
 735	ctx = crypto_aead_ctx(tfm);
 736	refcnt = refcount_read(&ctx->refcnt);
 737
 738	local_bh_disable();
 739	compl(&req->base, err);
 740	local_bh_enable();
 741
 742	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
 743		crypto_free_aead(tfm);
 744}
 745
 746static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
 747{
 748	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 749	struct crypto_aead *child = ctx->child;
 750	struct aead_request *req;
 751
 752	req = container_of(areq, struct aead_request, base);
 753	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 754}
 755
 756static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 757{
 758	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 759	struct crypto_aead *child = ctx->child;
 760	struct aead_request *req;
 761
 762	req = container_of(areq, struct aead_request, base);
 763	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 764}
 765
 766static int cryptd_aead_enqueue(struct aead_request *req,
 767				    crypto_completion_t compl)
 768{
 769	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
 770	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 771	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 772
 773	rctx->complete = req->base.complete;
 774	req->base.complete = compl;
 775	return cryptd_enqueue_request(queue, &req->base);
 776}
 777
 778static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
 779{
 780	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
 781}
 782
 783static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
 784{
 785	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
 786}
 787
 788static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
 789{
 790	struct aead_instance *inst = aead_alg_instance(tfm);
 791	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
 792	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
 793	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
 794	struct crypto_aead *cipher;
 795
 796	cipher = crypto_spawn_aead(spawn);
 797	if (IS_ERR(cipher))
 798		return PTR_ERR(cipher);
 799
 800	ctx->child = cipher;
 801	crypto_aead_set_reqsize(
 802		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
 803			 crypto_aead_reqsize(cipher)));
 804	return 0;
 805}
 806
 807static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
 808{
 809	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
 810	crypto_free_aead(ctx->child);
 811}
 812
 813static void cryptd_aead_free(struct aead_instance *inst)
 814{
 815	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
 816
 817	crypto_drop_aead(&ctx->aead_spawn);
 818	kfree(inst);
 819}
 820
 821static int cryptd_create_aead(struct crypto_template *tmpl,
 822		              struct rtattr **tb,
 823			      struct crypto_attr_type *algt,
 824			      struct cryptd_queue *queue)
 825{
 826	struct aead_instance_ctx *ctx;
 827	struct aead_instance *inst;
 828	struct aead_alg *alg;
 829	u32 type;
 830	u32 mask;
 831	int err;
 832
 833	cryptd_type_and_mask(algt, &type, &mask);
 834
 835	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 836	if (!inst)
 837		return -ENOMEM;
 838
 839	ctx = aead_instance_ctx(inst);
 840	ctx->queue = queue;
 841
 842	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
 843			       crypto_attr_alg_name(tb[1]), type, mask);
 844	if (err)
 845		goto err_free_inst;
 846
 847	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
 848	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
 849	if (err)
 850		goto err_free_inst;
 851
 852	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
 853		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 854	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 855
 856	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 857	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 858
 859	inst->alg.init = cryptd_aead_init_tfm;
 860	inst->alg.exit = cryptd_aead_exit_tfm;
 861	inst->alg.setkey = cryptd_aead_setkey;
 862	inst->alg.setauthsize = cryptd_aead_setauthsize;
 863	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
 864	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
 865
 866	inst->free = cryptd_aead_free;
 867
 868	err = aead_register_instance(tmpl, inst);
 869	if (err) {
 870err_free_inst:
 871		cryptd_aead_free(inst);
 872	}
 873	return err;
 874}
 875
 876static struct cryptd_queue queue;
 877
 878static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 879{
 880	struct crypto_attr_type *algt;
 881
 882	algt = crypto_get_attr_type(tb);
 883	if (IS_ERR(algt))
 884		return PTR_ERR(algt);
 885
 886	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 887	case CRYPTO_ALG_TYPE_SKCIPHER:
 888		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
 889	case CRYPTO_ALG_TYPE_HASH:
 890		return cryptd_create_hash(tmpl, tb, algt, &queue);
 891	case CRYPTO_ALG_TYPE_AEAD:
 892		return cryptd_create_aead(tmpl, tb, algt, &queue);
 893	}
 894
 895	return -EINVAL;
 896}
 897
 898static struct crypto_template cryptd_tmpl = {
 899	.name = "cryptd",
 900	.create = cryptd_create,
 901	.module = THIS_MODULE,
 902};
 903
 904struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
 905					      u32 type, u32 mask)
 906{
 907	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 908	struct cryptd_skcipher_ctx *ctx;
 909	struct crypto_skcipher *tfm;
 910
 911	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 912		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 913		return ERR_PTR(-EINVAL);
 914
 915	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
 916	if (IS_ERR(tfm))
 917		return ERR_CAST(tfm);
 918
 919	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
 920		crypto_free_skcipher(tfm);
 921		return ERR_PTR(-EINVAL);
 922	}
 923
 924	ctx = crypto_skcipher_ctx(tfm);
 925	refcount_set(&ctx->refcnt, 1);
 926
 927	return container_of(tfm, struct cryptd_skcipher, base);
 928}
 929EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
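/*
 * A minimal usage sketch, assuming an internal-only child algorithm whose
 * driver name "__cbc-aes-example" is illustrative; only helpers defined in
 * this file are used:
 *
 *	struct cryptd_skcipher *ctfm;
 *
 *	ctfm = cryptd_alloc_skcipher("__cbc-aes-example",
 *				     CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *	...				// submit requests via &ctfm->base
 *	cryptd_free_skcipher(ctfm);
 */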
 930
 931struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
 932{
 933	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 934
 935	return &ctx->child->base;
 936}
 937EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
 938
 939bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
 940{
 941	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 942
 943	return refcount_read(&ctx->refcnt) - 1;
 944}
 945EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
 946
 947void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
 948{
 949	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
 950
 951	if (refcount_dec_and_test(&ctx->refcnt))
 952		crypto_free_skcipher(&tfm->base);
 953}
 954EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
 955
 956struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 957					u32 type, u32 mask)
 958{
 959	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 960	struct cryptd_hash_ctx *ctx;
 961	struct crypto_ahash *tfm;
 962
 963	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 964		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 965		return ERR_PTR(-EINVAL);
 966	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
 967	if (IS_ERR(tfm))
 968		return ERR_CAST(tfm);
 969	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
 970		crypto_free_ahash(tfm);
 971		return ERR_PTR(-EINVAL);
 972	}
 973
 974	ctx = crypto_ahash_ctx(tfm);
 975	refcount_set(&ctx->refcnt, 1);
 976
 977	return __cryptd_ahash_cast(tfm);
 978}
 979EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
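/*
 * A minimal usage sketch (the "__ghash-example" name is illustrative);
 * cryptd_ahash_child() exposes the underlying shash to the caller:
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("__ghash-example",
 *				   CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	// e.g. crypto_shash_digestsize(cryptd_ahash_child(chash));
 *	cryptd_free_ahash(chash);
 */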
 980
 981struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
 982{
 983	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 984
 985	return ctx->child;
 986}
 987EXPORT_SYMBOL_GPL(cryptd_ahash_child);
 988
 989struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
 990{
 991	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
 992	return &rctx->desc;
 993}
 994EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 995
 996bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
 997{
 998	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 999
1000	return refcount_read(&ctx->refcnt) - 1;
1001}
1002EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
1003
1004void cryptd_free_ahash(struct cryptd_ahash *tfm)
1005{
1006	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
1007
1008	if (refcount_dec_and_test(&ctx->refcnt))
1009		crypto_free_ahash(&tfm->base);
1010}
1011EXPORT_SYMBOL_GPL(cryptd_free_ahash);
1012
1013struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
1014						  u32 type, u32 mask)
1015{
1016	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
1017	struct cryptd_aead_ctx *ctx;
1018	struct crypto_aead *tfm;
1019
1020	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
1021		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
1022		return ERR_PTR(-EINVAL);
1023	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
1024	if (IS_ERR(tfm))
1025		return ERR_CAST(tfm);
1026	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
1027		crypto_free_aead(tfm);
1028		return ERR_PTR(-EINVAL);
1029	}
1030
1031	ctx = crypto_aead_ctx(tfm);
1032	refcount_set(&ctx->refcnt, 1);
1033
1034	return __cryptd_aead_cast(tfm);
1035}
1036EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
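/*
 * A minimal usage sketch ("__gcm-aes-example" is illustrative): a caller
 * may touch the child directly only while cryptd_aead_queued() reports no
 * requests pending, which preserves completion ordering:
 *
 *	struct cryptd_aead *caead;
 *
 *	caead = cryptd_alloc_aead("__gcm-aes-example",
 *				  CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(caead))
 *		return PTR_ERR(caead);
 *	if (!cryptd_aead_queued(caead))
 *		...			// safe to use cryptd_aead_child(caead)
 *	cryptd_free_aead(caead);
 */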
1037
1038struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
1039{
1040	struct cryptd_aead_ctx *ctx;
1041	ctx = crypto_aead_ctx(&tfm->base);
1042	return ctx->child;
1043}
1044EXPORT_SYMBOL_GPL(cryptd_aead_child);
1045
1046bool cryptd_aead_queued(struct cryptd_aead *tfm)
1047{
1048	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1049
1050	return refcount_read(&ctx->refcnt) - 1;
1051}
1052EXPORT_SYMBOL_GPL(cryptd_aead_queued);
1053
1054void cryptd_free_aead(struct cryptd_aead *tfm)
1055{
1056	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
1057
1058	if (refcount_dec_and_test(&ctx->refcnt))
1059		crypto_free_aead(&tfm->base);
1060}
1061EXPORT_SYMBOL_GPL(cryptd_free_aead);
1062
1063static int __init cryptd_init(void)
1064{
1065	int err;
1066
1067	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1068				    1);
1069	if (!cryptd_wq)
1070		return -ENOMEM;
1071
1072	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
1073	if (err)
1074		goto err_destroy_wq;
1075
1076	err = crypto_register_template(&cryptd_tmpl);
1077	if (err)
1078		goto err_fini_queue;
1079
1080	return 0;
1081
1082err_fini_queue:
1083	cryptd_fini_queue(&queue);
1084err_destroy_wq:
1085	destroy_workqueue(cryptd_wq);
1086	return err;
1087}
1088
1089static void __exit cryptd_exit(void)
1090{
1091	destroy_workqueue(cryptd_wq);
1092	cryptd_fini_queue(&queue);
1093	crypto_unregister_template(&cryptd_tmpl);
1094}
1095
1096subsys_initcall(cryptd_init);
1097module_exit(cryptd_exit);
1098
1099MODULE_LICENSE("GPL");
1100MODULE_DESCRIPTION("Software async crypto daemon");
1101MODULE_ALIAS_CRYPTO("cryptd");
crypto/cryptd.c (v3.5.6)
  1/*
  2 * Software async crypto daemon.
  3 *
  4 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  5 *
  6 * Added AEAD support to cryptd.
  7 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
  8 *             Adrian Hoban <adrian.hoban@intel.com>
  9 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 10 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 11 *    Copyright (c) 2010, Intel Corporation.
 12 *
 13 * This program is free software; you can redistribute it and/or modify it
 14 * under the terms of the GNU General Public License as published by the Free
 15 * Software Foundation; either version 2 of the License, or (at your option)
 16 * any later version.
 17 *
 18 */
 19
 20#include <crypto/algapi.h>
 21#include <crypto/internal/hash.h>
 22#include <crypto/internal/aead.h>
 23#include <crypto/cryptd.h>
 24#include <crypto/crypto_wq.h>
 25#include <linux/err.h>
 26#include <linux/init.h>
 27#include <linux/kernel.h>
 28#include <linux/list.h>
 29#include <linux/module.h>
 30#include <linux/scatterlist.h>
 31#include <linux/sched.h>
 32#include <linux/slab.h>
 33
 34#define CRYPTD_MAX_CPU_QLEN 100
 35
 36struct cryptd_cpu_queue {
 37	struct crypto_queue queue;
 38	struct work_struct work;
 39};
 40
 41struct cryptd_queue {
 42	struct cryptd_cpu_queue __percpu *cpu_queue;
 43};
 44
 45struct cryptd_instance_ctx {
 46	struct crypto_spawn spawn;
 47	struct cryptd_queue *queue;
 48};
 49
 50struct hashd_instance_ctx {
 51	struct crypto_shash_spawn spawn;
 52	struct cryptd_queue *queue;
 53};
 54
 55struct aead_instance_ctx {
 56	struct crypto_aead_spawn aead_spawn;
 57	struct cryptd_queue *queue;
 58};
 59
 60struct cryptd_blkcipher_ctx {
 61	struct crypto_blkcipher *child;
 62};
 63
 64struct cryptd_blkcipher_request_ctx {
 65	crypto_completion_t complete;
 66};
 67
 68struct cryptd_hash_ctx {
 69	struct crypto_shash *child;
 70};
 71
 72struct cryptd_hash_request_ctx {
 73	crypto_completion_t complete;
 74	struct shash_desc desc;
 75};
 76
 77struct cryptd_aead_ctx {
 78	struct crypto_aead *child;
 79};
 80
 81struct cryptd_aead_request_ctx {
 82	crypto_completion_t complete;
 83};
 84
 85static void cryptd_queue_worker(struct work_struct *work);
 86
 87static int cryptd_init_queue(struct cryptd_queue *queue,
 88			     unsigned int max_cpu_qlen)
 89{
 90	int cpu;
 91	struct cryptd_cpu_queue *cpu_queue;
 92
 93	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
 94	if (!queue->cpu_queue)
 95		return -ENOMEM;
 96	for_each_possible_cpu(cpu) {
 97		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 98		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 99		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
100	}
101	return 0;
102}
103
104static void cryptd_fini_queue(struct cryptd_queue *queue)
105{
106	int cpu;
107	struct cryptd_cpu_queue *cpu_queue;
108
109	for_each_possible_cpu(cpu) {
110		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
111		BUG_ON(cpu_queue->queue.qlen);
112	}
113	free_percpu(queue->cpu_queue);
114}
115
116static int cryptd_enqueue_request(struct cryptd_queue *queue,
117				  struct crypto_async_request *request)
118{
119	int cpu, err;
120	struct cryptd_cpu_queue *cpu_queue;
121
122	cpu = get_cpu();
123	cpu_queue = this_cpu_ptr(queue->cpu_queue);
124	err = crypto_enqueue_request(&cpu_queue->queue, request);
125	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
126	put_cpu();
127
128	return err;
129}
130
131/* Called in workqueue context: do one unit of real crypto work (via
132 * req->complete) and reschedule itself if there is more work to
133 * do. */
134static void cryptd_queue_worker(struct work_struct *work)
135{
136	struct cryptd_cpu_queue *cpu_queue;
137	struct crypto_async_request *req, *backlog;
138
139	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
140	/* Only handle one request at a time to avoid hogging the crypto
141	 * workqueue. preempt_disable/enable is used to prevent this worker
142	 * from being preempted by cryptd_enqueue_request(). */
143	preempt_disable();
144	backlog = crypto_get_backlog(&cpu_queue->queue);
145	req = crypto_dequeue_request(&cpu_queue->queue);
146	preempt_enable();
147
148	if (!req)
149		return;
150
151	if (backlog)
152		backlog->complete(backlog, -EINPROGRESS);
153	req->complete(req, 0);
154
155	if (cpu_queue->queue.qlen)
156		queue_work(kcrypto_wq, &cpu_queue->work);
157}
158
159static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
160{
161	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
162	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
163	return ictx->queue;
164}
165
166static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
167				   const u8 *key, unsigned int keylen)
168{
169	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
170	struct crypto_blkcipher *child = ctx->child;
171	int err;
172
173	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
174	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
175					  CRYPTO_TFM_REQ_MASK);
176	err = crypto_blkcipher_setkey(child, key, keylen);
177	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
178					    CRYPTO_TFM_RES_MASK);
179	return err;
180}
181
182static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
183				   struct crypto_blkcipher *child,
184				   int err,
185				   int (*crypt)(struct blkcipher_desc *desc,
186						struct scatterlist *dst,
187						struct scatterlist *src,
188						unsigned int len))
189{
190	struct cryptd_blkcipher_request_ctx *rctx;
191	struct blkcipher_desc desc;
192
193	rctx = ablkcipher_request_ctx(req);
194
195	if (unlikely(err == -EINPROGRESS))
196		goto out;
197
198	desc.tfm = child;
199	desc.info = req->info;
200	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
201
202	err = crypt(&desc, req->dst, req->src, req->nbytes);
203
204	req->base.complete = rctx->complete;
205
206out:
207	local_bh_disable();
208	rctx->complete(&req->base, err);
209	local_bh_enable();
210}
211
212static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
213{
214	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
215	struct crypto_blkcipher *child = ctx->child;
216
217	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
218			       crypto_blkcipher_crt(child)->encrypt);
219}
220
221static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
222{
223	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
224	struct crypto_blkcipher *child = ctx->child;
225
226	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
227			       crypto_blkcipher_crt(child)->decrypt);
228}
229
230static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
231				    crypto_completion_t complete)
232{
233	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
234	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
235	struct cryptd_queue *queue;
236
237	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
238	rctx->complete = req->base.complete;
239	req->base.complete = complete;
240
241	return cryptd_enqueue_request(queue, &req->base);
242}
243
244static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
245{
246	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
247}
248
249static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
250{
251	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
252}
253
254static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
255{
256	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
257	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
258	struct crypto_spawn *spawn = &ictx->spawn;
259	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
260	struct crypto_blkcipher *cipher;
261
262	cipher = crypto_spawn_blkcipher(spawn);
263	if (IS_ERR(cipher))
264		return PTR_ERR(cipher);
265
266	ctx->child = cipher;
267	tfm->crt_ablkcipher.reqsize =
268		sizeof(struct cryptd_blkcipher_request_ctx);
269	return 0;
270}
271
272static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
273{
274	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
275
276	crypto_free_blkcipher(ctx->child);
277}
278
279static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
280				   unsigned int tail)
281{
282	char *p;
283	struct crypto_instance *inst;
284	int err;
285
286	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
287	if (!p)
288		return ERR_PTR(-ENOMEM);
289
290	inst = (void *)(p + head);
291
292	err = -ENAMETOOLONG;
293	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
294		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
295		goto out_free_inst;
296
297	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
298
299	inst->alg.cra_priority = alg->cra_priority + 50;
300	inst->alg.cra_blocksize = alg->cra_blocksize;
301	inst->alg.cra_alignmask = alg->cra_alignmask;
302
303out:
304	return p;
305
306out_free_inst:
307	kfree(p);
308	p = ERR_PTR(err);
309	goto out;
310}
311
312static int cryptd_create_blkcipher(struct crypto_template *tmpl,
313				   struct rtattr **tb,
314				   struct cryptd_queue *queue)
315{
316	struct cryptd_instance_ctx *ctx;
317	struct crypto_instance *inst;
318	struct crypto_alg *alg;
319	int err;
320
321	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
322				  CRYPTO_ALG_TYPE_MASK);
323	if (IS_ERR(alg))
324		return PTR_ERR(alg);
325
326	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
327	err = PTR_ERR(inst);
328	if (IS_ERR(inst))
329		goto out_put_alg;
330
331	ctx = crypto_instance_ctx(inst);
332	ctx->queue = queue;
333
334	err = crypto_init_spawn(&ctx->spawn, alg, inst,
335				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
336	if (err)
337		goto out_free_inst;
338
339	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
340	inst->alg.cra_type = &crypto_ablkcipher_type;
341
342	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
343	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
344	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
345
346	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
347
348	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
349
350	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
351	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
352
353	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
354	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
355	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
356
357	err = crypto_register_instance(tmpl, inst);
358	if (err) {
359		crypto_drop_spawn(&ctx->spawn);
360out_free_inst:
361		kfree(inst);
362	}
363
364out_put_alg:
365	crypto_mod_put(alg);
366	return err;
367}
368
369static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
370{
371	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
372	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
373	struct crypto_shash_spawn *spawn = &ictx->spawn;
374	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
375	struct crypto_shash *hash;
376
377	hash = crypto_spawn_shash(spawn);
378	if (IS_ERR(hash))
379		return PTR_ERR(hash);
380
381	ctx->child = hash;
382	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
383				 sizeof(struct cryptd_hash_request_ctx) +
384				 crypto_shash_descsize(hash));
385	return 0;
386}
387
388static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
389{
390	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
391
392	crypto_free_shash(ctx->child);
393}
394
395static int cryptd_hash_setkey(struct crypto_ahash *parent,
396				   const u8 *key, unsigned int keylen)
397{
398	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
399	struct crypto_shash *child = ctx->child;
400	int err;
401
402	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
403	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
404				      CRYPTO_TFM_REQ_MASK);
405	err = crypto_shash_setkey(child, key, keylen);
406	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
407				       CRYPTO_TFM_RES_MASK);
408	return err;
409}
410
411static int cryptd_hash_enqueue(struct ahash_request *req,
412				crypto_completion_t complete)
413{
414	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
415	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
416	struct cryptd_queue *queue =
417		cryptd_get_queue(crypto_ahash_tfm(tfm));
418
419	rctx->complete = req->base.complete;
420	req->base.complete = complete;
421
422	return cryptd_enqueue_request(queue, &req->base);
423}
424
425static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
426{
427	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
428	struct crypto_shash *child = ctx->child;
429	struct ahash_request *req = ahash_request_cast(req_async);
430	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
431	struct shash_desc *desc = &rctx->desc;
432
433	if (unlikely(err == -EINPROGRESS))
434		goto out;
435
436	desc->tfm = child;
437	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
438
439	err = crypto_shash_init(desc);
440
441	req->base.complete = rctx->complete;
442
443out:
444	local_bh_disable();
445	rctx->complete(&req->base, err);
446	local_bh_enable();
447}
448
449static int cryptd_hash_init_enqueue(struct ahash_request *req)
450{
451	return cryptd_hash_enqueue(req, cryptd_hash_init);
452}
453
454static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
455{
456	struct ahash_request *req = ahash_request_cast(req_async);
457	struct cryptd_hash_request_ctx *rctx;
458
459	rctx = ahash_request_ctx(req);
460
461	if (unlikely(err == -EINPROGRESS))
462		goto out;
463
464	err = shash_ahash_update(req, &rctx->desc);
465
466	req->base.complete = rctx->complete;
467
468out:
469	local_bh_disable();
470	rctx->complete(&req->base, err);
471	local_bh_enable();
472}
473
474static int cryptd_hash_update_enqueue(struct ahash_request *req)
475{
476	return cryptd_hash_enqueue(req, cryptd_hash_update);
477}
478
479static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
480{
481	struct ahash_request *req = ahash_request_cast(req_async);
482	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
483
484	if (unlikely(err == -EINPROGRESS))
485		goto out;
486
487	err = crypto_shash_final(&rctx->desc, req->result);
488
489	req->base.complete = rctx->complete;
490
491out:
492	local_bh_disable();
493	rctx->complete(&req->base, err);
494	local_bh_enable();
495}
496
497static int cryptd_hash_final_enqueue(struct ahash_request *req)
498{
499	return cryptd_hash_enqueue(req, cryptd_hash_final);
500}
501
502static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
503{
504	struct ahash_request *req = ahash_request_cast(req_async);
505	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
506
507	if (unlikely(err == -EINPROGRESS))
508		goto out;
509
510	err = shash_ahash_finup(req, &rctx->desc);
511
512	req->base.complete = rctx->complete;
513
514out:
515	local_bh_disable();
516	rctx->complete(&req->base, err);
517	local_bh_enable();
518}
519
520static int cryptd_hash_finup_enqueue(struct ahash_request *req)
521{
522	return cryptd_hash_enqueue(req, cryptd_hash_finup);
523}
524
525static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
526{
527	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
528	struct crypto_shash *child = ctx->child;
529	struct ahash_request *req = ahash_request_cast(req_async);
530	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
531	struct shash_desc *desc = &rctx->desc;
532
533	if (unlikely(err == -EINPROGRESS))
534		goto out;
535
536	desc->tfm = child;
537	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
538
539	err = shash_ahash_digest(req, desc);
540
541	req->base.complete = rctx->complete;
542
543out:
544	local_bh_disable();
545	rctx->complete(&req->base, err);
546	local_bh_enable();
547}
548
549static int cryptd_hash_digest_enqueue(struct ahash_request *req)
550{
551	return cryptd_hash_enqueue(req, cryptd_hash_digest);
552}
553
554static int cryptd_hash_export(struct ahash_request *req, void *out)
555{
556	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
557
558	return crypto_shash_export(&rctx->desc, out);
559}
560
561static int cryptd_hash_import(struct ahash_request *req, const void *in)
562{
563	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
564
565	return crypto_shash_import(&rctx->desc, in);
566}
567
568static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
569			      struct cryptd_queue *queue)
570{
571	struct hashd_instance_ctx *ctx;
572	struct ahash_instance *inst;
573	struct shash_alg *salg;
574	struct crypto_alg *alg;
575	int err;
576
577	salg = shash_attr_alg(tb[1], 0, 0);
578	if (IS_ERR(salg))
579		return PTR_ERR(salg);
580
581	alg = &salg->base;
582	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
583				     sizeof(*ctx));
584	err = PTR_ERR(inst);
585	if (IS_ERR(inst))
586		goto out_put_alg;
587
588	ctx = ahash_instance_ctx(inst);
589	ctx->queue = queue;
590
591	err = crypto_init_shash_spawn(&ctx->spawn, salg,
592				      ahash_crypto_instance(inst));
593	if (err)
594		goto out_free_inst;
595
596	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
597
598	inst->alg.halg.digestsize = salg->digestsize;
599	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
600
601	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
602	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
603
604	inst->alg.init   = cryptd_hash_init_enqueue;
605	inst->alg.update = cryptd_hash_update_enqueue;
606	inst->alg.final  = cryptd_hash_final_enqueue;
607	inst->alg.finup  = cryptd_hash_finup_enqueue;
608	inst->alg.export = cryptd_hash_export;
609	inst->alg.import = cryptd_hash_import;
610	inst->alg.setkey = cryptd_hash_setkey;
611	inst->alg.digest = cryptd_hash_digest_enqueue;
612
613	err = ahash_register_instance(tmpl, inst);
614	if (err) {
615		crypto_drop_shash(&ctx->spawn);
616out_free_inst:
617		kfree(inst);
618	}
619
620out_put_alg:
621	crypto_mod_put(alg);
622	return err;
623}
624
625static void cryptd_aead_crypt(struct aead_request *req,
626			struct crypto_aead *child,
627			int err,
628			int (*crypt)(struct aead_request *req))
629{
630	struct cryptd_aead_request_ctx *rctx;
631	rctx = aead_request_ctx(req);
632
633	if (unlikely(err == -EINPROGRESS))
634		goto out;
635	aead_request_set_tfm(req, child);
636	err = crypt(req);
637	req->base.complete = rctx->complete;
638out:
639	local_bh_disable();
640	rctx->complete(&req->base, err);
641	local_bh_enable();
642}
643
644static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
645{
646	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
647	struct crypto_aead *child = ctx->child;
648	struct aead_request *req;
649
650	req = container_of(areq, struct aead_request, base);
651	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
652}
653
654static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
655{
656	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
657	struct crypto_aead *child = ctx->child;
658	struct aead_request *req;
659
660	req = container_of(areq, struct aead_request, base);
661	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
662}
663
664static int cryptd_aead_enqueue(struct aead_request *req,
665				    crypto_completion_t complete)
666{
667	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
668	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
669	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
670
671	rctx->complete = req->base.complete;
672	req->base.complete = complete;
673	return cryptd_enqueue_request(queue, &req->base);
674}
675
676static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
677{
678	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
679}
680
681static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
682{
683	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
684}
685
686static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
687{
688	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
689	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
690	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
691	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
692	struct crypto_aead *cipher;
693
694	cipher = crypto_spawn_aead(spawn);
695	if (IS_ERR(cipher))
696		return PTR_ERR(cipher);
697
698	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
699	ctx->child = cipher;
700	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
701	return 0;
702}
703
704static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
705{
706	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
707	crypto_free_aead(ctx->child);
708}
709
710static int cryptd_create_aead(struct crypto_template *tmpl,
711		              struct rtattr **tb,
712			      struct cryptd_queue *queue)
713{
714	struct aead_instance_ctx *ctx;
715	struct crypto_instance *inst;
716	struct crypto_alg *alg;
717	int err;
718
719	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
720				CRYPTO_ALG_TYPE_MASK);
721	if (IS_ERR(alg))
722		return PTR_ERR(alg);
723
724	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
725	err = PTR_ERR(inst);
726	if (IS_ERR(inst))
727		goto out_put_alg;
728
729	ctx = crypto_instance_ctx(inst);
730	ctx->queue = queue;
731
732	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
733			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
734	if (err)
735		goto out_free_inst;
736
737	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
738	inst->alg.cra_type = alg->cra_type;
739	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
740	inst->alg.cra_init = cryptd_aead_init_tfm;
741	inst->alg.cra_exit = cryptd_aead_exit_tfm;
742	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
743	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
744	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
745	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
746	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
747	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
748	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
749	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
750	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;
751
752	err = crypto_register_instance(tmpl, inst);
753	if (err) {
754		crypto_drop_spawn(&ctx->aead_spawn.base);
755out_free_inst:
756		kfree(inst);
757	}
758out_put_alg:
759	crypto_mod_put(alg);
760	return err;
761}
762
763static struct cryptd_queue queue;
764
765static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
766{
767	struct crypto_attr_type *algt;
768
769	algt = crypto_get_attr_type(tb);
770	if (IS_ERR(algt))
771		return PTR_ERR(algt);
772
773	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
774	case CRYPTO_ALG_TYPE_BLKCIPHER:
775		return cryptd_create_blkcipher(tmpl, tb, &queue);
776	case CRYPTO_ALG_TYPE_DIGEST:
777		return cryptd_create_hash(tmpl, tb, &queue);
778	case CRYPTO_ALG_TYPE_AEAD:
779		return cryptd_create_aead(tmpl, tb, &queue);
780	}
781
782	return -EINVAL;
783}
784
785static void cryptd_free(struct crypto_instance *inst)
786{
787	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
788	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
789	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
790
791	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
792	case CRYPTO_ALG_TYPE_AHASH:
793		crypto_drop_shash(&hctx->spawn);
794		kfree(ahash_instance(inst));
795		return;
796	case CRYPTO_ALG_TYPE_AEAD:
797		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
798		kfree(inst);
799		return;
800	default:
801		crypto_drop_spawn(&ctx->spawn);
802		kfree(inst);
803	}
804}
805
806static struct crypto_template cryptd_tmpl = {
807	.name = "cryptd",
808	.create = cryptd_create,
809	.free = cryptd_free,
810	.module = THIS_MODULE,
811};
812
813struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
814						  u32 type, u32 mask)
815{
816	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
817	struct crypto_tfm *tfm;
818
819	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
820		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
821		return ERR_PTR(-EINVAL);
822	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
823	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
824	mask &= ~CRYPTO_ALG_TYPE_MASK;
825	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
826	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
827	if (IS_ERR(tfm))
828		return ERR_CAST(tfm);
829	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
830		crypto_free_tfm(tfm);
831		return ERR_PTR(-EINVAL);
832	}
833
834	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
835}
836EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
837
838struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
839{
840	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
841	return ctx->child;
842}
843EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
844
845void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
846{
847	crypto_free_ablkcipher(&tfm->base);
848}
849EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
850
851struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
852					u32 type, u32 mask)
853{
854	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
855	struct crypto_ahash *tfm;
856
857	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
858		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
859		return ERR_PTR(-EINVAL);
860	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
861	if (IS_ERR(tfm))
862		return ERR_CAST(tfm);
863	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
864		crypto_free_ahash(tfm);
865		return ERR_PTR(-EINVAL);
866	}
867
868	return __cryptd_ahash_cast(tfm);
869}
870EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
871
872struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
873{
874	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
875
876	return ctx->child;
877}
878EXPORT_SYMBOL_GPL(cryptd_ahash_child);
879
880struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
881{
882	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
883	return &rctx->desc;
884}
885EXPORT_SYMBOL_GPL(cryptd_shash_desc);
886
887void cryptd_free_ahash(struct cryptd_ahash *tfm)
888{
889	crypto_free_ahash(&tfm->base);
890}
891EXPORT_SYMBOL_GPL(cryptd_free_ahash);
892
893struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
894						  u32 type, u32 mask)
895{
896	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
897	struct crypto_aead *tfm;
898
899	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
900		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
901		return ERR_PTR(-EINVAL);
902	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
903	if (IS_ERR(tfm))
904		return ERR_CAST(tfm);
905	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
906		crypto_free_aead(tfm);
907		return ERR_PTR(-EINVAL);
908	}
909	return __cryptd_aead_cast(tfm);
910}
911EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
912
913struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
914{
915	struct cryptd_aead_ctx *ctx;
916	ctx = crypto_aead_ctx(&tfm->base);
917	return ctx->child;
918}
919EXPORT_SYMBOL_GPL(cryptd_aead_child);
920
921void cryptd_free_aead(struct cryptd_aead *tfm)
922{
923	crypto_free_aead(&tfm->base);
924}
925EXPORT_SYMBOL_GPL(cryptd_free_aead);
926
927static int __init cryptd_init(void)
928{
929	int err;
930
931	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
932	if (err)
933		return err;
934
935	err = crypto_register_template(&cryptd_tmpl);
936	if (err)
937		cryptd_fini_queue(&queue);
938
939	return err;
940}
941
942static void __exit cryptd_exit(void)
943{
944	cryptd_fini_queue(&queue);
945	crypto_unregister_template(&cryptd_tmpl);
946}
947
948subsys_initcall(cryptd_init);
949module_exit(cryptd_exit);
950
951MODULE_LICENSE("GPL");
952MODULE_DESCRIPTION("Software async crypto daemon");