v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max queue depth");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq context
	 * and dequeuing from the kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

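/*
 * Request/tfm lifetime: cryptd_alloc_*() below primes the context refcount
 * to 1; cryptd_enqueue_request() then takes an extra reference for every
 * request it queues (but only once the refcount has been activated), and
 * the completion paths drop that reference again.  This lets cryptd_free_*()
 * run while requests are still in flight without freeing the transform
 * prematurely.
 */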
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}

/* Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	struct crypto_skcipher *child = ctx->child;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

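/*
 * Queueing works by swapping completion callbacks: the caller's completion
 * function is stashed in the request context (rctx->complete) and
 * req->base.complete is pointed at cryptd's own handler, which performs the
 * real operation from the workqueue and then invokes the saved callback.
 * The same pattern is used for skcipher, hash and AEAD.
 */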
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

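/*
 * Instance construction: allocate the instance, grab the underlying
 * algorithm named by tb[1] as a spawn (the mask built in
 * cryptd_type_and_mask() guarantees it is synchronous), copy its
 * parameters, and register.  The err_free_inst label inside the final
 * if () covers every failure path via cryptd_skcipher_free().
 */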
static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			struct crypto_aead *child,
			int err,
			int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
		              struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

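/*
 * Public API: the cryptd_alloc_*() helpers below hand out cryptd instances
 * by name.  The cra_module check guarantees that the returned tfm really
 * came from this template, and the refcount is primed to 1 so in-flight
 * requests can pin the transform (see cryptd_enqueue_request()).
 */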
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
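
/*
 * Illustrative usage sketch (not part of cryptd.c): a minimal, hypothetical
 * module that consumes the exported cryptd API above.  The module name and
 * the choice of "sha256" are assumptions for illustration only; everything
 * else uses functions defined or exported in the listing above.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/cryptd.h>
#include <crypto/hash.h>

static struct cryptd_ahash *demo_tfm;

static int __init cryptd_demo_init(void)
{
	/* Ask for a cryptd-wrapped sha256; this resolves "cryptd(sha256)". */
	demo_tfm = cryptd_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(demo_tfm))
		return PTR_ERR(demo_tfm);

	/* The synchronous child is reachable for direct (non-queued) use. */
	pr_info("cryptd demo: child digest size %u\n",
		crypto_shash_digestsize(cryptd_ahash_child(demo_tfm)));
	return 0;
}

static void __exit cryptd_demo_exit(void)
{
	/* Drops our reference; the tfm is freed once no requests remain. */
	cryptd_free_ahash(demo_tfm);
}

module_init(cryptd_demo_init);
module_exit(cryptd_demo_exit);
MODULE_LICENSE("GPL");
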
v4.6
 
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one real crypto operation (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 * preempt_disable/enable is used to prevent being preempted by
	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
	 * cryptd_enqueue_request() being accessed from software interrupts.
	 */
	local_bh_disable();
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
					 u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

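/*
 * In this older version the worker completes the request by calling the
 * cryptd_blkcipher_* handlers below, which run the synchronous blkcipher
 * directly on a stack-allocated blkcipher_desc with CRYPTO_TFM_REQ_MAY_SLEEP
 * set, then hand the result to the caller's saved completion callback.
 */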
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t compl)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = cryptd_init_instance(inst, alg);
	if (err)
		goto out_free_inst;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	alg = crypto_get_attr_alg(tb, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.cra_flags = type;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	cryptd_check_internal(tb, &type, &mask);

	salg = shash_attr_alg(tb[1], type, mask);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			struct crypto_aead *child,
			int err,
			int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	crypto_completion_t compl;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
out:
	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();
}
 705
 706static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
 707{
 708	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 709	struct crypto_aead *child = ctx->child;
 710	struct aead_request *req;
 711
 712	req = container_of(areq, struct aead_request, base);
 713	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 714}
 715
 716static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 717{
 718	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
 719	struct crypto_aead *child = ctx->child;
 720	struct aead_request *req;
 721
 722	req = container_of(areq, struct aead_request, base);
 723	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 724}
 725
 726static int cryptd_aead_enqueue(struct aead_request *req,
 727				    crypto_completion_t compl)
 728{
 729	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
 730	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 731	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 732
 733	rctx->complete = req->base.complete;
 734	req->base.complete = compl;
 735	return cryptd_enqueue_request(queue, &req->base);
 736}
 737
 738static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
 739{
 740	return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
 741}
 742
 743static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
 744{
 745	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
 746}
 747
 748static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
 749{
 750	struct aead_instance *inst = aead_alg_instance(tfm);
 751	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
 752	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
 753	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
 754	struct crypto_aead *cipher;
 755
 756	cipher = crypto_spawn_aead(spawn);
 757	if (IS_ERR(cipher))
 758		return PTR_ERR(cipher);
 759
 760	ctx->child = cipher;
 761	crypto_aead_set_reqsize(
 762		tfm, max((unsigned int)sizeof(struct cryptd_aead_request_ctx),
 763			 crypto_aead_reqsize(cipher)));
 764	return 0;
 765}
 766
 767static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
 768{
 769	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

 770	crypto_free_aead(ctx->child);
 771}
 772
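/*
 * Template constructor for "cryptd(<aead>)": grab the wrapped AEAD
 * algorithm by name, copy its geometry (ivsize, maxauthsize) and wire
 * up async entry points that bounce requests through the cryptd queue.
 */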
 773static int cryptd_create_aead(struct crypto_template *tmpl,
 774			      struct rtattr **tb,
 775			      struct cryptd_queue *queue)
 776{
 777	struct aead_instance_ctx *ctx;
 778	struct aead_instance *inst;
 779	struct aead_alg *alg;
 780	const char *name;
 781	u32 type = 0;
 782	u32 mask = CRYPTO_ALG_ASYNC;
 783	int err;
 784
 785	cryptd_check_internal(tb, &type, &mask);
 786
 787	name = crypto_attr_alg_name(tb[1]);
 788	if (IS_ERR(name))
 789		return PTR_ERR(name);
 790
 791	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 792	if (!inst)
 793		return -ENOMEM;
 794
 795	ctx = aead_instance_ctx(inst);
 796	ctx->queue = queue;
 797
 798	crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
 799	err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
 800	if (err)
 801		goto out_free_inst;
 802
 803	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
 804	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
 805	if (err)
 806		goto out_drop_aead;
 807
 808	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
 809				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
 810	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
 811
 812	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 813	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 814
 815	inst->alg.init = cryptd_aead_init_tfm;
 816	inst->alg.exit = cryptd_aead_exit_tfm;
 817	inst->alg.setkey = cryptd_aead_setkey;
 818	inst->alg.setauthsize = cryptd_aead_setauthsize;
 819	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
 820	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
 821
 822	err = aead_register_instance(tmpl, inst);
 823	if (err) {
 824out_drop_aead:
 825		crypto_drop_aead(&ctx->aead_spawn);
 826out_free_inst:
 827		kfree(inst);
 828	}
 829	return err;
 830}
 831
 832static struct cryptd_queue queue;
 833
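/*
 * Single template entry point: dispatch on the type of the wrapped
 * algorithm (blkcipher, digest or AEAD) and build the matching cryptd
 * instance around the shared per-cpu queue.
 */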
 834static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 835{
 836	struct crypto_attr_type *algt;
 837
 838	algt = crypto_get_attr_type(tb);
 839	if (IS_ERR(algt))
 840		return PTR_ERR(algt);
 841
 842	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 843	case CRYPTO_ALG_TYPE_BLKCIPHER:
 844		return cryptd_create_blkcipher(tmpl, tb, &queue);
 845	case CRYPTO_ALG_TYPE_DIGEST:
 846		return cryptd_create_hash(tmpl, tb, &queue);
 847	case CRYPTO_ALG_TYPE_AEAD:
 848		return cryptd_create_aead(tmpl, tb, &queue);
 849	}
 850
 851	return -EINVAL;
 852}
 853
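/*
 * All three context pointers below alias the same instance context;
 * only the one matching the instance's algorithm type is used, so the
 * correct spawn type is dropped before the instance is freed.
 */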
 854static void cryptd_free(struct crypto_instance *inst)
 855{
 856	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
 857	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
 858	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 859
 860	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 861	case CRYPTO_ALG_TYPE_AHASH:
 862		crypto_drop_shash(&hctx->spawn);
 863		kfree(ahash_instance(inst));
 864		return;
 865	case CRYPTO_ALG_TYPE_AEAD:
 866		crypto_drop_aead(&aead_ctx->aead_spawn);
 867		kfree(aead_instance(inst));
 868		return;
 869	default:
 870		crypto_drop_spawn(&ctx->spawn);
 871		kfree(inst);
 872	}
 873}
 874
 875static struct crypto_template cryptd_tmpl = {
 876	.name = "cryptd",
 877	.create = cryptd_create,
 878	.free = cryptd_free,
 879	.module = THIS_MODULE,
 880};
 881
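/*
 * Illustrative sketch (not part of this file): users normally reach the
 * template above by asking the crypto API for a "cryptd(...)" name.
 * "sha256" below is only an example algorithm:
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("cryptd(sha256)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */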
 882struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
 883						  u32 type, u32 mask)
 884{
 885	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 886	struct crypto_tfm *tfm;
 887
 888	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 889		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 890		return ERR_PTR(-EINVAL);
 891	type = crypto_skcipher_type(type);
 892	mask &= ~CRYPTO_ALG_TYPE_MASK;
 893	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
 894	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
 895	if (IS_ERR(tfm))
 896		return ERR_CAST(tfm);
 897	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
 898		crypto_free_tfm(tfm);
 899		return ERR_PTR(-EINVAL);
 900	}
 901
 902	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
 903}
 904EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
 905
 906struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
 907{
 908	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
 
 909	return ctx->child;
 910}
 911EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
 912
 913void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 914{
 915	crypto_free_ablkcipher(&tfm->base);
 916}
 917EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
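/*
 * Illustrative sketch (not part of this file): typical lifetime of the
 * legacy ablkcipher wrapper above. The algorithm name is hypothetical;
 * real callers pass the name of an internal synchronous implementation.
 *
 *	struct cryptd_ablkcipher *tfm;
 *
 *	tfm = cryptd_alloc_ablkcipher("__example-cipher", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...				use &tfm->base as an ablkcipher
 *	cryptd_free_ablkcipher(tfm);
 */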
 918
 919struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 920					u32 type, u32 mask)
 921{
 922	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 923	struct crypto_ahash *tfm;
 924
 925	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 926		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 927		return ERR_PTR(-EINVAL);
 928	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
 929	if (IS_ERR(tfm))
 930		return ERR_CAST(tfm);
 931	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
 932		crypto_free_ahash(tfm);
 933		return ERR_PTR(-EINVAL);
 934	}
 935
 936	return __cryptd_ahash_cast(tfm);
 937}
 938EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
 939
 940struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
 941{
 942	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
 943
 944	return ctx->child;
 945}
 946EXPORT_SYMBOL_GPL(cryptd_ahash_child);
 947
 948struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
 949{
 950	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

 951	return &rctx->desc;
 952}
 953EXPORT_SYMBOL_GPL(cryptd_shash_desc);
 954
 955void cryptd_free_ahash(struct cryptd_ahash *tfm)
 956{
 957	crypto_free_ahash(&tfm->base);
 958}
 959EXPORT_SYMBOL_GPL(cryptd_free_ahash);
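/*
 * Illustrative sketch (not part of this file): pairing the ahash
 * helpers above; "sha256" is only an example algorithm:
 *
 *	struct cryptd_ahash *tfm = cryptd_alloc_ahash("sha256", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...				submit requests via &tfm->base;
 *					cryptd_ahash_child(tfm) and
 *					cryptd_shash_desc(req) reach the
 *					synchronous child directly
 *	cryptd_free_ahash(tfm);
 */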
 960
 961struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
 962				      u32 type, u32 mask)
 963{
 964	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
 965	struct crypto_aead *tfm;
 966
 967	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
 968		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
 969		return ERR_PTR(-EINVAL);
 970	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
 971	if (IS_ERR(tfm))
 972		return ERR_CAST(tfm);
 973	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
 974		crypto_free_aead(tfm);
 975		return ERR_PTR(-EINVAL);
 976	}
 977	return __cryptd_aead_cast(tfm);
 978}
 979EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
 980
 981struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
 982{
 983	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

 985	return ctx->child;
 986}
 987EXPORT_SYMBOL_GPL(cryptd_aead_child);
 988
 989void cryptd_free_aead(struct cryptd_aead *tfm)
 990{
 991	crypto_free_aead(&tfm->base);
 992}
 993EXPORT_SYMBOL_GPL(cryptd_free_aead);
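/*
 * Illustrative sketch (not part of this file): the AEAD helpers follow
 * the same pattern; "gcm(aes)" is only an example algorithm:
 *
 *	struct cryptd_aead *tfm = cryptd_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(&tfm->base, key, keylen);
 *	...
 *	cryptd_free_aead(tfm);
 */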
 994
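/*
 * Module bring-up: allocate the backing workqueue and the per-cpu
 * request queues before registering the "cryptd" template; tear them
 * down again on exit.
 */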
 995static int __init cryptd_init(void)
 996{
 997	int err;
 998
	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
1008}
1009
1010static void __exit cryptd_exit(void)
1011{
	destroy_workqueue(cryptd_wq);
1012	cryptd_fini_queue(&queue);
1013	crypto_unregister_template(&cryptd_tmpl);
1014}
1015
1016subsys_initcall(cryptd_init);
1017module_exit(cryptd_exit);
1018
1019MODULE_LICENSE("GPL");
1020MODULE_DESCRIPTION("Software async crypto daemon");
1021MODULE_ALIAS_CRYPTO("cryptd");