v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
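
/*
 * Usage note (illustrative, not part of the original file): with a
 * permission argument of 0, module_param() creates no sysfs entry, so
 * cryptd_max_cpu_qlen can only be set at load time, e.g.
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=2000
 *
 * or, when cryptd is built in, via the kernel command line:
 *
 *	cryptd.cryptd_max_cpu_qlen=2000
 */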

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH to allow enqueueing from softirq and
	 * dequeuing from kworker (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	struct skcipher_request req;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	void *data;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	struct aead_request req;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context; performs one crypto operation (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging crypto workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);
	crypto_request_complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}
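
/*
 * Illustrative sketch (not part of this file): how a request flows through
 * the machinery above from a caller's point of view. The caller allocates a
 * "cryptd(...)" instance through the regular crypto API, submits an
 * asynchronous request, and cryptd_queue_worker() later completes it on the
 * workqueue. The algorithm name and the on-stack buffers are placeholder
 * assumptions for brevity (real code would use heap buffers, since stack
 * buffers are not safe to map into a scatterlist with VMAP_STACK).
 */
static int cryptd_flow_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[16] = {}, iv[16] = {}, buf[16] = {};
	int err;

	tfm = crypto_alloc_skcipher("cryptd(cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* The encrypt call returns -EINPROGRESS after cryptd_enqueue_request();
	 * crypto_wait_req() sleeps until the worker completes the request. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}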

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child,
				  crypto_skcipher_get_flags(parent) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, keylen);
}

static struct skcipher_request *cryptd_skcipher_prepare(
	struct skcipher_request *req, int err)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *child;

	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	child = ctx->child;

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	return subreq;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
				     crypto_completion_t complete)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq = &rctx->req;
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	skcipher_request_complete(req, err);
	local_bh_enable();

	if (unlikely(err == -EINPROGRESS)) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_encrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}

static void cryptd_skcipher_decrypt(void *data, int err)
{
	struct skcipher_request *req = data;
	struct skcipher_request *subreq;

	subreq = cryptd_skcipher_prepare(req, err);
	if (likely(subreq))
		err = crypto_skcipher_decrypt(subreq);

	cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_request *subreq = &rctx->req;
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx) +
		     crypto_skcipher_reqsize(cipher));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg_common *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = alg->ivsize;
	inst->alg.chunksize = alg->chunksize;
	inst->alg.min_keysize = alg->min_keysize;
	inst->alg.max_keysize = alg->max_keysize;

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct ahash_instance *inst = ahash_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
				 struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_clone_shash(ctx->child);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	nctx->child = hash;
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	rctx->data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;

	return cryptd_enqueue_request(queue, &req->base);
}

static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
					      int err)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	req->base.complete = rctx->complete;
	req->base.data = rctx->data;

	if (unlikely(err == -EINPROGRESS))
		return NULL;

	return &rctx->desc;
}

static void cryptd_hash_complete(struct ahash_request *req, int err,
				 crypto_completion_t complete)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	ahash_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		req->base.complete = complete;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

static void cryptd_hash_init(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_init);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_update(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_update);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = crypto_shash_final(desc, req->result);

	cryptd_hash_complete(req, err, cryptd_hash_final);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(void *data, int err)
{
	struct ahash_request *req = data;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (likely(desc))
		err = shash_ahash_finup(req, desc);

	cryptd_hash_complete(req, err, cryptd_hash_finup);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(void *data, int err)
{
	struct ahash_request *req = data;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child = ctx->child;
	struct shash_desc *desc;

	desc = cryptd_hash_prepare(req, err);
	if (unlikely(!desc))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

out:
	cryptd_hash_complete(req, err, cryptd_hash_digest);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.init_tfm = cryptd_hash_init_tfm;
	inst->alg.clone_tfm = cryptd_hash_clone_tfm;
	inst->alg.exit_tfm = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req),
			      crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx;
	struct aead_request *subreq;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	subreq = &rctx->req;
	req->base.complete = subreq->base.complete;
	req->base.data = subreq->base.data;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	aead_request_set_tfm(subreq, child);
	aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				  NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	err = crypt(subreq);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	aead_request_complete(req, err);
	local_bh_enable();

	if (err == -EINPROGRESS) {
		subreq->base.complete = req->base.complete;
		subreq->base.data = req->base.data;
		req->base.complete = compl;
		req->base.data = req;
	} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
			  cryptd_aead_encrypt);
}

static void cryptd_aead_decrypt(void *data, int err)
{
	struct aead_request *req = data;
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *child;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	child = ctx->child;
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
			  cryptd_aead_decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
	struct aead_request *subreq = &rctx->req;

	subreq->base.complete = req->base.complete;
	subreq->base.data = req->base.data;
	req->base.complete = compl;
	req->base.data = req;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, sizeof(struct cryptd_aead_request_ctx) +
		     crypto_aead_reqsize(cipher));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_LSKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
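
/*
 * Illustrative sketch (assumptions, not from this file): how a SIMD-style
 * wrapper might bind to cryptd through the helper above. The "__xts(aes)"
 * driver name and the demo_* functions are hypothetical; the
 * CRYPTO_ALG_INTERNAL type/mask mirrors how the x86 glue code keeps the
 * wrapped algorithm hidden from general lookups.
 */
static struct cryptd_skcipher *demo_ctfm;

static int demo_bind(void)
{
	demo_ctfm = cryptd_alloc_skcipher("__xts(aes)",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(demo_ctfm))
		return PTR_ERR(demo_ctfm);

	/* The synchronous child is reachable for direct, same-context use. */
	pr_debug("child: %s\n",
		 crypto_skcipher_driver_name(cryptd_skcipher_child(demo_ctfm)));
	return 0;
}

static void demo_unbind(void)
{
	/* Defers the actual free until queued requests have drained
	 * (see the ctx->refcnt handling above). */
	cryptd_free_skcipher(demo_ctfm);
}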

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
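
/*
 * Illustrative sketch (assumptions, not from this file): the AEAD helpers
 * follow the same pattern. The "__gcm(aes)" internal driver name and the
 * demo_bind_aead() function are hypothetical; historically the x86 AES-NI
 * glue wrapped its internal GCM implementation in roughly this way.
 */
static int demo_bind_aead(struct crypto_aead **out)
{
	struct cryptd_aead *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_aead("__gcm(aes)", CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	/* Hand back the async tfm; cryptd_aead_child() gives the
	 * synchronous child, cryptd_free_aead() drops the reference. */
	*out = &cryptd_tfm->base;
	return 0;
}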

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
v3.5.6

/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Called in workqueue context; performs one crypto operation (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue. preempt_disable()/preempt_enable() is used to prevent
	 * being preempted by cryptd_enqueue_request().
	 */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child,
			      int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;

	rctx = aead_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);
	req->base.complete = rctx->complete;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
	ctx->child = cipher;
	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = alg->cra_type;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
	inst->alg.cra_init = cryptd_aead_init_tfm;
	inst->alg.cra_exit = cryptd_aead_exit_tfm;
	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->aead_spawn.base);
out_free_inst:
		kfree(inst);
	}
out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
		kfree(inst);
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
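
/*
 * Illustrative sketch (assumptions, not from this file): in this era the
 * x86 AES-NI glue bound its synchronous "__driver-..." implementations to
 * cryptd roughly like this; the driver name and the ablk_demo_* functions
 * below are assumptions for illustration.
 */
static struct cryptd_ablkcipher *demo_cryptd_tfm;

static int ablk_demo_init(void)
{
	demo_cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni",
						  0, 0);
	if (IS_ERR(demo_cryptd_tfm))
		return PTR_ERR(demo_cryptd_tfm);

	/* cryptd_ablkcipher_child() yields the synchronous blkcipher for
	 * contexts where the FPU is usable; the asynchronous path goes
	 * through the cryptd instance allocated above. */
	return 0;
}

static void ablk_demo_exit(void)
{
	cryptd_free_ablkcipher(demo_cryptd_tfm);
}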

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx;
	ctx = crypto_aead_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");