v4.17
 
   1/*
   2 * Symmetric key cipher operations.
   3 *
   4 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
   5 * multiple page boundaries by using temporary blocks.  In user context,
   6 * the kernel is given a chance to schedule us once per page.
   7 *
   8 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
   9 *
  10 * This program is free software; you can redistribute it and/or modify it
  11 * under the terms of the GNU General Public License as published by the Free
  12 * Software Foundation; either version 2 of the License, or (at your option)
  13 * any later version.
  14 *
  15 */
  16
  17#include <crypto/internal/aead.h>
 
  18#include <crypto/internal/skcipher.h>
  19#include <crypto/scatterwalk.h>
  20#include <linux/bug.h>
  21#include <linux/cryptouser.h>
  22#include <linux/compiler.h>
 
  23#include <linux/list.h>
 
  24#include <linux/module.h>
  25#include <linux/rtnetlink.h>
  26#include <linux/seq_file.h>
  27#include <net/netlink.h>
 
  28
  29#include "internal.h"
  30
  31enum {
  32	SKCIPHER_WALK_PHYS = 1 << 0,
  33	SKCIPHER_WALK_SLOW = 1 << 1,
  34	SKCIPHER_WALK_COPY = 1 << 2,
  35	SKCIPHER_WALK_DIFF = 1 << 3,
  36	SKCIPHER_WALK_SLEEP = 1 << 4,
  37};
  38
  39struct skcipher_walk_buffer {
  40	struct list_head entry;
  41	struct scatter_walk dst;
  42	unsigned int len;
  43	u8 *data;
  44	u8 buffer[];
  45};
  46
  47static int skcipher_walk_next(struct skcipher_walk *walk);
  48
  49static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
  50{
  51	if (PageHighMem(scatterwalk_page(walk)))
  52		kunmap_atomic(vaddr);
  53}
  54
  55static inline void *skcipher_map(struct scatter_walk *walk)
  56{
  57	struct page *page = scatterwalk_page(walk);
  58
  59	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
  60	       offset_in_page(walk->offset);
  61}
  62
  63static inline void skcipher_map_src(struct skcipher_walk *walk)
  64{
  65	walk->src.virt.addr = skcipher_map(&walk->in);
  66}
  67
  68static inline void skcipher_map_dst(struct skcipher_walk *walk)
  69{
  70	walk->dst.virt.addr = skcipher_map(&walk->out);
  71}
  72
  73static inline void skcipher_unmap_src(struct skcipher_walk *walk)
  74{
  75	skcipher_unmap(&walk->in, walk->src.virt.addr);
  76}
  77
  78static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
  79{
  80	skcipher_unmap(&walk->out, walk->dst.virt.addr);
  81}
  82
  83static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
  84{
  85	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
  86}
  87
  88/* Get a spot of the specified length that does not straddle a page.
  89 * The caller needs to ensure that there is enough space for this operation.
  90 */
  91static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
  92{
  93	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
  94
  95	return max(start, end_page);
  96}
  97
  98static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
  99{
 100	u8 *addr;
 101
 102	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 103	addr = skcipher_get_spot(addr, bsize);
 104	scatterwalk_copychunks(addr, &walk->out, bsize,
 105			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
 106	return 0;
 107}
 108
 109int skcipher_walk_done(struct skcipher_walk *walk, int err)
 110{
 111	unsigned int n = walk->nbytes - err;
 112	unsigned int nbytes;
 113
 114	nbytes = walk->total - n;
 115
 116	if (unlikely(err < 0)) {
 117		nbytes = 0;
 118		n = 0;
 119	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 120					   SKCIPHER_WALK_SLOW |
 121					   SKCIPHER_WALK_COPY |
 122					   SKCIPHER_WALK_DIFF)))) {
 123unmap_src:
 124		skcipher_unmap_src(walk);
 125	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
 126		skcipher_unmap_dst(walk);
 127		goto unmap_src;
 128	} else if (walk->flags & SKCIPHER_WALK_COPY) {
 129		skcipher_map_dst(walk);
 130		memcpy(walk->dst.virt.addr, walk->page, n);
 131		skcipher_unmap_dst(walk);
 132	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
 133		if (WARN_ON(err)) {
 134			err = -EINVAL;
 135			nbytes = 0;
 136		} else
 137			n = skcipher_done_slow(walk, n);
 138	}
 139
 140	if (err > 0)
 141		err = 0;
 142
 143	walk->total = nbytes;
 144	walk->nbytes = nbytes;
 145
 146	scatterwalk_advance(&walk->in, n);
 147	scatterwalk_advance(&walk->out, n);
 148	scatterwalk_done(&walk->in, 0, nbytes);
 149	scatterwalk_done(&walk->out, 1, nbytes);
 150
 151	if (nbytes) {
 152		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 153			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 154		return skcipher_walk_next(walk);
 155	}
 156
 
 157	/* Short-circuit for the common/fast path. */
 158	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 159		goto out;
 160
 161	if (walk->flags & SKCIPHER_WALK_PHYS)
 162		goto out;
 163
 164	if (walk->iv != walk->oiv)
 165		memcpy(walk->oiv, walk->iv, walk->ivsize);
 166	if (walk->buffer != walk->page)
 167		kfree(walk->buffer);
 168	if (walk->page)
 169		free_page((unsigned long)walk->page);
 170
 171out:
 172	return err;
 173}
 174EXPORT_SYMBOL_GPL(skcipher_walk_done);
 175
 176void skcipher_walk_complete(struct skcipher_walk *walk, int err)
 177{
 178	struct skcipher_walk_buffer *p, *tmp;
 179
 180	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
 181		u8 *data;
 182
 183		if (err)
 184			goto done;
 185
 186		data = p->data;
 187		if (!data) {
 188			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
 189			data = skcipher_get_spot(data, walk->stride);
 190		}
 191
 192		scatterwalk_copychunks(data, &p->dst, p->len, 1);
 193
 194		if (offset_in_page(p->data) + p->len + walk->stride >
 195		    PAGE_SIZE)
 196			free_page((unsigned long)p->data);
 197
 198done:
 199		list_del(&p->entry);
 200		kfree(p);
 201	}
 202
 203	if (!err && walk->iv != walk->oiv)
 204		memcpy(walk->oiv, walk->iv, walk->ivsize);
 205	if (walk->buffer != walk->page)
 206		kfree(walk->buffer);
 207	if (walk->page)
 208		free_page((unsigned long)walk->page);
 209}
 210EXPORT_SYMBOL_GPL(skcipher_walk_complete);
 211
 212static void skcipher_queue_write(struct skcipher_walk *walk,
 213				 struct skcipher_walk_buffer *p)
 214{
 215	p->dst = walk->out;
 216	list_add_tail(&p->entry, &walk->buffers);
 217}
 218
 219static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
 220{
 221	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
 222	unsigned alignmask = walk->alignmask;
 223	struct skcipher_walk_buffer *p;
 224	unsigned a;
 225	unsigned n;
 226	u8 *buffer;
 227	void *v;
 228
 229	if (!phys) {
 230		if (!walk->buffer)
 231			walk->buffer = walk->page;
 232		buffer = walk->buffer;
 233		if (buffer)
 234			goto ok;
 235	}
 236
 237	/* Start with the minimum alignment of kmalloc. */
 238	a = crypto_tfm_ctx_alignment() - 1;
 239	n = bsize;
 240
 241	if (phys) {
 242		/* Calculate the minimum alignment of p->buffer. */
 243		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
 244		n += sizeof(*p);
 245	}
 246
 247	/* Minimum size to align p->buffer by alignmask. */
 248	n += alignmask & ~a;
 249
 250	/* Minimum size to ensure p->buffer does not straddle a page. */
 251	n += (bsize - 1) & ~(alignmask | a);
 252
 253	v = kzalloc(n, skcipher_walk_gfp(walk));
 254	if (!v)
 255		return skcipher_walk_done(walk, -ENOMEM);
 256
 257	if (phys) {
 258		p = v;
 259		p->len = bsize;
 260		skcipher_queue_write(walk, p);
 261		buffer = p->buffer;
 262	} else {
 263		walk->buffer = v;
 264		buffer = v;
 265	}
 266
 267ok:
 268	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
 269	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
 270	walk->src.virt.addr = walk->dst.virt.addr;
 271
 272	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
 273
 274	walk->nbytes = bsize;
 275	walk->flags |= SKCIPHER_WALK_SLOW;
 276
 277	return 0;
 278}
 279
 280static int skcipher_next_copy(struct skcipher_walk *walk)
 281{
 282	struct skcipher_walk_buffer *p;
 283	u8 *tmp = walk->page;
 284
 285	skcipher_map_src(walk);
 286	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
 287	skcipher_unmap_src(walk);
 288
 289	walk->src.virt.addr = tmp;
 290	walk->dst.virt.addr = tmp;
 291
 292	if (!(walk->flags & SKCIPHER_WALK_PHYS))
 293		return 0;
 294
 295	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
 296	if (!p)
 297		return -ENOMEM;
 298
 299	p->data = walk->page;
 300	p->len = walk->nbytes;
 301	skcipher_queue_write(walk, p);
 302
 303	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
 304	    PAGE_SIZE)
 305		walk->page = NULL;
 306	else
 307		walk->page += walk->nbytes;
 308
 309	return 0;
 310}
 311
 312static int skcipher_next_fast(struct skcipher_walk *walk)
 313{
 314	unsigned long diff;
 315
 316	walk->src.phys.page = scatterwalk_page(&walk->in);
 317	walk->src.phys.offset = offset_in_page(walk->in.offset);
 318	walk->dst.phys.page = scatterwalk_page(&walk->out);
 319	walk->dst.phys.offset = offset_in_page(walk->out.offset);
 320
 321	if (walk->flags & SKCIPHER_WALK_PHYS)
 322		return 0;
 323
 324	diff = walk->src.phys.offset - walk->dst.phys.offset;
 325	diff |= walk->src.virt.page - walk->dst.virt.page;
 326
 327	skcipher_map_src(walk);
 328	walk->dst.virt.addr = walk->src.virt.addr;
 329
 330	if (diff) {
 331		walk->flags |= SKCIPHER_WALK_DIFF;
 332		skcipher_map_dst(walk);
 333	}
 334
 335	return 0;
 336}
 337
 338static int skcipher_walk_next(struct skcipher_walk *walk)
 339{
 340	unsigned int bsize;
 341	unsigned int n;
 342	int err;
 343
 344	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
 345			 SKCIPHER_WALK_DIFF);
 346
 347	n = walk->total;
 348	bsize = min(walk->stride, max(n, walk->blocksize));
 349	n = scatterwalk_clamp(&walk->in, n);
 350	n = scatterwalk_clamp(&walk->out, n);
 351
 352	if (unlikely(n < bsize)) {
 353		if (unlikely(walk->total < walk->blocksize))
 354			return skcipher_walk_done(walk, -EINVAL);
 355
 356slow_path:
 357		err = skcipher_next_slow(walk, bsize);
 358		goto set_phys_lowmem;
 359	}
 360
 361	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
 362		if (!walk->page) {
 363			gfp_t gfp = skcipher_walk_gfp(walk);
 364
 365			walk->page = (void *)__get_free_page(gfp);
 366			if (!walk->page)
 367				goto slow_path;
 368		}
 369
 370		walk->nbytes = min_t(unsigned, n,
 371				     PAGE_SIZE - offset_in_page(walk->page));
 372		walk->flags |= SKCIPHER_WALK_COPY;
 373		err = skcipher_next_copy(walk);
 374		goto set_phys_lowmem;
 375	}
 376
 377	walk->nbytes = n;
 378
 379	return skcipher_next_fast(walk);
 380
 381set_phys_lowmem:
 382	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
 383		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
 384		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
 385		walk->src.phys.offset &= PAGE_SIZE - 1;
 386		walk->dst.phys.offset &= PAGE_SIZE - 1;
 387	}
 388	return err;
 389}
 391
 392static int skcipher_copy_iv(struct skcipher_walk *walk)
 393{
 394	unsigned a = crypto_tfm_ctx_alignment() - 1;
 395	unsigned alignmask = walk->alignmask;
 396	unsigned ivsize = walk->ivsize;
 397	unsigned bs = walk->stride;
 398	unsigned aligned_bs;
 399	unsigned size;
 400	u8 *iv;
 401
 402	aligned_bs = ALIGN(bs, alignmask);
 403
 404	/* Minimum size to align buffer by alignmask. */
 405	size = alignmask & ~a;
 406
 407	if (walk->flags & SKCIPHER_WALK_PHYS)
 408		size += ivsize;
 409	else {
 410		size += aligned_bs + ivsize;
 411
 412		/* Minimum size to ensure buffer does not straddle a page. */
 413		size += (bs - 1) & ~(alignmask | a);
 414	}
 415
 416	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
 417	if (!walk->buffer)
 418		return -ENOMEM;
 419
 420	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
 421	iv = skcipher_get_spot(iv, bs) + aligned_bs;
 422
 423	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
 424	return 0;
 425}
 426
 427static int skcipher_walk_first(struct skcipher_walk *walk)
 428{
 429	if (WARN_ON_ONCE(in_irq()))
 430		return -EDEADLK;
 431
 432	walk->buffer = NULL;
 433	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 434		int err = skcipher_copy_iv(walk);
 435		if (err)
 436			return err;
 437	}
 438
 439	walk->page = NULL;
 440	walk->nbytes = walk->total;
 441
 442	return skcipher_walk_next(walk);
 443}
 444
 445static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 446				  struct skcipher_request *req)
 447{
 448	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 
 449
 450	walk->total = req->cryptlen;
 451	walk->nbytes = 0;
 452	walk->iv = req->iv;
 453	walk->oiv = req->iv;
 454
 455	if (unlikely(!walk->total))
 456		return 0;
 457
 458	scatterwalk_start(&walk->in, req->src);
 459	scatterwalk_start(&walk->out, req->dst);
 460
 461	walk->flags &= ~SKCIPHER_WALK_SLEEP;
 462	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 463		       SKCIPHER_WALK_SLEEP : 0;
 464
 465	walk->blocksize = crypto_skcipher_blocksize(tfm);
 466	walk->stride = crypto_skcipher_walksize(tfm);
 467	walk->ivsize = crypto_skcipher_ivsize(tfm);
 468	walk->alignmask = crypto_skcipher_alignmask(tfm);
 469
 470	return skcipher_walk_first(walk);
 471}
 472
 473int skcipher_walk_virt(struct skcipher_walk *walk,
 474		       struct skcipher_request *req, bool atomic)
 475{
 476	int err;
 477
 478	walk->flags &= ~SKCIPHER_WALK_PHYS;
 479
 480	err = skcipher_walk_skcipher(walk, req);
 481
 482	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
 483
 484	return err;
 485}
 486EXPORT_SYMBOL_GPL(skcipher_walk_virt);
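
To illustrate how the walk helpers above (skcipher_walk_virt() and skcipher_walk_done()) are typically driven, here is a minimal sketch of a cipher implementation's ->encrypt() handler. It is not part of skcipher.c; example_xor_encrypt and the byte-wise XOR transform are invented purely for illustration.

#include <crypto/internal/skcipher.h>

/* Minimal sketch of an ->encrypt() handler built on the walk API above. */
static int example_xor_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	unsigned int i;
	int err;

	/* atomic=false: sleeping is allowed if the request flags permit it. */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;

		/* Process the span the walker has mapped for us. */
		for (i = 0; i < walk.nbytes; i++)
			dst[i] = src[i] ^ 0xff;

		/* Report 0 bytes left over; advances to the next span (or finishes). */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
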
 487
 488void skcipher_walk_atomise(struct skcipher_walk *walk)
 489{
 490	walk->flags &= ~SKCIPHER_WALK_SLEEP;
 491}
 492EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
 493
 494int skcipher_walk_async(struct skcipher_walk *walk,
 495			struct skcipher_request *req)
 496{
 497	walk->flags |= SKCIPHER_WALK_PHYS;
 498
 499	INIT_LIST_HEAD(&walk->buffers);
 500
 501	return skcipher_walk_skcipher(walk, req);
 502}
 503EXPORT_SYMBOL_GPL(skcipher_walk_async);
 504
 505static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 506				     struct aead_request *req, bool atomic)
 507{
 508	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 509	int err;
 510
 511	walk->nbytes = 0;
 512	walk->iv = req->iv;
 513	walk->oiv = req->iv;
 514
 515	if (unlikely(!walk->total))
 516		return 0;
 517
 518	walk->flags &= ~SKCIPHER_WALK_PHYS;
 519
 520	scatterwalk_start(&walk->in, req->src);
 521	scatterwalk_start(&walk->out, req->dst);
 522
 523	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
 524	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 525
 526	scatterwalk_done(&walk->in, 0, walk->total);
 527	scatterwalk_done(&walk->out, 0, walk->total);
 528
 529	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
 530		walk->flags |= SKCIPHER_WALK_SLEEP;
 531	else
 532		walk->flags &= ~SKCIPHER_WALK_SLEEP;
 533
 534	walk->blocksize = crypto_aead_blocksize(tfm);
 535	walk->stride = crypto_aead_chunksize(tfm);
 536	walk->ivsize = crypto_aead_ivsize(tfm);
 537	walk->alignmask = crypto_aead_alignmask(tfm);
 538
 539	err = skcipher_walk_first(walk);
 540
 541	if (atomic)
 542		walk->flags &= ~SKCIPHER_WALK_SLEEP;
 543
 544	return err;
 545}
 546
 547int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
 548		       bool atomic)
 549{
 550	walk->total = req->cryptlen;
 551
 552	return skcipher_walk_aead_common(walk, req, atomic);
 553}
 554EXPORT_SYMBOL_GPL(skcipher_walk_aead);
 555
 556int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
 557			       struct aead_request *req, bool atomic)
 558{
 559	walk->total = req->cryptlen;
 560
 561	return skcipher_walk_aead_common(walk, req, atomic);
 562}
 563EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
 564
 565int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
 566			       struct aead_request *req, bool atomic)
 567{
 568	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 569
 570	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
 571
 572	return skcipher_walk_aead_common(walk, req, atomic);
 573}
 574EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
 575
 576static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
 577{
 578	if (alg->cra_type == &crypto_blkcipher_type)
 579		return sizeof(struct crypto_blkcipher *);
 580
 581	if (alg->cra_type == &crypto_ablkcipher_type ||
 582	    alg->cra_type == &crypto_givcipher_type)
 583		return sizeof(struct crypto_ablkcipher *);
 584
 585	return crypto_alg_extsize(alg);
 586}
 587
 588static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
 589				     const u8 *key, unsigned int keylen)
 590{
 591	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
 592	struct crypto_blkcipher *blkcipher = *ctx;
 593	int err;
 594
 595	crypto_blkcipher_clear_flags(blkcipher, ~0);
 596	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
 597					      CRYPTO_TFM_REQ_MASK);
 598	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
 599	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
 600				       CRYPTO_TFM_RES_MASK);
 601	if (err)
 602		return err;
 603
 604	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 605	return 0;
 606}
 607
 608static int skcipher_crypt_blkcipher(struct skcipher_request *req,
 609				    int (*crypt)(struct blkcipher_desc *,
 610						 struct scatterlist *,
 611						 struct scatterlist *,
 612						 unsigned int))
 613{
 614	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 615	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
 616	struct blkcipher_desc desc = {
 617		.tfm = *ctx,
 618		.info = req->iv,
 619		.flags = req->base.flags,
 620	};
 621
 622
 623	return crypt(&desc, req->dst, req->src, req->cryptlen);
 624}
 625
 626static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
 627{
 628	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 629	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 630	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
 631
 632	return skcipher_crypt_blkcipher(req, alg->encrypt);
 633}
 634
 635static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
 636{
 637	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 638	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 639	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
 640
 641	return skcipher_crypt_blkcipher(req, alg->decrypt);
 
 642}
 
 643
 644static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
 645{
 646	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
 647
 648	crypto_free_blkcipher(*ctx);
 649}
 650
 651static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
 652{
 653	struct crypto_alg *calg = tfm->__crt_alg;
 654	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 655	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
 656	struct crypto_blkcipher *blkcipher;
 657	struct crypto_tfm *btfm;
 658
 659	if (!crypto_mod_get(calg))
 660		return -EAGAIN;
 661
 662	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
 663					CRYPTO_ALG_TYPE_MASK);
 664	if (IS_ERR(btfm)) {
 665		crypto_mod_put(calg);
 666		return PTR_ERR(btfm);
 667	}
 668
 669	blkcipher = __crypto_blkcipher_cast(btfm);
 670	*ctx = blkcipher;
 671	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
 672
 673	skcipher->setkey = skcipher_setkey_blkcipher;
 674	skcipher->encrypt = skcipher_encrypt_blkcipher;
 675	skcipher->decrypt = skcipher_decrypt_blkcipher;
 676
 677	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
 678	skcipher->keysize = calg->cra_blkcipher.max_keysize;
 679
 680	if (skcipher->keysize)
 681		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
 682
 683	return 0;
 684}
 
 685
 686static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
 687				      const u8 *key, unsigned int keylen)
 688{
 689	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
 690	struct crypto_ablkcipher *ablkcipher = *ctx;
 691	int err;
 692
 693	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
 694	crypto_ablkcipher_set_flags(ablkcipher,
 695				    crypto_skcipher_get_flags(tfm) &
 696				    CRYPTO_TFM_REQ_MASK);
 697	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
 698	crypto_skcipher_set_flags(tfm,
 699				  crypto_ablkcipher_get_flags(ablkcipher) &
 700				  CRYPTO_TFM_RES_MASK);
 701	if (err)
 702		return err;
 703
 704	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 705	return 0;
 706}
 
 707
 708static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
 709				     int (*crypt)(struct ablkcipher_request *))
 710{
 711	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 712	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
 713	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
 714
 715	ablkcipher_request_set_tfm(subreq, *ctx);
 716	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
 717					req->base.complete, req->base.data);
 718	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 719				     req->iv);
 720
 721	return crypt(subreq);
 722}
 723
 724static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
 725{
 726	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 727	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 728	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
 729
 730	return skcipher_crypt_ablkcipher(req, alg->encrypt);
 731}
 732
 733static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
 734{
 735	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 736	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
 737	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
 738
 739	return skcipher_crypt_ablkcipher(req, alg->decrypt);
 740}
 741
 742static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
 743{
 744	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
 745
 746	crypto_free_ablkcipher(*ctx);
 747}
 748
 749static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
 750{
 751	struct crypto_alg *calg = tfm->__crt_alg;
 752	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 753	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
 754	struct crypto_ablkcipher *ablkcipher;
 755	struct crypto_tfm *abtfm;
 756
 757	if (!crypto_mod_get(calg))
 758		return -EAGAIN;
 759
 760	abtfm = __crypto_alloc_tfm(calg, 0, 0);
 761	if (IS_ERR(abtfm)) {
 762		crypto_mod_put(calg);
 763		return PTR_ERR(abtfm);
 764	}
 765
 766	ablkcipher = __crypto_ablkcipher_cast(abtfm);
 767	*ctx = ablkcipher;
 768	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
 769
 770	skcipher->setkey = skcipher_setkey_ablkcipher;
 771	skcipher->encrypt = skcipher_encrypt_ablkcipher;
 772	skcipher->decrypt = skcipher_decrypt_ablkcipher;
 773
 774	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 775	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
 776			    sizeof(struct ablkcipher_request);
 777	skcipher->keysize = calg->cra_ablkcipher.max_keysize;
 778
 779	if (skcipher->keysize)
 780		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
 781
 782	return 0;
 783}
 784
 785static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
 786				     const u8 *key, unsigned int keylen)
 787{
 788	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
 789	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
 790	u8 *buffer, *alignbuffer;
 791	unsigned long absize;
 792	int ret;
 793
 794	absize = keylen + alignmask;
 795	buffer = kmalloc(absize, GFP_ATOMIC);
 796	if (!buffer)
 797		return -ENOMEM;
 798
 799	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 800	memcpy(alignbuffer, key, keylen);
 801	ret = cipher->setkey(tfm, alignbuffer, keylen);
 802	kzfree(buffer);
 803	return ret;
 804}
 
 805
 806static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 807			   unsigned int keylen)
 808{
 809	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
 810	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
 811	int err;
 812
 813	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
 814		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 815		return -EINVAL;
 816	}
 817
 818	if ((unsigned long)key & alignmask)
 819		err = skcipher_setkey_unaligned(tfm, key, keylen);
 820	else
 821		err = cipher->setkey(tfm, key, keylen);
 822
 823	if (err)
 824		return err;
 825
 826	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 827	return 0;
 
 828}
 
 829
 830static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 831{
 832	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 833	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
 834
 835	alg->exit(skcipher);
 836}
 837
 838static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 839{
 840	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 841	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
 842
 843	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
 844		return crypto_init_skcipher_ops_blkcipher(tfm);
 845
 846	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
 847	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
 848		return crypto_init_skcipher_ops_ablkcipher(tfm);
 849
 850	skcipher->setkey = skcipher_setkey;
 851	skcipher->encrypt = alg->encrypt;
 852	skcipher->decrypt = alg->decrypt;
 853	skcipher->ivsize = alg->ivsize;
 854	skcipher->keysize = alg->max_keysize;
 855
 856	if (skcipher->keysize)
 857		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
 858
 859	if (alg->exit)
 860		skcipher->base.exit = crypto_skcipher_exit_tfm;
 861
 862	if (alg->init)
 863		return alg->init(skcipher);
 864
 865	return 0;
 866}
 867
 868static void crypto_skcipher_free_instance(struct crypto_instance *inst)
 869{
 870	struct skcipher_instance *skcipher =
 871		container_of(inst, struct skcipher_instance, s.base);
 872
 873	skcipher->free(skcipher);
 874}
 875
 876static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 877	__maybe_unused;
 878static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 879{
 880	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
 881						     base);
 882
 883	seq_printf(m, "type         : skcipher\n");
 884	seq_printf(m, "async        : %s\n",
 885		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
 886	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
 887	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
 888	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
 889	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
 890	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
 891	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 
 892}
 893
 894#ifdef CONFIG_NET
 895static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 896{
 
 897	struct crypto_report_blkcipher rblkcipher;
 898	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
 899						     base);
 900
 901	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
 902	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
 903
 904	rblkcipher.blocksize = alg->cra_blocksize;
 905	rblkcipher.min_keysize = skcipher->min_keysize;
 906	rblkcipher.max_keysize = skcipher->max_keysize;
 907	rblkcipher.ivsize = skcipher->ivsize;
 908
 909	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
 910		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
 911		goto nla_put_failure;
 912	return 0;
 913
 914nla_put_failure:
 915	return -EMSGSIZE;
 916}
 917#else
 918static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 
 919{
 920	return -ENOSYS;
 921}
 922#endif
 923
 924static const struct crypto_type crypto_skcipher_type2 = {
 925	.extsize = crypto_skcipher_extsize,
 926	.init_tfm = crypto_skcipher_init_tfm,
 927	.free = crypto_skcipher_free_instance,
 928#ifdef CONFIG_PROC_FS
 929	.show = crypto_skcipher_show,
 930#endif
 
 931	.report = crypto_skcipher_report,
 932	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 933	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
 934	.type = CRYPTO_ALG_TYPE_SKCIPHER,
 935	.tfmsize = offsetof(struct crypto_skcipher, base),
 936};
 937
 938int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
 939			  const char *name, u32 type, u32 mask)
 
 940{
 941	spawn->base.frontend = &crypto_skcipher_type2;
 942	return crypto_grab_spawn(&spawn->base, name, type, mask);
 943}
 944EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
 945
 946struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 947					      u32 type, u32 mask)
 948{
 949	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
 950}
 951EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
 952
 953int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
 
 954{
 955	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
 956				   type, mask);
 957}
 958EXPORT_SYMBOL_GPL(crypto_has_skcipher2);
 959
 960static int skcipher_prepare_alg(struct skcipher_alg *alg)
 961{
 962	struct crypto_alg *base = &alg->base;
 963
 964	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
 965	    alg->walksize > PAGE_SIZE / 8)
 
 966		return -EINVAL;
 967
 968	if (!alg->chunksize)
 969		alg->chunksize = base->cra_blocksize;
 970	if (!alg->walksize)
 971		alg->walksize = alg->chunksize;
 972
 973	base->cra_type = &crypto_skcipher_type2;
 974	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 975	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
 976
 977	return 0;
 978}
 979
 980int crypto_register_skcipher(struct skcipher_alg *alg)
 981{
 982	struct crypto_alg *base = &alg->base;
 983	int err;
 984
 985	err = skcipher_prepare_alg(alg);
 986	if (err)
 987		return err;
 988
 989	return crypto_register_alg(base);
 990}
 991EXPORT_SYMBOL_GPL(crypto_register_skcipher);
 992
 993void crypto_unregister_skcipher(struct skcipher_alg *alg)
 994{
 995	crypto_unregister_alg(&alg->base);
 996}
 997EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
 998
 999int crypto_register_skciphers(struct skcipher_alg *algs, int count)
1000{
1001	int i, ret;
1002
1003	for (i = 0; i < count; i++) {
1004		ret = crypto_register_skcipher(&algs[i]);
1005		if (ret)
1006			goto err;
1007	}
1008
1009	return 0;
1010
1011err:
1012	for (--i; i >= 0; --i)
1013		crypto_unregister_skcipher(&algs[i]);
1014
1015	return ret;
1016}
1017EXPORT_SYMBOL_GPL(crypto_register_skciphers);
1018
1019void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
1020{
1021	int i;
1022
1023	for (i = count - 1; i >= 0; --i)
1024		crypto_unregister_skcipher(&algs[i]);
1025}
1026EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
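
For completeness, a hedged sketch of how an algorithm using the handler from the walk sketch earlier on this page would be described and registered through crypto_register_skcipher()/crypto_unregister_skcipher(). example_alg, example_setkey, example_xor_encrypt and the "xor-example" name are invented for the example and do not exist in the kernel tree.

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

/* Trivial setkey: a real cipher would validate and expand the key here. */
static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;
}

static struct skcipher_alg example_alg = {
	.base = {
		.cra_name		= "xor-example",
		.cra_driver_name	= "xor-example-generic",
		.cra_priority		= 100,
		.cra_blocksize		= 1,
		.cra_ctxsize		= 0,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 16,
	.max_keysize	= 16,
	.ivsize		= 0,
	.setkey		= example_setkey,
	.encrypt	= example_xor_encrypt,	/* handler from the earlier sketch */
	.decrypt	= example_xor_encrypt,	/* XOR is its own inverse */
};

static int __init example_mod_init(void)
{
	/* skcipher_prepare_alg() above fills in default chunksize/walksize. */
	return crypto_register_skcipher(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_skcipher(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
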
1027
1028int skcipher_register_instance(struct crypto_template *tmpl,
1029			   struct skcipher_instance *inst)
1030{
1031	int err;
1032
1033	err = skcipher_prepare_alg(&inst->alg);
1034	if (err)
1035		return err;
1036
1037	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
1038}
1039EXPORT_SYMBOL_GPL(skcipher_register_instance);
1040
1041MODULE_LICENSE("GPL");
1042MODULE_DESCRIPTION("Symmetric key cipher type");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Symmetric key cipher operations.
   4 *
   5 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
   6 * multiple page boundaries by using temporary blocks.  In user context,
   7 * the kernel is given a chance to schedule us once per page.
   8 *
   9 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  10 */
  11
  12#include <crypto/internal/aead.h>
  13#include <crypto/internal/cipher.h>
  14#include <crypto/internal/skcipher.h>
  15#include <crypto/scatterwalk.h>
  16#include <linux/bug.h>
  17#include <linux/cryptouser.h>
  18#include <linux/err.h>
  19#include <linux/kernel.h>
  20#include <linux/list.h>
  21#include <linux/mm.h>
  22#include <linux/module.h>
 
  23#include <linux/seq_file.h>
  24#include <linux/slab.h>
  25#include <linux/string.h>
  26#include <net/netlink.h>
  27#include "skcipher.h"
  28
  29#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e
  30
  31enum {
  32	SKCIPHER_WALK_PHYS = 1 << 0,
  33	SKCIPHER_WALK_SLOW = 1 << 1,
  34	SKCIPHER_WALK_COPY = 1 << 2,
  35	SKCIPHER_WALK_DIFF = 1 << 3,
  36	SKCIPHER_WALK_SLEEP = 1 << 4,
  37};
  38
  39struct skcipher_walk_buffer {
  40	struct list_head entry;
  41	struct scatter_walk dst;
  42	unsigned int len;
  43	u8 *data;
  44	u8 buffer[];
  45};
  46
  47static const struct crypto_type crypto_skcipher_type;
  48
  49static int skcipher_walk_next(struct skcipher_walk *walk);
  50
  51static inline void skcipher_map_src(struct skcipher_walk *walk)
  52{
  53	walk->src.virt.addr = scatterwalk_map(&walk->in);
  54}
  55
  56static inline void skcipher_map_dst(struct skcipher_walk *walk)
  57{
  58	walk->dst.virt.addr = scatterwalk_map(&walk->out);
  59}
  60
  61static inline void skcipher_unmap_src(struct skcipher_walk *walk)
  62{
  63	scatterwalk_unmap(walk->src.virt.addr);
  64}
  65
  66static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
  67{
  68	scatterwalk_unmap(walk->dst.virt.addr);
  69}
  70
  71static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
  72{
  73	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
  74}
  75
  76/* Get a spot of the specified length that does not straddle a page.
  77 * The caller needs to ensure that there is enough space for this operation.
  78 */
  79static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
  80{
  81	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
  82
  83	return max(start, end_page);
  84}
  85
  86static inline struct skcipher_alg *__crypto_skcipher_alg(
  87	struct crypto_alg *alg)
  88{
  89	return container_of(alg, struct skcipher_alg, base);
  90}
  91
  92static inline struct crypto_istat_cipher *skcipher_get_stat(
  93	struct skcipher_alg *alg)
  94{
  95	return skcipher_get_stat_common(&alg->co);
  96}
  97
  98static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
  99{
 100	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
 101
 102	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
 103		return err;
 104
 105	if (err && err != -EINPROGRESS && err != -EBUSY)
 106		atomic64_inc(&istat->err_cnt);
 107
 108	return err;
 109}
 110
 111static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 112{
 113	u8 *addr;
 114
 115	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 116	addr = skcipher_get_spot(addr, bsize);
 117	scatterwalk_copychunks(addr, &walk->out, bsize,
 118			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
 119	return 0;
 120}
 121
 122int skcipher_walk_done(struct skcipher_walk *walk, int err)
 123{
 124	unsigned int n = walk->nbytes;
 125	unsigned int nbytes = 0;
 126
 127	if (!n)
 128		goto finish;
 129
 130	if (likely(err >= 0)) {
 131		n -= err;
 132		nbytes = walk->total - n;
 133	}
 134
 135	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 136				    SKCIPHER_WALK_SLOW |
 137				    SKCIPHER_WALK_COPY |
 138				    SKCIPHER_WALK_DIFF)))) {
 139unmap_src:
 140		skcipher_unmap_src(walk);
 141	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
 142		skcipher_unmap_dst(walk);
 143		goto unmap_src;
 144	} else if (walk->flags & SKCIPHER_WALK_COPY) {
 145		skcipher_map_dst(walk);
 146		memcpy(walk->dst.virt.addr, walk->page, n);
 147		skcipher_unmap_dst(walk);
 148	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
 149		if (err > 0) {
 150			/*
 151			 * Didn't process all bytes.  Either the algorithm is
 152			 * broken, or this was the last step and it turned out
 153			 * the message wasn't evenly divisible into blocks but
 154			 * the algorithm requires it.
 155			 */
 156			err = -EINVAL;
 157			nbytes = 0;
 158		} else
 159			n = skcipher_done_slow(walk, n);
 160	}
 161
 162	if (err > 0)
 163		err = 0;
 164
 165	walk->total = nbytes;
 166	walk->nbytes = 0;
 167
 168	scatterwalk_advance(&walk->in, n);
 169	scatterwalk_advance(&walk->out, n);
 170	scatterwalk_done(&walk->in, 0, nbytes);
 171	scatterwalk_done(&walk->out, 1, nbytes);
 172
 173	if (nbytes) {
 174		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 175			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 176		return skcipher_walk_next(walk);
 177	}
 178
 179finish:
 180	/* Short-circuit for the common/fast path. */
 181	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 182		goto out;
 183
 184	if (walk->flags & SKCIPHER_WALK_PHYS)
 185		goto out;
 186
 187	if (walk->iv != walk->oiv)
 188		memcpy(walk->oiv, walk->iv, walk->ivsize);
 189	if (walk->buffer != walk->page)
 190		kfree(walk->buffer);
 191	if (walk->page)
 192		free_page((unsigned long)walk->page);
 193
 194out:
 195	return err;
 196}
 197EXPORT_SYMBOL_GPL(skcipher_walk_done);
 198
 199void skcipher_walk_complete(struct skcipher_walk *walk, int err)
 200{
 201	struct skcipher_walk_buffer *p, *tmp;
 202
 203	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
 204		u8 *data;
 205
 206		if (err)
 207			goto done;
 208
 209		data = p->data;
 210		if (!data) {
 211			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
 212			data = skcipher_get_spot(data, walk->stride);
 213		}
 214
 215		scatterwalk_copychunks(data, &p->dst, p->len, 1);
 216
 217		if (offset_in_page(p->data) + p->len + walk->stride >
 218		    PAGE_SIZE)
 219			free_page((unsigned long)p->data);
 220
 221done:
 222		list_del(&p->entry);
 223		kfree(p);
 224	}
 225
 226	if (!err && walk->iv != walk->oiv)
 227		memcpy(walk->oiv, walk->iv, walk->ivsize);
 228	if (walk->buffer != walk->page)
 229		kfree(walk->buffer);
 230	if (walk->page)
 231		free_page((unsigned long)walk->page);
 232}
 233EXPORT_SYMBOL_GPL(skcipher_walk_complete);
 234
 235static void skcipher_queue_write(struct skcipher_walk *walk,
 236				 struct skcipher_walk_buffer *p)
 237{
 238	p->dst = walk->out;
 239	list_add_tail(&p->entry, &walk->buffers);
 240}
 241
 242static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
 243{
 244	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
 245	unsigned alignmask = walk->alignmask;
 246	struct skcipher_walk_buffer *p;
 247	unsigned a;
 248	unsigned n;
 249	u8 *buffer;
 250	void *v;
 251
 252	if (!phys) {
 253		if (!walk->buffer)
 254			walk->buffer = walk->page;
 255		buffer = walk->buffer;
 256		if (buffer)
 257			goto ok;
 258	}
 259
 260	/* Start with the minimum alignment of kmalloc. */
 261	a = crypto_tfm_ctx_alignment() - 1;
 262	n = bsize;
 263
 264	if (phys) {
 265		/* Calculate the minimum alignment of p->buffer. */
 266		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
 267		n += sizeof(*p);
 268	}
 269
 270	/* Minimum size to align p->buffer by alignmask. */
 271	n += alignmask & ~a;
 272
 273	/* Minimum size to ensure p->buffer does not straddle a page. */
 274	n += (bsize - 1) & ~(alignmask | a);
 275
 276	v = kzalloc(n, skcipher_walk_gfp(walk));
 277	if (!v)
 278		return skcipher_walk_done(walk, -ENOMEM);
 279
 280	if (phys) {
 281		p = v;
 282		p->len = bsize;
 283		skcipher_queue_write(walk, p);
 284		buffer = p->buffer;
 285	} else {
 286		walk->buffer = v;
 287		buffer = v;
 288	}
 289
 290ok:
 291	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
 292	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
 293	walk->src.virt.addr = walk->dst.virt.addr;
 294
 295	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
 296
 297	walk->nbytes = bsize;
 298	walk->flags |= SKCIPHER_WALK_SLOW;
 299
 300	return 0;
 301}
 302
 303static int skcipher_next_copy(struct skcipher_walk *walk)
 304{
 305	struct skcipher_walk_buffer *p;
 306	u8 *tmp = walk->page;
 307
 308	skcipher_map_src(walk);
 309	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
 310	skcipher_unmap_src(walk);
 311
 312	walk->src.virt.addr = tmp;
 313	walk->dst.virt.addr = tmp;
 314
 315	if (!(walk->flags & SKCIPHER_WALK_PHYS))
 316		return 0;
 317
 318	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
 319	if (!p)
 320		return -ENOMEM;
 321
 322	p->data = walk->page;
 323	p->len = walk->nbytes;
 324	skcipher_queue_write(walk, p);
 325
 326	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
 327	    PAGE_SIZE)
 328		walk->page = NULL;
 329	else
 330		walk->page += walk->nbytes;
 331
 332	return 0;
 333}
 334
 335static int skcipher_next_fast(struct skcipher_walk *walk)
 336{
 337	unsigned long diff;
 338
 339	walk->src.phys.page = scatterwalk_page(&walk->in);
 340	walk->src.phys.offset = offset_in_page(walk->in.offset);
 341	walk->dst.phys.page = scatterwalk_page(&walk->out);
 342	walk->dst.phys.offset = offset_in_page(walk->out.offset);
 343
 344	if (walk->flags & SKCIPHER_WALK_PHYS)
 345		return 0;
 346
 347	diff = walk->src.phys.offset - walk->dst.phys.offset;
 348	diff |= walk->src.virt.page - walk->dst.virt.page;
 349
 350	skcipher_map_src(walk);
 351	walk->dst.virt.addr = walk->src.virt.addr;
 352
 353	if (diff) {
 354		walk->flags |= SKCIPHER_WALK_DIFF;
 355		skcipher_map_dst(walk);
 356	}
 357
 358	return 0;
 359}
 360
 361static int skcipher_walk_next(struct skcipher_walk *walk)
 362{
 363	unsigned int bsize;
 364	unsigned int n;
 365	int err;
 366
 367	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
 368			 SKCIPHER_WALK_DIFF);
 369
 370	n = walk->total;
 371	bsize = min(walk->stride, max(n, walk->blocksize));
 372	n = scatterwalk_clamp(&walk->in, n);
 373	n = scatterwalk_clamp(&walk->out, n);
 374
 375	if (unlikely(n < bsize)) {
 376		if (unlikely(walk->total < walk->blocksize))
 377			return skcipher_walk_done(walk, -EINVAL);
 378
 379slow_path:
 380		err = skcipher_next_slow(walk, bsize);
 381		goto set_phys_lowmem;
 382	}
 383
 384	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
 385		if (!walk->page) {
 386			gfp_t gfp = skcipher_walk_gfp(walk);
 387
 388			walk->page = (void *)__get_free_page(gfp);
 389			if (!walk->page)
 390				goto slow_path;
 391		}
 392
 393		walk->nbytes = min_t(unsigned, n,
 394				     PAGE_SIZE - offset_in_page(walk->page));
 395		walk->flags |= SKCIPHER_WALK_COPY;
 396		err = skcipher_next_copy(walk);
 397		goto set_phys_lowmem;
 398	}
 399
 400	walk->nbytes = n;
 401
 402	return skcipher_next_fast(walk);
 403
 404set_phys_lowmem:
 405	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
 406		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
 407		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
 408		walk->src.phys.offset &= PAGE_SIZE - 1;
 409		walk->dst.phys.offset &= PAGE_SIZE - 1;
 410	}
 411	return err;
 412}
 
 413
 414static int skcipher_copy_iv(struct skcipher_walk *walk)
 415{
 416	unsigned a = crypto_tfm_ctx_alignment() - 1;
 417	unsigned alignmask = walk->alignmask;
 418	unsigned ivsize = walk->ivsize;
 419	unsigned bs = walk->stride;
 420	unsigned aligned_bs;
 421	unsigned size;
 422	u8 *iv;
 423
 424	aligned_bs = ALIGN(bs, alignmask + 1);
 425
 426	/* Minimum size to align buffer by alignmask. */
 427	size = alignmask & ~a;
 428
 429	if (walk->flags & SKCIPHER_WALK_PHYS)
 430		size += ivsize;
 431	else {
 432		size += aligned_bs + ivsize;
 433
 434		/* Minimum size to ensure buffer does not straddle a page. */
 435		size += (bs - 1) & ~(alignmask | a);
 436	}
 437
 438	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
 439	if (!walk->buffer)
 440		return -ENOMEM;
 441
 442	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
 443	iv = skcipher_get_spot(iv, bs) + aligned_bs;
 444
 445	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
 446	return 0;
 447}
 448
 449static int skcipher_walk_first(struct skcipher_walk *walk)
 450{
 451	if (WARN_ON_ONCE(in_hardirq()))
 452		return -EDEADLK;
 453
 454	walk->buffer = NULL;
 455	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 456		int err = skcipher_copy_iv(walk);
 457		if (err)
 458			return err;
 459	}
 460
 461	walk->page = NULL;
 
 462
 463	return skcipher_walk_next(walk);
 464}
 465
 466static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 467				  struct skcipher_request *req)
 468{
 469	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 470	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 471
 472	walk->total = req->cryptlen;
 473	walk->nbytes = 0;
 474	walk->iv = req->iv;
 475	walk->oiv = req->iv;
 476
 477	if (unlikely(!walk->total))
 478		return 0;
 479
 480	scatterwalk_start(&walk->in, req->src);
 481	scatterwalk_start(&walk->out, req->dst);
 482
 483	walk->flags &= ~SKCIPHER_WALK_SLEEP;
 484	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 485		       SKCIPHER_WALK_SLEEP : 0;
 486
 487	walk->blocksize = crypto_skcipher_blocksize(tfm);
 
 488	walk->ivsize = crypto_skcipher_ivsize(tfm);
 489	walk->alignmask = crypto_skcipher_alignmask(tfm);
 490
 491	if (alg->co.base.cra_type != &crypto_skcipher_type)
 492		walk->stride = alg->co.chunksize;
 493	else
 494		walk->stride = alg->walksize;
 495
 496	return skcipher_walk_first(walk);
 497}
 498
 499int skcipher_walk_virt(struct skcipher_walk *walk,
 500		       struct skcipher_request *req, bool atomic)
 501{
 502	int err;
 503
 504	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 505
 506	walk->flags &= ~SKCIPHER_WALK_PHYS;
 507
 508	err = skcipher_walk_skcipher(walk, req);
 509
 510	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
 511
 512	return err;
 513}
 514EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 515
 516int skcipher_walk_async(struct skcipher_walk *walk,
 517			struct skcipher_request *req)
 518{
 519	walk->flags |= SKCIPHER_WALK_PHYS;
 520
 521	INIT_LIST_HEAD(&walk->buffers);
 522
 523	return skcipher_walk_skcipher(walk, req);
 524}
 525EXPORT_SYMBOL_GPL(skcipher_walk_async);
 526
 527static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 528				     struct aead_request *req, bool atomic)
 529{
 530	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 531	int err;
 532
 533	walk->nbytes = 0;
 534	walk->iv = req->iv;
 535	walk->oiv = req->iv;
 536
 537	if (unlikely(!walk->total))
 538		return 0;
 539
 540	walk->flags &= ~SKCIPHER_WALK_PHYS;
 541
 542	scatterwalk_start(&walk->in, req->src);
 543	scatterwalk_start(&walk->out, req->dst);
 544
 545	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
 546	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 547
 548	scatterwalk_done(&walk->in, 0, walk->total);
 549	scatterwalk_done(&walk->out, 0, walk->total);
 550
 551	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
 552		walk->flags |= SKCIPHER_WALK_SLEEP;
 553	else
 554		walk->flags &= ~SKCIPHER_WALK_SLEEP;
 555
 556	walk->blocksize = crypto_aead_blocksize(tfm);
 557	walk->stride = crypto_aead_chunksize(tfm);
 558	walk->ivsize = crypto_aead_ivsize(tfm);
 559	walk->alignmask = crypto_aead_alignmask(tfm);
 560
 561	err = skcipher_walk_first(walk);
 562
 563	if (atomic)
 564		walk->flags &= ~SKCIPHER_WALK_SLEEP;
 565
 566	return err;
 567}
 568
 569int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
 570			       struct aead_request *req, bool atomic)
 571{
 572	walk->total = req->cryptlen;
 573
 574	return skcipher_walk_aead_common(walk, req, atomic);
 575}
 576EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
 577
 578int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
 579			       struct aead_request *req, bool atomic)
 580{
 581	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 582
 583	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
 584
 585	return skcipher_walk_aead_common(walk, req, atomic);
 586}
 587EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
 588
 589static void skcipher_set_needkey(struct crypto_skcipher *tfm)
 590{
 591	if (crypto_skcipher_max_keysize(tfm) != 0)
 592		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
 593}
 594
 595static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
 596				     const u8 *key, unsigned int keylen)
 597{
 598	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
 599	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
 600	u8 *buffer, *alignbuffer;
 601	unsigned long absize;
 602	int ret;
 603
 604	absize = keylen + alignmask;
 605	buffer = kmalloc(absize, GFP_ATOMIC);
 606	if (!buffer)
 607		return -ENOMEM;
 608
 609	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 610	memcpy(alignbuffer, key, keylen);
 611	ret = cipher->setkey(tfm, alignbuffer, keylen);
 612	kfree_sensitive(buffer);
 613	return ret;
 614}
 615
 616int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 617			   unsigned int keylen)
 618{
 619	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
 620	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
 621	int err;
 622
 623	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
 624		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);
 625
 626		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
 627		crypto_lskcipher_set_flags(*ctx,
 628					   crypto_skcipher_get_flags(tfm) &
 629					   CRYPTO_TFM_REQ_MASK);
 630		err = crypto_lskcipher_setkey(*ctx, key, keylen);
 631		goto out;
 632	}
 633
 634	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
 635		return -EINVAL;
 636
 637	if ((unsigned long)key & alignmask)
 638		err = skcipher_setkey_unaligned(tfm, key, keylen);
 639	else
 640		err = cipher->setkey(tfm, key, keylen);
 641
 642out:
 643	if (unlikely(err)) {
 644		skcipher_set_needkey(tfm);
 645		return err;
 646	}
 647
 648	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
 649	return 0;
 650}
 651EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
 652
 653int crypto_skcipher_encrypt(struct skcipher_request *req)
 654{
 655	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 656	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 657	int ret;
 658
 659	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
 660		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
 661
 662		atomic64_inc(&istat->encrypt_cnt);
 663		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
 664	}
 665
 666	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 667		ret = -ENOKEY;
 668	else if (alg->co.base.cra_type != &crypto_skcipher_type)
 669		ret = crypto_lskcipher_encrypt_sg(req);
 670	else
 671		ret = alg->encrypt(req);
 672
 673	return crypto_skcipher_errstat(alg, ret);
 674}
 675EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
 676
 677int crypto_skcipher_decrypt(struct skcipher_request *req)
 
 678{
 679	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 680	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 681	int ret;
 682
 683	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
 684		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
 685
 686		atomic64_inc(&istat->decrypt_cnt);
 687		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
 688	}
 689
 690	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 691		ret = -ENOKEY;
 692	else if (alg->co.base.cra_type != &crypto_skcipher_type)
 693		ret = crypto_lskcipher_decrypt_sg(req);
 694	else
 695		ret = alg->decrypt(req);
 696
 697	return crypto_skcipher_errstat(alg, ret);
 698}
 699EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
 700
 701static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
 
 702{
 703	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 704	u8 *ivs = skcipher_request_ctx(req);
 
 705
 706	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
 707
 708	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
 709	       crypto_skcipher_statesize(tfm));
 710
 711	return 0;
 712}
 713
 714static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
 715{
 716	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 717	u8 *ivs = skcipher_request_ctx(req);
 
 718
 719	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);
 
 720
 721	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
 722	       crypto_skcipher_statesize(tfm));
 723
 724	return 0;
 725}
 726
 727static int skcipher_noexport(struct skcipher_request *req, void *out)
 728{
 729	return 0;
 730}
 731
 732static int skcipher_noimport(struct skcipher_request *req, const void *in)
 733{
 734	return 0;
 735}
 736
 737int crypto_skcipher_export(struct skcipher_request *req, void *out)
 
 738{
 739	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 740	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 741
 742	if (alg->co.base.cra_type != &crypto_skcipher_type)
 743		return crypto_lskcipher_export(req, out);
 744	return alg->export(req, out);
 745}
 746EXPORT_SYMBOL_GPL(crypto_skcipher_export);
 747
 748int crypto_skcipher_import(struct skcipher_request *req, const void *in)
 
 749{
 750	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 751	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 752
 753	if (alg->co.base.cra_type != &crypto_skcipher_type)
 754		return crypto_lskcipher_import(req, in);
 755	return alg->import(req, in);
 756}
 757EXPORT_SYMBOL_GPL(crypto_skcipher_import);
 758
 759static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 760{
 761	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 762	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
 763
 764	alg->exit(skcipher);
 765}
 766
 767static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
 768{
 769	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
 770	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
 771
 772	skcipher_set_needkey(skcipher);
 
 773
 774	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
 775		unsigned am = crypto_skcipher_alignmask(skcipher);
 776		unsigned reqsize;
 777
 778		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
 779		reqsize += crypto_skcipher_ivsize(skcipher);
 780		reqsize += crypto_skcipher_statesize(skcipher);
 781		crypto_skcipher_set_reqsize(skcipher, reqsize);
 
 782
 783		return crypto_init_lskcipher_ops_sg(tfm);
 784	}
 785
 786	if (alg->exit)
 787		skcipher->base.exit = crypto_skcipher_exit_tfm;
 788
 789	if (alg->init)
 790		return alg->init(skcipher);
 791
 792	return 0;
 793}
 794
 795static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
 796{
 797	if (alg->cra_type != &crypto_skcipher_type)
 798		return sizeof(struct crypto_lskcipher *);
 799
 800	return crypto_alg_extsize(alg);
 801}
 802
 803static void crypto_skcipher_free_instance(struct crypto_instance *inst)
 804{
 805	struct skcipher_instance *skcipher =
 806		container_of(inst, struct skcipher_instance, s.base);
 807
 808	skcipher->free(skcipher);
 809}
 810
 811static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 812	__maybe_unused;
 813static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 814{
 815	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
 
 816
 817	seq_printf(m, "type         : skcipher\n");
 818	seq_printf(m, "async        : %s\n",
 819		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
 820	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
 821	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
 822	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
 823	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
 824	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
 825	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 826	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
 827}
 828
 829static int __maybe_unused crypto_skcipher_report(
 830	struct sk_buff *skb, struct crypto_alg *alg)
 831{
 832	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
 833	struct crypto_report_blkcipher rblkcipher;
 834
 835	memset(&rblkcipher, 0, sizeof(rblkcipher));
 836
 837	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
 838	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
 839
 840	rblkcipher.blocksize = alg->cra_blocksize;
 841	rblkcipher.min_keysize = skcipher->min_keysize;
 842	rblkcipher.max_keysize = skcipher->max_keysize;
 843	rblkcipher.ivsize = skcipher->ivsize;
 844
 845	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
 846		       sizeof(rblkcipher), &rblkcipher);
 847}
 848
 849static int __maybe_unused crypto_skcipher_report_stat(
 850	struct sk_buff *skb, struct crypto_alg *alg)
 851{
 852	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
 853	struct crypto_istat_cipher *istat;
 854	struct crypto_stat_cipher rcipher;
 855
 856	istat = skcipher_get_stat(skcipher);
 857
 858	memset(&rcipher, 0, sizeof(rcipher));
 859
 860	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
 861
 862	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
 863	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
 864	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
 865	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
 866	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
 867
 868	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
 869}
 870
 871static const struct crypto_type crypto_skcipher_type = {
 872	.extsize = crypto_skcipher_extsize,
 873	.init_tfm = crypto_skcipher_init_tfm,
 874	.free = crypto_skcipher_free_instance,
 875#ifdef CONFIG_PROC_FS
 876	.show = crypto_skcipher_show,
 877#endif
 878#if IS_ENABLED(CONFIG_CRYPTO_USER)
 879	.report = crypto_skcipher_report,
 880#endif
 881#ifdef CONFIG_CRYPTO_STATS
 882	.report_stat = crypto_skcipher_report_stat,
 883#endif
 884	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 885	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
 886	.type = CRYPTO_ALG_TYPE_SKCIPHER,
 887	.tfmsize = offsetof(struct crypto_skcipher, base),
 888};
 889
 890int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
 891			 struct crypto_instance *inst,
 892			 const char *name, u32 type, u32 mask)
 893{
 894	spawn->base.frontend = &crypto_skcipher_type;
 895	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
 896}
 897EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
 898
 899struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
 900					      u32 type, u32 mask)
 901{
 902	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
 903}
 904EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
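
/*
 * A minimal usage sketch for crypto_alloc_skcipher(): allocate a transform,
 * set a key, and run one encryption, waiting for completion even when the
 * underlying implementation is asynchronous.  The algorithm name "cbc(aes)"
 * and the single-scatterlist buffer handling are illustrative assumptions;
 * buf is assumed to be a linear (e.g. kmalloc'd) buffer of len bytes.
 */
static int example_encrypt_buf(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a blocking wait. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}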
 905
 906struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
 907				const char *alg_name, u32 type, u32 mask)
 908{
 909	struct crypto_skcipher *tfm;
 910
 911	/* Only sync algorithms allowed. */
 912	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
 913
 914	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
 915
 916	/*
 917	 * Make sure we do not allocate something that might get used with
 918	 * an on-stack request: check the request size.
 919	 */
 920	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
 921				    MAX_SYNC_SKCIPHER_REQSIZE)) {
 922		crypto_free_skcipher(tfm);
 923		return ERR_PTR(-EINVAL);
 924	}
 925
 926	return (struct crypto_sync_skcipher *)tfm;
 927}
 928EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
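
/*
 * A minimal sketch of the pattern crypto_alloc_sync_skcipher() enables:
 * because the request size is bounded, the request can live on the stack.
 * The tfm is assumed to have been allocated with crypto_alloc_sync_skcipher()
 * and keyed already; sg/len/iv describe the data, encrypted in place here.
 */
static int example_sync_encrypt(struct crypto_sync_skcipher *tfm,
				struct scatterlist *sg, unsigned int len,
				u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	/* A sync tfm never returns -EINPROGRESS, so no completion is needed. */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return err;
}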
 929
 930int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
 931{
 932	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
 933}
 934EXPORT_SYMBOL_GPL(crypto_has_skcipher);
 935
 936int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 937{
 938	struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
 939	struct crypto_alg *base = &alg->base;
 940
 941	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
 942	    alg->statesize > PAGE_SIZE / 2 ||
 943	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
 944		return -EINVAL;
 945
 946	if (!alg->chunksize)
 947		alg->chunksize = base->cra_blocksize;
 948
 949	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 950
 951	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
 952		memset(istat, 0, sizeof(*istat));
 953
 954	return 0;
 955}
 956
 957static int skcipher_prepare_alg(struct skcipher_alg *alg)
 958{
 959	struct crypto_alg *base = &alg->base;
 960	int err;
 961
 962	err = skcipher_prepare_alg_common(&alg->co);
 963	if (err)
 964		return err;
 965
 966	if (alg->walksize > PAGE_SIZE / 8)
 967		return -EINVAL;
 968
 969	if (!alg->walksize)
 970		alg->walksize = alg->chunksize;
 971
 972	if (!alg->statesize) {
 973		alg->import = skcipher_noimport;
 974		alg->export = skcipher_noexport;
 975	} else if (!(alg->import && alg->export))
 976		return -EINVAL;
 977
 978	base->cra_type = &crypto_skcipher_type;
 979	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
 980
 981	return 0;
 982}
 983
 984int crypto_register_skcipher(struct skcipher_alg *alg)
 985{
 986	struct crypto_alg *base = &alg->base;
 987	int err;
 988
 989	err = skcipher_prepare_alg(alg);
 990	if (err)
 991		return err;
 992
 993	return crypto_register_alg(base);
 994}
 995EXPORT_SYMBOL_GPL(crypto_register_skcipher);
 996
 997void crypto_unregister_skcipher(struct skcipher_alg *alg)
 998{
 999	crypto_unregister_alg(&alg->base);
1000}
1001EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
1002
1003int crypto_register_skciphers(struct skcipher_alg *algs, int count)
1004{
1005	int i, ret;
1006
1007	for (i = 0; i < count; i++) {
1008		ret = crypto_register_skcipher(&algs[i]);
1009		if (ret)
1010			goto err;
1011	}
1012
1013	return 0;
1014
1015err:
1016	for (--i; i >= 0; --i)
1017		crypto_unregister_skcipher(&algs[i]);
1018
1019	return ret;
1020}
1021EXPORT_SYMBOL_GPL(crypto_register_skciphers);
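
/*
 * A sketch of how an implementation typically uses
 * crypto_register_skciphers()/crypto_unregister_skciphers(): one array of
 * algorithms registered at module init and unregistered on exit.  The
 * example_* context, setkey/encrypt/decrypt handlers and the algorithm
 * names are hypothetical placeholders.
 */
static struct skcipher_alg example_algs[] = { {
	.base.cra_name		= "ecb(example)",
	.base.cra_driver_name	= "ecb-example-generic",
	.base.cra_priority	= 100,
	.base.cra_blocksize	= 16,
	.base.cra_ctxsize	= sizeof(struct example_ctx),	/* hypothetical */
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= 16,
	.max_keysize		= 32,
	.setkey			= example_setkey,		/* hypothetical */
	.encrypt		= example_encrypt,		/* hypothetical */
	.decrypt		= example_decrypt,		/* hypothetical */
} };

static int __init example_module_init(void)
{
	return crypto_register_skciphers(example_algs,
					 ARRAY_SIZE(example_algs));
}

static void __exit example_module_exit(void)
{
	crypto_unregister_skciphers(example_algs, ARRAY_SIZE(example_algs));
}

module_init(example_module_init);
module_exit(example_module_exit);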
1022
1023void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
1024{
1025	int i;
1026
1027	for (i = count - 1; i >= 0; --i)
1028		crypto_unregister_skcipher(&algs[i]);
1029}
1030EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
1031
1032int skcipher_register_instance(struct crypto_template *tmpl,
1033			       struct skcipher_instance *inst)
1034{
1035	int err;
1036
1037	if (WARN_ON(!inst->free))
1038		return -EINVAL;
1039
1040	err = skcipher_prepare_alg(&inst->alg);
1041	if (err)
1042		return err;
1043
1044	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
1045}
1046EXPORT_SYMBOL_GPL(skcipher_register_instance);
1047
1048static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
1049				  unsigned int keylen)
1050{
1051	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
1052
1053	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
1054	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
1055				CRYPTO_TFM_REQ_MASK);
1056	return crypto_cipher_setkey(cipher, key, keylen);
1057}
1058
1059static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
1060{
1061	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
1062	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
1063	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
1064	struct crypto_cipher *cipher;
1065
1066	cipher = crypto_spawn_cipher(spawn);
1067	if (IS_ERR(cipher))
1068		return PTR_ERR(cipher);
1069
1070	ctx->cipher = cipher;
1071	return 0;
1072}
1073
1074static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
1075{
1076	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
1077
1078	crypto_free_cipher(ctx->cipher);
1079}
1080
1081static void skcipher_free_instance_simple(struct skcipher_instance *inst)
1082{
1083	crypto_drop_cipher(skcipher_instance_ctx(inst));
1084	kfree(inst);
1085}
1086
1087/**
1088 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
1089 *
1090 * Allocate an skcipher_instance for a simple block cipher mode of operation,
1091 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
1092 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
1093 * alignmask, and priority are set from the underlying cipher but can be
1094 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
1095 * default ->setkey(), ->init(), and ->exit() methods are installed.
1096 *
1097 * @tmpl: the template being instantiated
1098 * @tb: the template parameters
1099 *
1100 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
1101 *	   needs to register the instance.
1102 */
1103struct skcipher_instance *skcipher_alloc_instance_simple(
1104	struct crypto_template *tmpl, struct rtattr **tb)
1105{
1106	u32 mask;
1107	struct skcipher_instance *inst;
1108	struct crypto_cipher_spawn *spawn;
1109	struct crypto_alg *cipher_alg;
1110	int err;
1111
1112	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
1113	if (err)
1114		return ERR_PTR(err);
1115
1116	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
1117	if (!inst)
1118		return ERR_PTR(-ENOMEM);
1119	spawn = skcipher_instance_ctx(inst);
1120
1121	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
1122				 crypto_attr_alg_name(tb[1]), 0, mask);
1123	if (err)
1124		goto err_free_inst;
1125	cipher_alg = crypto_spawn_cipher_alg(spawn);
1126
1127	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
1128				  cipher_alg);
1129	if (err)
1130		goto err_free_inst;
1131
1132	inst->free = skcipher_free_instance_simple;
1133
1134	/* Default algorithm properties, can be overridden */
1135	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
1136	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
1137	inst->alg.base.cra_priority = cipher_alg->cra_priority;
1138	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
1139	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
1140	inst->alg.ivsize = cipher_alg->cra_blocksize;
1141
1142	/* Use skcipher_ctx_simple by default, can be overridden */
1143	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
1144	inst->alg.setkey = skcipher_setkey_simple;
1145	inst->alg.init = skcipher_init_tfm_simple;
1146	inst->alg.exit = skcipher_exit_tfm_simple;
1147
1148	return inst;
1149
1150err_free_inst:
1151	skcipher_free_instance_simple(inst);
1152	return ERR_PTR(err);
1153}
1154EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
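
/*
 * A sketch of the intended caller: a template ->create() callback in the
 * style of the ecb/cbc templates.  example_encrypt/example_decrypt stand
 * in for the mode's real per-request handlers and are hypothetical.
 */
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* Override defaults that do not fit the mode, e.g. no IV for ecb: */
	inst->alg.ivsize = 0;

	inst->alg.encrypt = example_encrypt;	/* hypothetical */
	inst->alg.decrypt = example_decrypt;	/* hypothetical */

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);

	return err;
}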
1155
1156MODULE_LICENSE("GPL");
1157MODULE_DESCRIPTION("Symmetric key cipher type");
1158MODULE_IMPORT_NS(CRYPTO_INTERNAL);