v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Cryptographic API.
   4 *
   5 * s390 implementation of the AES Cipher Algorithm.
   6 *
   7 * s390 Version:
   8 *   Copyright IBM Corp. 2005, 2017
   9 *   Author(s): Jan Glauber (jang@de.ibm.com)
   10 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
  11 *		Patrick Steuer <patrick.steuer@de.ibm.com>
  12 *		Harald Freudenberger <freude@de.ibm.com>
  13 *
  14 * Derived from "crypto/aes_generic.c"
  15 */
  16
  17#define KMSG_COMPONENT "aes_s390"
  18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19
  20#include <crypto/aes.h>
  21#include <crypto/algapi.h>
  22#include <crypto/ghash.h>
  23#include <crypto/internal/aead.h>
  24#include <crypto/internal/cipher.h>
  25#include <crypto/internal/skcipher.h>
  26#include <crypto/scatterwalk.h>
  27#include <linux/err.h>
  28#include <linux/module.h>
  29#include <linux/cpufeature.h>
  30#include <linux/init.h>
  31#include <linux/mutex.h>
  32#include <linux/fips.h>
  33#include <linux/string.h>
  34#include <crypto/xts.h>
  35#include <asm/cpacf.h>
  36
  37static u8 *ctrblk;
  38static DEFINE_MUTEX(ctrblk_lock);
  39
  40static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
  41		    kma_functions;
  42
  43struct s390_aes_ctx {
  44	u8 key[AES_MAX_KEY_SIZE];
  45	int key_len;
  46	unsigned long fc;
  47	union {
  48		struct crypto_skcipher *skcipher;
  49		struct crypto_cipher *cip;
  50	} fallback;
  51};
  52
  53struct s390_xts_ctx {
  54	u8 key[32];
  55	u8 pcc_key[32];
  56	int key_len;
  57	unsigned long fc;
  58	struct crypto_skcipher *fallback;
  59};
  60
  61struct gcm_sg_walk {
  62	struct scatter_walk walk;
  63	unsigned int walk_bytes;
  64	u8 *walk_ptr;
  65	unsigned int walk_bytes_remain;
  66	u8 buf[AES_BLOCK_SIZE];
  67	unsigned int buf_bytes;
  68	u8 *ptr;
  69	unsigned int nbytes;
  70};
  71
  72static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
  73		unsigned int key_len)
  74{
  75	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  76
  77	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  78	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
  79			CRYPTO_TFM_REQ_MASK);
  80
  81	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
  82}
  83
  84static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  85		       unsigned int key_len)
  86{
  87	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  88	unsigned long fc;
  89
  90	/* Pick the correct function code based on the key length */
  91	fc = (key_len == 16) ? CPACF_KM_AES_128 :
  92	     (key_len == 24) ? CPACF_KM_AES_192 :
  93	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
  94
  95	/* Check if the function code is available */
  96	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
  97	if (!sctx->fc)
  98		return setkey_fallback_cip(tfm, in_key, key_len);
  99
 100	sctx->key_len = key_len;
 101	memcpy(sctx->key, in_key, key_len);
 102	return 0;
 103}
 104
 105static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 106{
 107	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 108
 109	if (unlikely(!sctx->fc)) {
 110		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
 111		return;
 112	}
 113	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 114}
 115
 116static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 117{
 118	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 119
 120	if (unlikely(!sctx->fc)) {
 121		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
 122		return;
 123	}
 124	cpacf_km(sctx->fc | CPACF_DECRYPT,
 125		 &sctx->key, out, in, AES_BLOCK_SIZE);
 126}
 127
 128static int fallback_init_cip(struct crypto_tfm *tfm)
 129{
 130	const char *name = tfm->__crt_alg->cra_name;
 131	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 132
 133	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
 134						 CRYPTO_ALG_NEED_FALLBACK);
 135
 136	if (IS_ERR(sctx->fallback.cip)) {
 137		pr_err("Allocating AES fallback algorithm %s failed\n",
 138		       name);
 139		return PTR_ERR(sctx->fallback.cip);
 140	}
 141
 142	return 0;
 143}
 144
 145static void fallback_exit_cip(struct crypto_tfm *tfm)
 146{
 147	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 148
 149	crypto_free_cipher(sctx->fallback.cip);
 150	sctx->fallback.cip = NULL;
 151}
 152
 153static struct crypto_alg aes_alg = {
 154	.cra_name		=	"aes",
 155	.cra_driver_name	=	"aes-s390",
 156	.cra_priority		=	300,
 157	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 158					CRYPTO_ALG_NEED_FALLBACK,
 159	.cra_blocksize		=	AES_BLOCK_SIZE,
 160	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 161	.cra_module		=	THIS_MODULE,
 162	.cra_init               =       fallback_init_cip,
 163	.cra_exit               =       fallback_exit_cip,
 164	.cra_u			=	{
 165		.cipher = {
 166			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
 167			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
 168			.cia_setkey		=	aes_set_key,
 169			.cia_encrypt		=	crypto_aes_encrypt,
 170			.cia_decrypt		=	crypto_aes_decrypt,
 171		}
 172	}
 173};
 174
 175static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 176				    unsigned int len)
 177{
 178	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 179
 180	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
 181				    CRYPTO_TFM_REQ_MASK);
 182	crypto_skcipher_set_flags(sctx->fallback.skcipher,
 183				  crypto_skcipher_get_flags(tfm) &
 184				  CRYPTO_TFM_REQ_MASK);
 185	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
 186}
 187
 188static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
 189				   struct skcipher_request *req,
 190				   unsigned long modifier)
 191{
 192	struct skcipher_request *subreq = skcipher_request_ctx(req);
 193
 194	*subreq = *req;
 195	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
 196	return (modifier & CPACF_DECRYPT) ?
 197		crypto_skcipher_decrypt(subreq) :
 198		crypto_skcipher_encrypt(subreq);
 199}
 200
 201static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 202			   unsigned int key_len)
 203{
 204	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 205	unsigned long fc;
 206
 207	/* Pick the correct function code based on the key length */
 208	fc = (key_len == 16) ? CPACF_KM_AES_128 :
 209	     (key_len == 24) ? CPACF_KM_AES_192 :
 210	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 211
 212	/* Check if the function code is available */
 213	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 214	if (!sctx->fc)
 215		return setkey_fallback_skcipher(tfm, in_key, key_len);
 216
 217	sctx->key_len = key_len;
 218	memcpy(sctx->key, in_key, key_len);
 219	return 0;
 220}
 221
 222static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 223{
 224	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 225	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 226	struct skcipher_walk walk;
 227	unsigned int nbytes, n;
 228	int ret;
 229
 230	if (unlikely(!sctx->fc))
 231		return fallback_skcipher_crypt(sctx, req, modifier);
 232
 233	ret = skcipher_walk_virt(&walk, req, false);
 234	while ((nbytes = walk.nbytes) != 0) {
 235		/* only use complete blocks */
 236		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 237		cpacf_km(sctx->fc | modifier, sctx->key,
 238			 walk.dst.virt.addr, walk.src.virt.addr, n);
 239		ret = skcipher_walk_done(&walk, nbytes - n);
 240	}
 241	return ret;
 242}
 243
 244static int ecb_aes_encrypt(struct skcipher_request *req)
 245{
 246	return ecb_aes_crypt(req, 0);
 247}
 248
 249static int ecb_aes_decrypt(struct skcipher_request *req)
 250{
 251	return ecb_aes_crypt(req, CPACF_DECRYPT);
 252}
 253
 254static int fallback_init_skcipher(struct crypto_skcipher *tfm)
 255{
 256	const char *name = crypto_tfm_alg_name(&tfm->base);
 257	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 258
 259	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
 260				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 261
 262	if (IS_ERR(sctx->fallback.skcipher)) {
 263		pr_err("Allocating AES fallback algorithm %s failed\n",
 264		       name);
 265		return PTR_ERR(sctx->fallback.skcipher);
 266	}
 267
 268	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 269				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
 270	return 0;
 271}
 272
 273static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
 274{
 275	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 276
 277	crypto_free_skcipher(sctx->fallback.skcipher);
 278}
 279
 280static struct skcipher_alg ecb_aes_alg = {
 281	.base.cra_name		=	"ecb(aes)",
 282	.base.cra_driver_name	=	"ecb-aes-s390",
 283	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
 284	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 285	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 286	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 287	.base.cra_module	=	THIS_MODULE,
 288	.init			=	fallback_init_skcipher,
 289	.exit			=	fallback_exit_skcipher,
 290	.min_keysize		=	AES_MIN_KEY_SIZE,
 291	.max_keysize		=	AES_MAX_KEY_SIZE,
 292	.setkey			=	ecb_aes_set_key,
 293	.encrypt		=	ecb_aes_encrypt,
 294	.decrypt		=	ecb_aes_decrypt,
 295};
 296
 297static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 298			   unsigned int key_len)
 299{
 300	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 301	unsigned long fc;
 302
 303	/* Pick the correct function code based on the key length */
 304	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
 305	     (key_len == 24) ? CPACF_KMC_AES_192 :
 306	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 307
 308	/* Check if the function code is available */
 309	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
 310	if (!sctx->fc)
 311		return setkey_fallback_skcipher(tfm, in_key, key_len);
 312
 313	sctx->key_len = key_len;
 314	memcpy(sctx->key, in_key, key_len);
 315	return 0;
 316}
 317
 318static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 319{
 320	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 321	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 322	struct skcipher_walk walk;
 323	unsigned int nbytes, n;
 324	int ret;
 325	struct {
 326		u8 iv[AES_BLOCK_SIZE];
 327		u8 key[AES_MAX_KEY_SIZE];
 328	} param;
 329
 330	if (unlikely(!sctx->fc))
 331		return fallback_skcipher_crypt(sctx, req, modifier);
 332
 333	ret = skcipher_walk_virt(&walk, req, false);
 334	if (ret)
 335		return ret;
 336	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
 337	memcpy(param.key, sctx->key, sctx->key_len);
 338	while ((nbytes = walk.nbytes) != 0) {
 339		/* only use complete blocks */
 340		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 341		cpacf_kmc(sctx->fc | modifier, &param,
 342			  walk.dst.virt.addr, walk.src.virt.addr, n);
 343		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
 344		ret = skcipher_walk_done(&walk, nbytes - n);
 345	}
 346	memzero_explicit(&param, sizeof(param));
 347	return ret;
 348}
 349
 350static int cbc_aes_encrypt(struct skcipher_request *req)
 351{
 352	return cbc_aes_crypt(req, 0);
 353}
 354
 355static int cbc_aes_decrypt(struct skcipher_request *req)
 356{
 357	return cbc_aes_crypt(req, CPACF_DECRYPT);
 358}
 359
 360static struct skcipher_alg cbc_aes_alg = {
 361	.base.cra_name		=	"cbc(aes)",
 362	.base.cra_driver_name	=	"cbc-aes-s390",
 363	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 364	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 365	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 366	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 367	.base.cra_module	=	THIS_MODULE,
 368	.init			=	fallback_init_skcipher,
 369	.exit			=	fallback_exit_skcipher,
 370	.min_keysize		=	AES_MIN_KEY_SIZE,
 371	.max_keysize		=	AES_MAX_KEY_SIZE,
 372	.ivsize			=	AES_BLOCK_SIZE,
 373	.setkey			=	cbc_aes_set_key,
 374	.encrypt		=	cbc_aes_encrypt,
 375	.decrypt		=	cbc_aes_decrypt,
 376};
 377
 378static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
 379			       unsigned int len)
 380{
 381	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 382
 383	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
 384	crypto_skcipher_set_flags(xts_ctx->fallback,
 385				  crypto_skcipher_get_flags(tfm) &
 386				  CRYPTO_TFM_REQ_MASK);
 387	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
 388}
 389
 390static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 391			   unsigned int key_len)
 392{
 393	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 394	unsigned long fc;
 395	int err;
 396
 397	err = xts_fallback_setkey(tfm, in_key, key_len);
 398	if (err)
 399		return err;
 400
 401	/* Pick the correct function code based on the key length */
 402	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
 403	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
 404
 405	/* Check if the function code is available */
 406	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 407	if (!xts_ctx->fc)
 408		return 0;
 409
 410	/* Split the XTS key into the two subkeys */
 411	key_len = key_len / 2;
 412	xts_ctx->key_len = key_len;
 413	memcpy(xts_ctx->key, in_key, key_len);
 414	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
 415	return 0;
 416}
 417
 418static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 419{
 420	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 421	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 422	struct skcipher_walk walk;
 423	unsigned int offset, nbytes, n;
 424	int ret;
 425	struct {
 426		u8 key[32];
 427		u8 tweak[16];
 428		u8 block[16];
 429		u8 bit[16];
 430		u8 xts[16];
 431	} pcc_param;
 432	struct {
 433		u8 key[32];
 434		u8 init[16];
 435	} xts_param;
 436
 437	if (req->cryptlen < AES_BLOCK_SIZE)
 438		return -EINVAL;
 439
 440	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
 441		struct skcipher_request *subreq = skcipher_request_ctx(req);
 442
 443		*subreq = *req;
 444		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
 445		return (modifier & CPACF_DECRYPT) ?
 446			crypto_skcipher_decrypt(subreq) :
 447			crypto_skcipher_encrypt(subreq);
 448	}
 449
 450	ret = skcipher_walk_virt(&walk, req, false);
 451	if (ret)
 452		return ret;
 453	offset = xts_ctx->key_len & 0x10;
 454	memset(pcc_param.block, 0, sizeof(pcc_param.block));
 455	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
 456	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 457	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
 458	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
 459	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
 460
 461	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
 462	memcpy(xts_param.init, pcc_param.xts, 16);
 463
 464	while ((nbytes = walk.nbytes) != 0) {
 465		/* only use complete blocks */
 466		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 467		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
 468			 walk.dst.virt.addr, walk.src.virt.addr, n);
 469		ret = skcipher_walk_done(&walk, nbytes - n);
 470	}
 471	memzero_explicit(&pcc_param, sizeof(pcc_param));
 472	memzero_explicit(&xts_param, sizeof(xts_param));
 473	return ret;
 474}
 475
 476static int xts_aes_encrypt(struct skcipher_request *req)
 477{
 478	return xts_aes_crypt(req, 0);
 479}
 480
 481static int xts_aes_decrypt(struct skcipher_request *req)
 482{
 483	return xts_aes_crypt(req, CPACF_DECRYPT);
 484}
 485
 486static int xts_fallback_init(struct crypto_skcipher *tfm)
 487{
 488	const char *name = crypto_tfm_alg_name(&tfm->base);
 489	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 490
 491	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
 492				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 493
 494	if (IS_ERR(xts_ctx->fallback)) {
 495		pr_err("Allocating XTS fallback algorithm %s failed\n",
 496		       name);
 497		return PTR_ERR(xts_ctx->fallback);
 498	}
 499	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 500				    crypto_skcipher_reqsize(xts_ctx->fallback));
 501	return 0;
 502}
 503
 504static void xts_fallback_exit(struct crypto_skcipher *tfm)
 505{
 506	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 507
 508	crypto_free_skcipher(xts_ctx->fallback);
 509}
 510
 511static struct skcipher_alg xts_aes_alg = {
 512	.base.cra_name		=	"xts(aes)",
 513	.base.cra_driver_name	=	"xts-aes-s390",
 514	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 515	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 516	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 517	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
 518	.base.cra_module	=	THIS_MODULE,
 519	.init			=	xts_fallback_init,
 520	.exit			=	xts_fallback_exit,
 521	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 522	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 523	.ivsize			=	AES_BLOCK_SIZE,
 524	.setkey			=	xts_aes_set_key,
 525	.encrypt		=	xts_aes_encrypt,
 526	.decrypt		=	xts_aes_decrypt,
 527};
 528
 529static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 530			   unsigned int key_len)
 531{
 532	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 533	unsigned long fc;
 534
 535	/* Pick the correct function code based on the key length */
 536	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
 537	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
 538	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
 539
 540	/* Check if the function code is available */
 541	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
 542	if (!sctx->fc)
 543		return setkey_fallback_skcipher(tfm, in_key, key_len);
 544
 545	sctx->key_len = key_len;
 546	memcpy(sctx->key, in_key, key_len);
 547	return 0;
 548}
 549
 550static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
 551{
 552	unsigned int i, n;
 553
 554	/* only use complete blocks, max. PAGE_SIZE */
 555	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
 556	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
 557	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
 558		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
 559		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
 560		ctrptr += AES_BLOCK_SIZE;
 561	}
 562	return n;
 563}
 564
 565static int ctr_aes_crypt(struct skcipher_request *req)
 566{
 567	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 568	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 569	u8 buf[AES_BLOCK_SIZE], *ctrptr;
 570	struct skcipher_walk walk;
 571	unsigned int n, nbytes;
 572	int ret, locked;
 573
 574	if (unlikely(!sctx->fc))
 575		return fallback_skcipher_crypt(sctx, req, 0);
 576
 577	locked = mutex_trylock(&ctrblk_lock);
 578
 579	ret = skcipher_walk_virt(&walk, req, false);
 580	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 581		n = AES_BLOCK_SIZE;
 582
 583		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 584			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
 585		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
 586		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
 587			    walk.src.virt.addr, n, ctrptr);
 588		if (ctrptr == ctrblk)
 589			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
 590			       AES_BLOCK_SIZE);
 591		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 592		ret = skcipher_walk_done(&walk, nbytes - n);
 593	}
 594	if (locked)
 595		mutex_unlock(&ctrblk_lock);
 596	/*
 597	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 598	 */
 599	if (nbytes) {
 600		memset(buf, 0, AES_BLOCK_SIZE);
 601		memcpy(buf, walk.src.virt.addr, nbytes);
 602		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
 603			    AES_BLOCK_SIZE, walk.iv);
 604		memcpy(walk.dst.virt.addr, buf, nbytes);
 605		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 606		ret = skcipher_walk_done(&walk, 0);
 607	}
 608
 609	return ret;
 610}
 611
 612static struct skcipher_alg ctr_aes_alg = {
 613	.base.cra_name		=	"ctr(aes)",
 614	.base.cra_driver_name	=	"ctr-aes-s390",
 615	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 616	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 617	.base.cra_blocksize	=	1,
 618	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 619	.base.cra_module	=	THIS_MODULE,
 620	.init			=	fallback_init_skcipher,
 621	.exit			=	fallback_exit_skcipher,
 622	.min_keysize		=	AES_MIN_KEY_SIZE,
 623	.max_keysize		=	AES_MAX_KEY_SIZE,
 624	.ivsize			=	AES_BLOCK_SIZE,
 625	.setkey			=	ctr_aes_set_key,
 626	.encrypt		=	ctr_aes_crypt,
 627	.decrypt		=	ctr_aes_crypt,
 628	.chunksize		=	AES_BLOCK_SIZE,
 629};
 630
 631static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
 632			  unsigned int keylen)
 633{
 634	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 635
 636	switch (keylen) {
 637	case AES_KEYSIZE_128:
 638		ctx->fc = CPACF_KMA_GCM_AES_128;
 639		break;
 640	case AES_KEYSIZE_192:
 641		ctx->fc = CPACF_KMA_GCM_AES_192;
 642		break;
 643	case AES_KEYSIZE_256:
 644		ctx->fc = CPACF_KMA_GCM_AES_256;
 645		break;
 646	default:
 647		return -EINVAL;
 648	}
 649
 650	memcpy(ctx->key, key, keylen);
 651	ctx->key_len = keylen;
 652	return 0;
 653}
 654
 655static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 656{
 657	switch (authsize) {
 658	case 4:
 659	case 8:
 660	case 12:
 661	case 13:
 662	case 14:
 663	case 15:
 664	case 16:
 665		break;
 666	default:
 667		return -EINVAL;
 668	}
 669
 670	return 0;
 671}
 672
 673static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
 674			   unsigned int len)
 675{
 676	memset(gw, 0, sizeof(*gw));
 677	gw->walk_bytes_remain = len;
 678	scatterwalk_start(&gw->walk, sg);
 679}
 680
 681static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
 682{
 683	struct scatterlist *nextsg;
 684
 685	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
 686	while (!gw->walk_bytes) {
 687		nextsg = sg_next(gw->walk.sg);
 688		if (!nextsg)
 689			return 0;
 690		scatterwalk_start(&gw->walk, nextsg);
 691		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
 692						   gw->walk_bytes_remain);
 693	}
 694	gw->walk_ptr = scatterwalk_map(&gw->walk);
 695	return gw->walk_bytes;
 696}
 697
 698static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 699					     unsigned int nbytes)
 700{
 701	gw->walk_bytes_remain -= nbytes;
 702	scatterwalk_unmap(gw->walk_ptr);
 703	scatterwalk_advance(&gw->walk, nbytes);
 704	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 705	gw->walk_ptr = NULL;
 706}
 707
 708static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 709{
 710	int n;
 711
 712	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 713		gw->ptr = gw->buf;
 714		gw->nbytes = gw->buf_bytes;
 715		goto out;
 716	}
 717
 718	if (gw->walk_bytes_remain == 0) {
 719		gw->ptr = NULL;
 720		gw->nbytes = 0;
 721		goto out;
 722	}
 723
 724	if (!_gcm_sg_clamp_and_map(gw)) {
 725		gw->ptr = NULL;
 726		gw->nbytes = 0;
 727		goto out;
 728	}
 729
 730	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 731		gw->ptr = gw->walk_ptr;
 732		gw->nbytes = gw->walk_bytes;
 733		goto out;
 734	}
 735
 736	while (1) {
 737		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 738		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 739		gw->buf_bytes += n;
 740		_gcm_sg_unmap_and_advance(gw, n);
 741		if (gw->buf_bytes >= minbytesneeded) {
 742			gw->ptr = gw->buf;
 743			gw->nbytes = gw->buf_bytes;
 744			goto out;
 745		}
 746		if (!_gcm_sg_clamp_and_map(gw)) {
 747			gw->ptr = NULL;
 748			gw->nbytes = 0;
 749			goto out;
 750		}
 751	}
 752
 753out:
 754	return gw->nbytes;
 755}
 756
 757static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 758{
 759	if (gw->walk_bytes_remain == 0) {
 760		gw->ptr = NULL;
 761		gw->nbytes = 0;
 762		goto out;
 763	}
 764
 765	if (!_gcm_sg_clamp_and_map(gw)) {
 766		gw->ptr = NULL;
 767		gw->nbytes = 0;
 768		goto out;
 769	}
 770
 771	if (gw->walk_bytes >= minbytesneeded) {
 772		gw->ptr = gw->walk_ptr;
 773		gw->nbytes = gw->walk_bytes;
 774		goto out;
 775	}
 776
 777	scatterwalk_unmap(gw->walk_ptr);
 778	gw->walk_ptr = NULL;
 779
 780	gw->ptr = gw->buf;
 781	gw->nbytes = sizeof(gw->buf);
 782
 783out:
 784	return gw->nbytes;
 785}
 786
 787static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 788{
 789	if (gw->ptr == NULL)
 790		return 0;
 791
 792	if (gw->ptr == gw->buf) {
 793		int n = gw->buf_bytes - bytesdone;
 794		if (n > 0) {
 795			memmove(gw->buf, gw->buf + bytesdone, n);
 796			gw->buf_bytes = n;
 797		} else
 798			gw->buf_bytes = 0;
 799	} else
 800		_gcm_sg_unmap_and_advance(gw, bytesdone);
 801
 802	return bytesdone;
 803}
 804
 805static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 806{
 807	int i, n;
 808
 809	if (gw->ptr == NULL)
 810		return 0;
 811
 812	if (gw->ptr == gw->buf) {
 813		for (i = 0; i < bytesdone; i += n) {
 814			if (!_gcm_sg_clamp_and_map(gw))
 815				return i;
 816			n = min(gw->walk_bytes, bytesdone - i);
 817			memcpy(gw->walk_ptr, gw->buf + i, n);
 818			_gcm_sg_unmap_and_advance(gw, n);
 819		}
 820	} else
 821		_gcm_sg_unmap_and_advance(gw, bytesdone);
 822
 823	return bytesdone;
 824}
 825
 826static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 827{
 828	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 829	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 830	unsigned int ivsize = crypto_aead_ivsize(tfm);
 831	unsigned int taglen = crypto_aead_authsize(tfm);
 832	unsigned int aadlen = req->assoclen;
 833	unsigned int pclen = req->cryptlen;
 834	int ret = 0;
 835
 836	unsigned int n, len, in_bytes, out_bytes,
 837		     min_bytes, bytes, aad_bytes, pc_bytes;
 838	struct gcm_sg_walk gw_in, gw_out;
 839	u8 tag[GHASH_DIGEST_SIZE];
 840
 841	struct {
 842		u32 _[3];		/* reserved */
 843		u32 cv;			/* Counter Value */
 844		u8 t[GHASH_DIGEST_SIZE];/* Tag */
 845		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
 846		u64 taadl;		/* Total AAD Length */
 847		u64 tpcl;		/* Total Plain-/Cipher-text Length */
 848		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
 849		u8 k[AES_MAX_KEY_SIZE];	/* Key */
 850	} param;
 851
 852	/*
 853	 * encrypt
 854	 *   req->src: aad||plaintext
 855	 *   req->dst: aad||ciphertext||tag
 856	 * decrypt
 857	 *   req->src: aad||ciphertext||tag
 858	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
 859	 * aad, plaintext and ciphertext may be empty.
 860	 */
 861	if (flags & CPACF_DECRYPT)
 862		pclen -= taglen;
 863	len = aadlen + pclen;
 864
 865	memset(&param, 0, sizeof(param));
 866	param.cv = 1;
 867	param.taadl = aadlen * 8;
 868	param.tpcl = pclen * 8;
 869	memcpy(param.j0, req->iv, ivsize);
 870	*(u32 *)(param.j0 + ivsize) = 1;
 871	memcpy(param.k, ctx->key, ctx->key_len);
 872
 873	gcm_walk_start(&gw_in, req->src, len);
 874	gcm_walk_start(&gw_out, req->dst, len);
 875
 876	do {
 877		min_bytes = min_t(unsigned int,
 878				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
 879		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
 880		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
 881		bytes = min(in_bytes, out_bytes);
 882
 883		if (aadlen + pclen <= bytes) {
 884			aad_bytes = aadlen;
 885			pc_bytes = pclen;
 886			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
 887		} else {
 888			if (aadlen <= bytes) {
 889				aad_bytes = aadlen;
 890				pc_bytes = (bytes - aadlen) &
 891					   ~(AES_BLOCK_SIZE - 1);
 892				flags |= CPACF_KMA_LAAD;
 893			} else {
 894				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
 895				pc_bytes = 0;
 896			}
 897		}
 898
 899		if (aad_bytes > 0)
 900			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
 901
 902		cpacf_kma(ctx->fc | flags, &param,
 903			  gw_out.ptr + aad_bytes,
 904			  gw_in.ptr + aad_bytes, pc_bytes,
 905			  gw_in.ptr, aad_bytes);
 906
 907		n = aad_bytes + pc_bytes;
 908		if (gcm_in_walk_done(&gw_in, n) != n)
 909			return -ENOMEM;
 910		if (gcm_out_walk_done(&gw_out, n) != n)
 911			return -ENOMEM;
 912		aadlen -= aad_bytes;
 913		pclen -= pc_bytes;
 914	} while (aadlen + pclen > 0);
 915
 916	if (flags & CPACF_DECRYPT) {
 917		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
 918		if (crypto_memneq(tag, param.t, taglen))
 919			ret = -EBADMSG;
 920	} else
 921		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
 922
 923	memzero_explicit(&param, sizeof(param));
 924	return ret;
 925}
 926
 927static int gcm_aes_encrypt(struct aead_request *req)
 928{
 929	return gcm_aes_crypt(req, CPACF_ENCRYPT);
 930}
 931
 932static int gcm_aes_decrypt(struct aead_request *req)
 933{
 934	return gcm_aes_crypt(req, CPACF_DECRYPT);
 935}
 936
 937static struct aead_alg gcm_aes_aead = {
 938	.setkey			= gcm_aes_setkey,
 939	.setauthsize		= gcm_aes_setauthsize,
 940	.encrypt		= gcm_aes_encrypt,
 941	.decrypt		= gcm_aes_decrypt,
 942
 943	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
 944	.maxauthsize		= GHASH_DIGEST_SIZE,
 945	.chunksize		= AES_BLOCK_SIZE,
 946
 947	.base			= {
 948		.cra_blocksize		= 1,
 949		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
 950		.cra_priority		= 900,
 951		.cra_name		= "gcm(aes)",
 952		.cra_driver_name	= "gcm-aes-s390",
 953		.cra_module		= THIS_MODULE,
 954	},
 955};
 956
 957static struct crypto_alg *aes_s390_alg;
 958static struct skcipher_alg *aes_s390_skcipher_algs[4];
 959static int aes_s390_skciphers_num;
 960static struct aead_alg *aes_s390_aead_alg;
 961
 962static int aes_s390_register_skcipher(struct skcipher_alg *alg)
 963{
 964	int ret;
 965
 966	ret = crypto_register_skcipher(alg);
 967	if (!ret)
 968		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
 969	return ret;
 970}
 971
 972static void aes_s390_fini(void)
 973{
 974	if (aes_s390_alg)
 975		crypto_unregister_alg(aes_s390_alg);
 976	while (aes_s390_skciphers_num--)
 977		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
 978	if (ctrblk)
 979		free_page((unsigned long) ctrblk);
 980
 981	if (aes_s390_aead_alg)
 982		crypto_unregister_aead(aes_s390_aead_alg);
 983}
 984
 985static int __init aes_s390_init(void)
 986{
 987	int ret;
 988
 989	/* Query available functions for KM, KMC, KMCTR and KMA */
 990	cpacf_query(CPACF_KM, &km_functions);
 991	cpacf_query(CPACF_KMC, &kmc_functions);
 992	cpacf_query(CPACF_KMCTR, &kmctr_functions);
 993	cpacf_query(CPACF_KMA, &kma_functions);
 994
 995	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
 996	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
 997	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
 998		ret = crypto_register_alg(&aes_alg);
 999		if (ret)
1000			goto out_err;
1001		aes_s390_alg = &aes_alg;
1002		ret = aes_s390_register_skcipher(&ecb_aes_alg);
1003		if (ret)
1004			goto out_err;
1005	}
1006
1007	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1008	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1009	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1010		ret = aes_s390_register_skcipher(&cbc_aes_alg);
1011		if (ret)
1012			goto out_err;
1013	}
1014
1015	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1016	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1017		ret = aes_s390_register_skcipher(&xts_aes_alg);
1018		if (ret)
1019			goto out_err;
1020	}
1021
1022	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1023	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1024	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1025		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1026		if (!ctrblk) {
1027			ret = -ENOMEM;
1028			goto out_err;
1029		}
1030		ret = aes_s390_register_skcipher(&ctr_aes_alg);
1031		if (ret)
1032			goto out_err;
1033	}
1034
1035	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1036	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1037	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1038		ret = crypto_register_aead(&gcm_aes_aead);
1039		if (ret)
1040			goto out_err;
1041		aes_s390_aead_alg = &gcm_aes_aead;
1042	}
1043
1044	return 0;
1045out_err:
1046	aes_s390_fini();
1047	return ret;
1048}
1049
1050module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
1051module_exit(aes_s390_fini);
1052
1053MODULE_ALIAS_CRYPTO("aes-all");
1054
1055MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1056MODULE_LICENSE("GPL");
1057MODULE_IMPORT_NS(CRYPTO_INTERNAL);
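
The module above registers its CPACF-accelerated implementations under the generic algorithm names ("aes", "ecb(aes)", "cbc(aes)", "xts(aes)", "ctr(aes)", "gcm(aes)") at priorities above the software defaults, so ordinary in-kernel users pick them up automatically; key sizes or data layouts the hardware cannot handle are routed to the software fallback allocated in the init/setkey paths. As a minimal sketch of how a consumer might drive one of these transforms through the synchronous-wait skcipher interface (the algorithm name comes from the listing, but the function, key, IV, and buffer values below are illustrative placeholders, not part of the file):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Encrypt one block of buf in place with cbc(aes); returns 0 or -errno. */
static int example_cbc_aes_encrypt_one_block(u8 *buf)
{
	static const u8 key[16] = { 0 };	/* placeholder 128-bit key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to the highest-priority "cbc(aes)" provider, e.g. cbc-aes-s390. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* One in-place block; CBC needs a multiple of AES_BLOCK_SIZE. */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Submit and wait for completion (handles async providers too). */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

The same request pattern applies to the other skcipher modes registered above; the AEAD path ("gcm(aes)") uses the analogous aead_request API instead.
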
v5.4
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Cryptographic API.
   4 *
   5 * s390 implementation of the AES Cipher Algorithm.
   6 *
   7 * s390 Version:
   8 *   Copyright IBM Corp. 2005, 2017
   9 *   Author(s): Jan Glauber (jang@de.ibm.com)
   10 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
  11 *		Patrick Steuer <patrick.steuer@de.ibm.com>
  12 *		Harald Freudenberger <freude@de.ibm.com>
  13 *
  14 * Derived from "crypto/aes_generic.c"
  15 */
  16
  17#define KMSG_COMPONENT "aes_s390"
  18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19
  20#include <crypto/aes.h>
  21#include <crypto/algapi.h>
  22#include <crypto/ghash.h>
  23#include <crypto/internal/aead.h>
  24#include <crypto/internal/skcipher.h>
  25#include <crypto/scatterwalk.h>
  26#include <linux/err.h>
  27#include <linux/module.h>
  28#include <linux/cpufeature.h>
  29#include <linux/init.h>
  30#include <linux/mutex.h>
  31#include <linux/fips.h>
  32#include <linux/string.h>
  33#include <crypto/xts.h>
  34#include <asm/cpacf.h>
  35
  36static u8 *ctrblk;
  37static DEFINE_MUTEX(ctrblk_lock);
  38
  39static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
  40		    kma_functions;
  41
  42struct s390_aes_ctx {
  43	u8 key[AES_MAX_KEY_SIZE];
  44	int key_len;
  45	unsigned long fc;
  46	union {
  47		struct crypto_sync_skcipher *blk;
  48		struct crypto_cipher *cip;
  49	} fallback;
  50};
  51
  52struct s390_xts_ctx {
  53	u8 key[32];
  54	u8 pcc_key[32];
  55	int key_len;
  56	unsigned long fc;
  57	struct crypto_sync_skcipher *fallback;
  58};
  59
  60struct gcm_sg_walk {
  61	struct scatter_walk walk;
  62	unsigned int walk_bytes;
  63	u8 *walk_ptr;
  64	unsigned int walk_bytes_remain;
  65	u8 buf[AES_BLOCK_SIZE];
  66	unsigned int buf_bytes;
  67	u8 *ptr;
  68	unsigned int nbytes;
  69};
  70
  71static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
  72		unsigned int key_len)
  73{
  74	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  75	int ret;
  76
  77	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  78	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
  79			CRYPTO_TFM_REQ_MASK);
  80
  81	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
  82	if (ret) {
  83		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  84		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
  85				CRYPTO_TFM_RES_MASK);
  86	}
  87	return ret;
  88}
  89
  90static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  91		       unsigned int key_len)
  92{
  93	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  94	unsigned long fc;
  95
  96	/* Pick the correct function code based on the key length */
  97	fc = (key_len == 16) ? CPACF_KM_AES_128 :
  98	     (key_len == 24) ? CPACF_KM_AES_192 :
  99	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 100
 101	/* Check if the function code is available */
 102	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 103	if (!sctx->fc)
 104		return setkey_fallback_cip(tfm, in_key, key_len);
 105
 106	sctx->key_len = key_len;
 107	memcpy(sctx->key, in_key, key_len);
 108	return 0;
 109}
 110
 111static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 112{
 113	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 114
 115	if (unlikely(!sctx->fc)) {
 116		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
 117		return;
 118	}
 119	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 120}
 121
 122static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 123{
 124	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 125
 126	if (unlikely(!sctx->fc)) {
 127		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
 128		return;
 129	}
 130	cpacf_km(sctx->fc | CPACF_DECRYPT,
 131		 &sctx->key, out, in, AES_BLOCK_SIZE);
 132}
 133
 134static int fallback_init_cip(struct crypto_tfm *tfm)
 135{
 136	const char *name = tfm->__crt_alg->cra_name;
 137	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 138
 139	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
 140						 CRYPTO_ALG_NEED_FALLBACK);
 141
 142	if (IS_ERR(sctx->fallback.cip)) {
 143		pr_err("Allocating AES fallback algorithm %s failed\n",
 144		       name);
 145		return PTR_ERR(sctx->fallback.cip);
 146	}
 147
 148	return 0;
 149}
 150
 151static void fallback_exit_cip(struct crypto_tfm *tfm)
 152{
 153	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 154
 155	crypto_free_cipher(sctx->fallback.cip);
 156	sctx->fallback.cip = NULL;
 157}
 158
 159static struct crypto_alg aes_alg = {
 160	.cra_name		=	"aes",
 161	.cra_driver_name	=	"aes-s390",
 162	.cra_priority		=	300,
 163	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 164					CRYPTO_ALG_NEED_FALLBACK,
 165	.cra_blocksize		=	AES_BLOCK_SIZE,
 166	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 167	.cra_module		=	THIS_MODULE,
 168	.cra_init               =       fallback_init_cip,
 169	.cra_exit               =       fallback_exit_cip,
 170	.cra_u			=	{
 171		.cipher = {
 172			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
 173			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
 174			.cia_setkey		=	aes_set_key,
 175			.cia_encrypt		=	crypto_aes_encrypt,
 176			.cia_decrypt		=	crypto_aes_decrypt,
 177		}
 178	}
 179};
 180
 181static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
 182		unsigned int len)
 183{
 184	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 185	unsigned int ret;
 186
 187	crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
 188					 CRYPTO_TFM_REQ_MASK);
 189	crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
 190						      CRYPTO_TFM_REQ_MASK);
 191
 192	ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
 193
 194	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 195	tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
 196			  CRYPTO_TFM_RES_MASK;
 197
 198	return ret;
 199}
 200
 201static int fallback_blk_dec(struct blkcipher_desc *desc,
 202		struct scatterlist *dst, struct scatterlist *src,
 203		unsigned int nbytes)
 204{
 205	unsigned int ret;
 206	struct crypto_blkcipher *tfm = desc->tfm;
 207	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
 208	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
 209
 210	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
 211	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 212	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 213
 214	ret = crypto_skcipher_decrypt(req);
 215
 216	skcipher_request_zero(req);
 217	return ret;
 218}
 219
 220static int fallback_blk_enc(struct blkcipher_desc *desc,
 221		struct scatterlist *dst, struct scatterlist *src,
 222		unsigned int nbytes)
 223{
 224	unsigned int ret;
 225	struct crypto_blkcipher *tfm = desc->tfm;
 226	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
 227	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
 228
 229	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
 230	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 231	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 232
 233	ret = crypto_skcipher_encrypt(req);
 234	return ret;
 235}
 236
 237static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 238			   unsigned int key_len)
 239{
 240	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 241	unsigned long fc;
 242
 243	/* Pick the correct function code based on the key length */
 244	fc = (key_len == 16) ? CPACF_KM_AES_128 :
 245	     (key_len == 24) ? CPACF_KM_AES_192 :
 246	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 247
 248	/* Check if the function code is available */
 249	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 250	if (!sctx->fc)
 251		return setkey_fallback_blk(tfm, in_key, key_len);
 252
 253	sctx->key_len = key_len;
 254	memcpy(sctx->key, in_key, key_len);
 255	return 0;
 256}
 257
 258static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 259			 struct blkcipher_walk *walk)
 260{
 261	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 262	unsigned int nbytes, n;
 263	int ret;
 264
 265	ret = blkcipher_walk_virt(desc, walk);
 266	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 267		/* only use complete blocks */
 268		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 269		cpacf_km(sctx->fc | modifier, sctx->key,
 270			 walk->dst.virt.addr, walk->src.virt.addr, n);
 271		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 272	}
 273
 274	return ret;
 275}
 276
 277static int ecb_aes_encrypt(struct blkcipher_desc *desc,
 278			   struct scatterlist *dst, struct scatterlist *src,
 279			   unsigned int nbytes)
 280{
 281	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 282	struct blkcipher_walk walk;
 283
 284	if (unlikely(!sctx->fc))
 285		return fallback_blk_enc(desc, dst, src, nbytes);
 286
 287	blkcipher_walk_init(&walk, dst, src, nbytes);
 288	return ecb_aes_crypt(desc, 0, &walk);
 289}
 290
 291static int ecb_aes_decrypt(struct blkcipher_desc *desc,
 292			   struct scatterlist *dst, struct scatterlist *src,
 293			   unsigned int nbytes)
 294{
 295	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 296	struct blkcipher_walk walk;
 297
 298	if (unlikely(!sctx->fc))
 299		return fallback_blk_dec(desc, dst, src, nbytes);
 300
 301	blkcipher_walk_init(&walk, dst, src, nbytes);
 302	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
 303}
 304
 305static int fallback_init_blk(struct crypto_tfm *tfm)
 306{
 307	const char *name = tfm->__crt_alg->cra_name;
 308	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 309
 310	sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
 311						   CRYPTO_ALG_NEED_FALLBACK);
 312
 313	if (IS_ERR(sctx->fallback.blk)) {
 314		pr_err("Allocating AES fallback algorithm %s failed\n",
 315		       name);
 316		return PTR_ERR(sctx->fallback.blk);
 317	}
 318
 319	return 0;
 320}
 321
 322static void fallback_exit_blk(struct crypto_tfm *tfm)
 323{
 324	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 325
 326	crypto_free_sync_skcipher(sctx->fallback.blk);
 327}
 328
 329static struct crypto_alg ecb_aes_alg = {
 330	.cra_name		=	"ecb(aes)",
 331	.cra_driver_name	=	"ecb-aes-s390",
 332	.cra_priority		=	401,	/* combo: aes + ecb + 1 */
 333	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 334					CRYPTO_ALG_NEED_FALLBACK,
 335	.cra_blocksize		=	AES_BLOCK_SIZE,
 336	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 337	.cra_type		=	&crypto_blkcipher_type,
 338	.cra_module		=	THIS_MODULE,
 339	.cra_init		=	fallback_init_blk,
 340	.cra_exit		=	fallback_exit_blk,
 341	.cra_u			=	{
 342		.blkcipher = {
 343			.min_keysize		=	AES_MIN_KEY_SIZE,
 344			.max_keysize		=	AES_MAX_KEY_SIZE,
 345			.setkey			=	ecb_aes_set_key,
 346			.encrypt		=	ecb_aes_encrypt,
 347			.decrypt		=	ecb_aes_decrypt,
 348		}
 349	}
 350};
 351
 352static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 353			   unsigned int key_len)
 354{
 355	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 356	unsigned long fc;
 357
 358	/* Pick the correct function code based on the key length */
 359	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
 360	     (key_len == 24) ? CPACF_KMC_AES_192 :
 361	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 362
 363	/* Check if the function code is available */
 364	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
 365	if (!sctx->fc)
 366		return setkey_fallback_blk(tfm, in_key, key_len);
 367
 368	sctx->key_len = key_len;
 369	memcpy(sctx->key, in_key, key_len);
 370	return 0;
 371}
 372
 373static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 374			 struct blkcipher_walk *walk)
 375{
 376	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 377	unsigned int nbytes, n;
 378	int ret;
 379	struct {
 380		u8 iv[AES_BLOCK_SIZE];
 381		u8 key[AES_MAX_KEY_SIZE];
 382	} param;
 383
 384	ret = blkcipher_walk_virt(desc, walk);
 385	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
 386	memcpy(param.key, sctx->key, sctx->key_len);
 387	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 388		/* only use complete blocks */
 389		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 390		cpacf_kmc(sctx->fc | modifier, &param,
 391			  walk->dst.virt.addr, walk->src.virt.addr, n);
 392		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 393	}
 394	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
 395	return ret;
 396}
 397
 398static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 399			   struct scatterlist *dst, struct scatterlist *src,
 400			   unsigned int nbytes)
 401{
 402	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 403	struct blkcipher_walk walk;
 404
 405	if (unlikely(!sctx->fc))
 406		return fallback_blk_enc(desc, dst, src, nbytes);
 407
 408	blkcipher_walk_init(&walk, dst, src, nbytes);
 409	return cbc_aes_crypt(desc, 0, &walk);
 410}
 411
 412static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 413			   struct scatterlist *dst, struct scatterlist *src,
 414			   unsigned int nbytes)
 415{
 416	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 417	struct blkcipher_walk walk;
 418
 419	if (unlikely(!sctx->fc))
 420		return fallback_blk_dec(desc, dst, src, nbytes);
 421
 422	blkcipher_walk_init(&walk, dst, src, nbytes);
 423	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
 424}
 425
 426static struct crypto_alg cbc_aes_alg = {
 427	.cra_name		=	"cbc(aes)",
 428	.cra_driver_name	=	"cbc-aes-s390",
 429	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
 430	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 431					CRYPTO_ALG_NEED_FALLBACK,
 432	.cra_blocksize		=	AES_BLOCK_SIZE,
 433	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 434	.cra_type		=	&crypto_blkcipher_type,
 435	.cra_module		=	THIS_MODULE,
 436	.cra_init		=	fallback_init_blk,
 437	.cra_exit		=	fallback_exit_blk,
 438	.cra_u			=	{
 439		.blkcipher = {
 440			.min_keysize		=	AES_MIN_KEY_SIZE,
 441			.max_keysize		=	AES_MAX_KEY_SIZE,
 442			.ivsize			=	AES_BLOCK_SIZE,
 443			.setkey			=	cbc_aes_set_key,
 444			.encrypt		=	cbc_aes_encrypt,
 445			.decrypt		=	cbc_aes_decrypt,
 446		}
 447	}
 448};
 449
 450static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
 451				   unsigned int len)
 452{
 453	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 454	unsigned int ret;
 455
 456	crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
 457					 CRYPTO_TFM_REQ_MASK);
 458	crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
 459						     CRYPTO_TFM_REQ_MASK);
 460
 461	ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
 462
 463	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 464	tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
 465			  CRYPTO_TFM_RES_MASK;
 466
 467	return ret;
 468}
 469
 470static int xts_fallback_decrypt(struct blkcipher_desc *desc,
 471		struct scatterlist *dst, struct scatterlist *src,
 472		unsigned int nbytes)
 473{
 474	struct crypto_blkcipher *tfm = desc->tfm;
 475	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
 476	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
 477	unsigned int ret;
 478
 479	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
 480	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 481	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 482
 483	ret = crypto_skcipher_decrypt(req);
 484
 485	skcipher_request_zero(req);
 486	return ret;
 487}
 488
 489static int xts_fallback_encrypt(struct blkcipher_desc *desc,
 490		struct scatterlist *dst, struct scatterlist *src,
 491		unsigned int nbytes)
 492{
 493	struct crypto_blkcipher *tfm = desc->tfm;
 494	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
 495	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
 496	unsigned int ret;
 497
 498	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
 499	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
 500	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
 501
 502	ret = crypto_skcipher_encrypt(req);
 503
 504	skcipher_request_zero(req);
 505	return ret;
 506}
 507
 508static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 509			   unsigned int key_len)
 510{
 511	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 512	unsigned long fc;
 513	int err;
 514
 515	err = xts_fallback_setkey(tfm, in_key, key_len);
 516	if (err)
 517		return err;
 518
 519	/* In fips mode only 128 bit or 256 bit keys are valid */
 520	if (fips_enabled && key_len != 32 && key_len != 64) {
 521		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 522		return -EINVAL;
 523	}
 524
 525	/* Pick the correct function code based on the key length */
 526	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
 527	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
 528
 529	/* Check if the function code is available */
 530	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 531	if (!xts_ctx->fc)
 532		return 0;
 533
 534	/* Split the XTS key into the two subkeys */
 535	key_len = key_len / 2;
 536	xts_ctx->key_len = key_len;
 537	memcpy(xts_ctx->key, in_key, key_len);
 538	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
 539	return 0;
 540}
 541
 542static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 543			 struct blkcipher_walk *walk)
 544{
 545	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 546	unsigned int offset, nbytes, n;
 547	int ret;
 548	struct {
 549		u8 key[32];
 550		u8 tweak[16];
 551		u8 block[16];
 552		u8 bit[16];
 553		u8 xts[16];
 554	} pcc_param;
 555	struct {
 556		u8 key[32];
 557		u8 init[16];
 558	} xts_param;
 559
 560	ret = blkcipher_walk_virt(desc, walk);
 561	offset = xts_ctx->key_len & 0x10;
 562	memset(pcc_param.block, 0, sizeof(pcc_param.block));
 563	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
 564	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 565	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
 566	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
 567	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
 568
 569	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
 570	memcpy(xts_param.init, pcc_param.xts, 16);
 571
 572	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 573		/* only use complete blocks */
 574		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 575		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
 576			 walk->dst.virt.addr, walk->src.virt.addr, n);
 577		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 578	}
 579	return ret;
 580}
 581
 582static int xts_aes_encrypt(struct blkcipher_desc *desc,
 583			   struct scatterlist *dst, struct scatterlist *src,
 584			   unsigned int nbytes)
 585{
 586	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 587	struct blkcipher_walk walk;
 588
 589	if (!nbytes)
 590		return -EINVAL;
 591
 592	if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
 593		return xts_fallback_encrypt(desc, dst, src, nbytes);
 594
 595	blkcipher_walk_init(&walk, dst, src, nbytes);
 596	return xts_aes_crypt(desc, 0, &walk);
 597}
 598
 599static int xts_aes_decrypt(struct blkcipher_desc *desc,
 600			   struct scatterlist *dst, struct scatterlist *src,
 601			   unsigned int nbytes)
 602{
 603	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
 604	struct blkcipher_walk walk;
 605
 606	if (!nbytes)
 607		return -EINVAL;
 608
 609	if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
 610		return xts_fallback_decrypt(desc, dst, src, nbytes);
 611
 612	blkcipher_walk_init(&walk, dst, src, nbytes);
 613	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
 614}
 615
 616static int xts_fallback_init(struct crypto_tfm *tfm)
 617{
 618	const char *name = tfm->__crt_alg->cra_name;
 619	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 620
 621	xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
 622						  CRYPTO_ALG_NEED_FALLBACK);
 623
 624	if (IS_ERR(xts_ctx->fallback)) {
 625		pr_err("Allocating XTS fallback algorithm %s failed\n",
 626		       name);
 627		return PTR_ERR(xts_ctx->fallback);
 628	}
 629	return 0;
 630}
 631
 632static void xts_fallback_exit(struct crypto_tfm *tfm)
 633{
 634	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
 635
 636	crypto_free_sync_skcipher(xts_ctx->fallback);
 637}
 638
 639static struct crypto_alg xts_aes_alg = {
 640	.cra_name		=	"xts(aes)",
 641	.cra_driver_name	=	"xts-aes-s390",
 642	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
 643	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 644					CRYPTO_ALG_NEED_FALLBACK,
 645	.cra_blocksize		=	AES_BLOCK_SIZE,
 646	.cra_ctxsize		=	sizeof(struct s390_xts_ctx),
 647	.cra_type		=	&crypto_blkcipher_type,
 648	.cra_module		=	THIS_MODULE,
 649	.cra_init		=	xts_fallback_init,
 650	.cra_exit		=	xts_fallback_exit,
 651	.cra_u			=	{
 652		.blkcipher = {
 653			.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 654			.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 655			.ivsize			=	AES_BLOCK_SIZE,
 656			.setkey			=	xts_aes_set_key,
 657			.encrypt		=	xts_aes_encrypt,
 658			.decrypt		=	xts_aes_decrypt,
 659		}
 660	}
 661};
 662
 663static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 664			   unsigned int key_len)
 665{
 666	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 667	unsigned long fc;
 668
 669	/* Pick the correct function code based on the key length */
 670	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
 671	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
 672	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
 673
 674	/* Check if the function code is available */
 675	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
 676	if (!sctx->fc)
 677		return setkey_fallback_blk(tfm, in_key, key_len);
 678
 679	sctx->key_len = key_len;
 680	memcpy(sctx->key, in_key, key_len);
 681	return 0;
 682}
 683
 684static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
 685{
 686	unsigned int i, n;
 687
 688	/* only use complete blocks, max. PAGE_SIZE */
 689	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
 690	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
 691	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
 692		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
 693		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
 694		ctrptr += AES_BLOCK_SIZE;
 695	}
 696	return n;
 697}
 698
 699static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 700			 struct blkcipher_walk *walk)
 701{
 702	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 703	u8 buf[AES_BLOCK_SIZE], *ctrptr;
 704	unsigned int n, nbytes;
 705	int ret, locked;
 706
 707	locked = mutex_trylock(&ctrblk_lock);
 708
 709	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
 710	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
 711		n = AES_BLOCK_SIZE;
 712		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 713			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
 714		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
 715		cpacf_kmctr(sctx->fc | modifier, sctx->key,
 716			    walk->dst.virt.addr, walk->src.virt.addr,
 717			    n, ctrptr);
 718		if (ctrptr == ctrblk)
 719			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
 720			       AES_BLOCK_SIZE);
 721		crypto_inc(walk->iv, AES_BLOCK_SIZE);
 722		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 723	}
 724	if (locked)
 725		mutex_unlock(&ctrblk_lock);
 726	/*
 727	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 728	 */
 729	if (nbytes) {
 730		cpacf_kmctr(sctx->fc | modifier, sctx->key,
 731			    buf, walk->src.virt.addr,
 732			    AES_BLOCK_SIZE, walk->iv);
 733		memcpy(walk->dst.virt.addr, buf, nbytes);
 734		crypto_inc(walk->iv, AES_BLOCK_SIZE);
 735		ret = blkcipher_walk_done(desc, walk, 0);
 736	}
 737
 738	return ret;
 739}
 740
 741static int ctr_aes_encrypt(struct blkcipher_desc *desc,
 742			   struct scatterlist *dst, struct scatterlist *src,
 743			   unsigned int nbytes)
 744{
 745	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 746	struct blkcipher_walk walk;
 747
 748	if (unlikely(!sctx->fc))
 749		return fallback_blk_enc(desc, dst, src, nbytes);
 750
 751	blkcipher_walk_init(&walk, dst, src, nbytes);
 752	return ctr_aes_crypt(desc, 0, &walk);
 753}
 754
 755static int ctr_aes_decrypt(struct blkcipher_desc *desc,
 756			   struct scatterlist *dst, struct scatterlist *src,
 757			   unsigned int nbytes)
 758{
 759	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 760	struct blkcipher_walk walk;
 761
 762	if (unlikely(!sctx->fc))
 763		return fallback_blk_dec(desc, dst, src, nbytes);
 764
 765	blkcipher_walk_init(&walk, dst, src, nbytes);
 766	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
 767}
 768
 769static struct crypto_alg ctr_aes_alg = {
 770	.cra_name		=	"ctr(aes)",
 771	.cra_driver_name	=	"ctr-aes-s390",
 772	.cra_priority		=	402,	/* ecb-aes-s390 + 1 */
 773	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
 774					CRYPTO_ALG_NEED_FALLBACK,
 775	.cra_blocksize		=	1,
 776	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 777	.cra_type		=	&crypto_blkcipher_type,
 778	.cra_module		=	THIS_MODULE,
 779	.cra_init		=	fallback_init_blk,
 780	.cra_exit		=	fallback_exit_blk,
 781	.cra_u			=	{
 782		.blkcipher = {
 783			.min_keysize		=	AES_MIN_KEY_SIZE,
 784			.max_keysize		=	AES_MAX_KEY_SIZE,
 785			.ivsize			=	AES_BLOCK_SIZE,
 786			.setkey			=	ctr_aes_set_key,
 787			.encrypt		=	ctr_aes_encrypt,
 788			.decrypt		=	ctr_aes_decrypt,
 789		}
 790	}
 791};
 792
 793static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
 794			  unsigned int keylen)
 795{
 796	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 797
 798	switch (keylen) {
 799	case AES_KEYSIZE_128:
 800		ctx->fc = CPACF_KMA_GCM_AES_128;
 801		break;
 802	case AES_KEYSIZE_192:
 803		ctx->fc = CPACF_KMA_GCM_AES_192;
 804		break;
 805	case AES_KEYSIZE_256:
 806		ctx->fc = CPACF_KMA_GCM_AES_256;
 807		break;
 808	default:
 809		return -EINVAL;
 810	}
 811
 812	memcpy(ctx->key, key, keylen);
 813	ctx->key_len = keylen;
 814	return 0;
 815}
 816
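/* Accept only the ICV (tag) lengths valid for GCM: 4, 8 and 12..16 bytes. */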
 817static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 818{
 819	switch (authsize) {
 820	case 4:
 821	case 8:
 822	case 12:
 823	case 13:
 824	case 14:
 825	case 15:
 826	case 16:
 827		break;
 828	default:
 829		return -EINVAL;
 830	}
 831
 832	return 0;
 833}
 834
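/* Set up a gcm_sg_walk to process len bytes of the given scatterlist. */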
 835static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
 836			   unsigned int len)
 837{
 838	memset(gw, 0, sizeof(*gw));
 839	gw->walk_bytes_remain = len;
 840	scatterwalk_start(&gw->walk, sg);
 841}
 842
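/*
 * Map the current scatterlist segment, skipping over empty entries.
 * Returns the number of bytes accessible through gw->walk_ptr, or 0 if
 * the scatterlist is exhausted.
 */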
 843static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
 844{
 845	struct scatterlist *nextsg;
 846
 847	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
 848	while (!gw->walk_bytes) {
 849		nextsg = sg_next(gw->walk.sg);
 850		if (!nextsg)
 851			return 0;
 852		scatterwalk_start(&gw->walk, nextsg);
 853		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
 854						   gw->walk_bytes_remain);
 855	}
 856	gw->walk_ptr = scatterwalk_map(&gw->walk);
 857	return gw->walk_bytes;
 858}
 859
 860static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 861					     unsigned int nbytes)
 862{
 863	gw->walk_bytes_remain -= nbytes;
 864	scatterwalk_unmap(&gw->walk);
 865	scatterwalk_advance(&gw->walk, nbytes);
 866	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 867	gw->walk_ptr = NULL;
 868}
 869
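/*
 * Provide at least minbytesneeded contiguous input bytes in gw->ptr.
 * If the current segment is large enough it is used in place, otherwise
 * data is gathered into gw->buf until enough bytes are available.
 * Returns the number of usable bytes, or 0 if no input is left.
 */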
 870static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 871{
 872	int n;
 873
 874	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 875		gw->ptr = gw->buf;
 876		gw->nbytes = gw->buf_bytes;
 877		goto out;
 878	}
 879
 880	if (gw->walk_bytes_remain == 0) {
 881		gw->ptr = NULL;
 882		gw->nbytes = 0;
 883		goto out;
 884	}
 885
 886	if (!_gcm_sg_clamp_and_map(gw)) {
 887		gw->ptr = NULL;
 888		gw->nbytes = 0;
 889		goto out;
 890	}
 891
 892	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 893		gw->ptr = gw->walk_ptr;
 894		gw->nbytes = gw->walk_bytes;
 895		goto out;
 896	}
 897
 898	while (1) {
 899		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 900		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 901		gw->buf_bytes += n;
 902		_gcm_sg_unmap_and_advance(gw, n);
 903		if (gw->buf_bytes >= minbytesneeded) {
 904			gw->ptr = gw->buf;
 905			gw->nbytes = gw->buf_bytes;
 906			goto out;
 907		}
 908		if (!_gcm_sg_clamp_and_map(gw)) {
 909			gw->ptr = NULL;
 910			gw->nbytes = 0;
 911			goto out;
 912		}
 913	}
 914
 915out:
 916	return gw->nbytes;
 917}
 918
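/*
 * Provide an output area of at least minbytesneeded bytes in gw->ptr.
 * If the current segment is too small, gw->buf is offered instead and
 * gcm_out_walk_done() later copies the buffered bytes to the scatterlist.
 */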
 919static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 920{
 921	if (gw->walk_bytes_remain == 0) {
 922		gw->ptr = NULL;
 923		gw->nbytes = 0;
 924		goto out;
 925	}
 926
 927	if (!_gcm_sg_clamp_and_map(gw)) {
 928		gw->ptr = NULL;
 929		gw->nbytes = 0;
 930		goto out;
 931	}
 932
 933	if (gw->walk_bytes >= minbytesneeded) {
 934		gw->ptr = gw->walk_ptr;
 935		gw->nbytes = gw->walk_bytes;
 936		goto out;
 937	}
 938
 939	scatterwalk_unmap(&gw->walk);
 940	gw->walk_ptr = NULL;
 941
 942	gw->ptr = gw->buf;
 943	gw->nbytes = sizeof(gw->buf);
 944
 945out:
 946	return gw->nbytes;
 947}
 948
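/*
 * Consume bytesdone input bytes: shift any remaining buffered bytes to
 * the front of gw->buf, or advance the scatterwalk.
 */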
 949static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 950{
 951	if (gw->ptr == NULL)
 952		return 0;
 953
 954	if (gw->ptr == gw->buf) {
 955		int n = gw->buf_bytes - bytesdone;
 956		if (n > 0) {
 957			memmove(gw->buf, gw->buf + bytesdone, n);
 958			gw->buf_bytes = n;
 959		} else
 960			gw->buf_bytes = 0;
 961	} else
 962		_gcm_sg_unmap_and_advance(gw, bytesdone);
 963
 964	return bytesdone;
 965}
 966
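/*
 * Consume bytesdone output bytes: data produced in gw->buf is copied
 * back to the scatterlist, otherwise the scatterwalk is just advanced.
 */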
 967static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 968{
 969	int i, n;
 970
 971	if (gw->ptr == NULL)
 972		return 0;
 973
 974	if (gw->ptr == gw->buf) {
 975		for (i = 0; i < bytesdone; i += n) {
 976			if (!_gcm_sg_clamp_and_map(gw))
 977				return i;
 978			n = min(gw->walk_bytes, bytesdone - i);
 979			memcpy(gw->walk_ptr, gw->buf + i, n);
 980			_gcm_sg_unmap_and_advance(gw, n);
 981		}
 982	} else
 983		_gcm_sg_unmap_and_advance(gw, bytesdone);
 984
 985	return bytesdone;
 986}
 987
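/*
 * Perform AES-GCM with the CPACF KMA instruction. The AAD and the
 * plain-/ciphertext are fed to KMA in chunks as delivered by the
 * gcm_sg_walk helpers; finally the tag is verified (decrypt) or
 * appended to the destination (encrypt).
 */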
 988static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 989{
 990	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 991	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 992	unsigned int ivsize = crypto_aead_ivsize(tfm);
 993	unsigned int taglen = crypto_aead_authsize(tfm);
 994	unsigned int aadlen = req->assoclen;
 995	unsigned int pclen = req->cryptlen;
 996	int ret = 0;
 997
 998	unsigned int n, len, in_bytes, out_bytes,
 999		     min_bytes, bytes, aad_bytes, pc_bytes;
1000	struct gcm_sg_walk gw_in, gw_out;
1001	u8 tag[GHASH_DIGEST_SIZE];
1002
1003	struct {
1004		u32 _[3];		/* reserved */
1005		u32 cv;			/* Counter Value */
1006		u8 t[GHASH_DIGEST_SIZE];/* Tag */
1007		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
1008		u64 taadl;		/* Total AAD Length */
1009		u64 tpcl;		/* Total Plain-/Cipher-text Length */
1010		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
1011		u8 k[AES_MAX_KEY_SIZE];	/* Key */
1012	} param;
1013
1014	/*
1015	 * encrypt
1016	 *   req->src: aad||plaintext
1017	 *   req->dst: aad||ciphertext||tag
1018	 * decrypt
1019	 *   req->src: aad||ciphertext||tag
1020	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
1021	 * aad, plaintext and ciphertext may be empty.
1022	 */
1023	if (flags & CPACF_DECRYPT)
1024		pclen -= taglen;
1025	len = aadlen + pclen;
1026
1027	memset(&param, 0, sizeof(param));
1028	param.cv = 1;
1029	param.taadl = aadlen * 8;
1030	param.tpcl = pclen * 8;
1031	memcpy(param.j0, req->iv, ivsize);
1032	*(u32 *)(param.j0 + ivsize) = 1;
1033	memcpy(param.k, ctx->key, ctx->key_len);
1034
1035	gcm_walk_start(&gw_in, req->src, len);
1036	gcm_walk_start(&gw_out, req->dst, len);
1037
1038	do {
1039		min_bytes = min_t(unsigned int,
1040				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
1041		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
1042		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
1043		bytes = min(in_bytes, out_bytes);
1044
1045		if (aadlen + pclen <= bytes) {
1046			aad_bytes = aadlen;
1047			pc_bytes = pclen;
1048			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
1049		} else {
1050			if (aadlen <= bytes) {
1051				aad_bytes = aadlen;
1052				pc_bytes = (bytes - aadlen) &
1053					   ~(AES_BLOCK_SIZE - 1);
1054				flags |= CPACF_KMA_LAAD;
1055			} else {
1056				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
1057				pc_bytes = 0;
1058			}
1059		}
1060
1061		if (aad_bytes > 0)
1062			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
1063
1064		cpacf_kma(ctx->fc | flags, &param,
1065			  gw_out.ptr + aad_bytes,
1066			  gw_in.ptr + aad_bytes, pc_bytes,
1067			  gw_in.ptr, aad_bytes);
1068
1069		n = aad_bytes + pc_bytes;
1070		if (gcm_in_walk_done(&gw_in, n) != n)
1071			return -ENOMEM;
1072		if (gcm_out_walk_done(&gw_out, n) != n)
1073			return -ENOMEM;
1074		aadlen -= aad_bytes;
1075		pclen -= pc_bytes;
1076	} while (aadlen + pclen > 0);
1077
1078	if (flags & CPACF_DECRYPT) {
1079		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
1080		if (crypto_memneq(tag, param.t, taglen))
1081			ret = -EBADMSG;
1082	} else
1083		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
1084
1085	memzero_explicit(&param, sizeof(param));
1086	return ret;
1087}
1088
1089static int gcm_aes_encrypt(struct aead_request *req)
1090{
1091	return gcm_aes_crypt(req, CPACF_ENCRYPT);
1092}
1093
1094static int gcm_aes_decrypt(struct aead_request *req)
1095{
1096	return gcm_aes_crypt(req, CPACF_DECRYPT);
1097}
1098
1099static struct aead_alg gcm_aes_aead = {
1100	.setkey			= gcm_aes_setkey,
1101	.setauthsize		= gcm_aes_setauthsize,
1102	.encrypt		= gcm_aes_encrypt,
1103	.decrypt		= gcm_aes_decrypt,
1104
1105	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
1106	.maxauthsize		= GHASH_DIGEST_SIZE,
1107	.chunksize		= AES_BLOCK_SIZE,
1108
1109	.base			= {
1110		.cra_blocksize		= 1,
1111		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
1112		.cra_priority		= 900,
1113		.cra_name		= "gcm(aes)",
1114		.cra_driver_name	= "gcm-aes-s390",
1115		.cra_module		= THIS_MODULE,
1116	},
1117};
1118
1119static struct crypto_alg *aes_s390_algs_ptr[5];
1120	static int aes_s390_algs_num;
1121	static struct aead_alg *aes_s390_aead_alg;
1122
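/* Register an alg and remember it so aes_s390_fini() can unregister it. */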
1123static int aes_s390_register_alg(struct crypto_alg *alg)
1124{
1125	int ret;
1126
1127	ret = crypto_register_alg(alg);
1128	if (!ret)
1129		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
1130	return ret;
1131}
1132
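/* Unregister everything set up by aes_s390_init() and free the ctr page. */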
1133static void aes_s390_fini(void)
1134{
1135	while (aes_s390_algs_num--)
1136		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
1137	if (ctrblk)
1138		free_page((unsigned long) ctrblk);
1139
1140	if (aes_s390_aead_alg)
1141		crypto_unregister_aead(aes_s390_aead_alg);
1142}
1143
1144static int __init aes_s390_init(void)
1145{
1146	int ret;
1147
1148	/* Query available functions for KM, KMC, KMCTR and KMA */
1149	cpacf_query(CPACF_KM, &km_functions);
1150	cpacf_query(CPACF_KMC, &kmc_functions);
1151	cpacf_query(CPACF_KMCTR, &kmctr_functions);
1152	cpacf_query(CPACF_KMA, &kma_functions);
1153
1154	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
1155	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
1156	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
1157		ret = aes_s390_register_alg(&aes_alg);
1158		if (ret)
1159			goto out_err;
1160		ret = aes_s390_register_alg(&ecb_aes_alg);
1161		if (ret)
1162			goto out_err;
1163	}
1164
1165	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1166	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1167	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1168		ret = aes_s390_register_alg(&cbc_aes_alg);
1169		if (ret)
1170			goto out_err;
1171	}
1172
1173	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1174	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1175		ret = aes_s390_register_alg(&xts_aes_alg);
1176		if (ret)
1177			goto out_err;
1178	}
1179
1180	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1181	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1182	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1183		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1184		if (!ctrblk) {
1185			ret = -ENOMEM;
1186			goto out_err;
1187		}
1188		ret = aes_s390_register_alg(&ctr_aes_alg);
1189		if (ret)
1190			goto out_err;
1191	}
1192
1193	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1194	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1195	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1196		ret = crypto_register_aead(&gcm_aes_aead);
1197		if (ret)
1198			goto out_err;
1199		aes_s390_aead_alg = &gcm_aes_aead;
1200	}
1201
1202	return 0;
1203out_err:
1204	aes_s390_fini();
1205	return ret;
1206}
1207
1208module_cpu_feature_match(MSA, aes_s390_init);
1209module_exit(aes_s390_fini);
1210
1211MODULE_ALIAS_CRYPTO("aes-all");
1212
1213MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1214MODULE_LICENSE("GPL");