arch/s390/crypto/aes_s390.c (Linux v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Cryptographic API.
   4 *
   5 * s390 implementation of the AES Cipher Algorithm.
   6 *
   7 * s390 Version:
   8 *   Copyright IBM Corp. 2005, 2017
   9 *   Author(s): Jan Glauber (jang@de.ibm.com)
   10 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
  11 *		Patrick Steuer <patrick.steuer@de.ibm.com>
  12 *		Harald Freudenberger <freude@de.ibm.com>
  13 *
  14 * Derived from "crypto/aes_generic.c"
  15 */
  16
  17#define KMSG_COMPONENT "aes_s390"
  18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19
  20#include <crypto/aes.h>
  21#include <crypto/algapi.h>
  22#include <crypto/ghash.h>
  23#include <crypto/internal/aead.h>
  24#include <crypto/internal/cipher.h>
  25#include <crypto/internal/skcipher.h>
  26#include <crypto/scatterwalk.h>
  27#include <linux/err.h>
  28#include <linux/module.h>
  29#include <linux/cpufeature.h>
  30#include <linux/init.h>
  31#include <linux/mutex.h>
  32#include <linux/fips.h>
  33#include <linux/string.h>
  34#include <crypto/xts.h>
  35#include <asm/cpacf.h>
  36
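/*
 * Shared page used to pre-compute a run of counter blocks for CTR mode.
 * It is guarded by a mutex; requests that cannot take the lock simply
 * process one block at a time (see ctr_aes_crypt()).
 */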
  37static u8 *ctrblk;
  38static DEFINE_MUTEX(ctrblk_lock);
  39
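/*
 * Masks of the CPACF function codes available on this machine, filled in
 * once at module init via cpacf_query() and consulted by the setkey
 * routines through cpacf_test_func().
 */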
  40static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
  41		    kma_functions;
  42
  43struct s390_aes_ctx {
  44	u8 key[AES_MAX_KEY_SIZE];
  45	int key_len;
  46	unsigned long fc;
  47	union {
  48		struct crypto_skcipher *skcipher;
  49		struct crypto_cipher *cip;
  50	} fallback;
  51};
  52
  53struct s390_xts_ctx {
  54	union {
  55		u8 keys[64];
  56		struct {
  57			u8 key[32];
  58			u8 pcc_key[32];
  59		};
  60	};
  61	int key_len;
  62	unsigned long fc;
  63	struct crypto_skcipher *fallback;
  64};
  65
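/*
 * State for walking the source/destination scatterlists of a GCM request.
 * CPACF KMA needs virtually contiguous data, so runs shorter than one AES
 * block are gathered into (or scattered out of) the buf[] bounce buffer;
 * longer runs are mapped and processed in place.
 */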
  66struct gcm_sg_walk {
  67	struct scatter_walk walk;
  68	unsigned int walk_bytes;
  69	u8 *walk_ptr;
  70	unsigned int walk_bytes_remain;
  71	u8 buf[AES_BLOCK_SIZE];
  72	unsigned int buf_bytes;
  73	u8 *ptr;
  74	unsigned int nbytes;
  75};
  76
  77static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
  78		unsigned int key_len)
  79{
  80	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  81
  82	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  83	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
  84			CRYPTO_TFM_REQ_MASK);
  85
  86	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
  87}
  88
  89static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  90		       unsigned int key_len)
  91{
  92	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  93	unsigned long fc;
  94
  95	/* Pick the correct function code based on the key length */
  96	fc = (key_len == 16) ? CPACF_KM_AES_128 :
  97	     (key_len == 24) ? CPACF_KM_AES_192 :
  98	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
  99
 100	/* Check if the function code is available */
 101	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 102	if (!sctx->fc)
 103		return setkey_fallback_cip(tfm, in_key, key_len);
 104
 105	sctx->key_len = key_len;
 106	memcpy(sctx->key, in_key, key_len);
 107	return 0;
 108}
 109
 110static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 111{
 112	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 113
 114	if (unlikely(!sctx->fc)) {
 115		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
 116		return;
 117	}
 118	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 119}
 120
 121static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 122{
 123	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 124
 125	if (unlikely(!sctx->fc)) {
 126		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
 127		return;
 128	}
 129	cpacf_km(sctx->fc | CPACF_DECRYPT,
 130		 &sctx->key, out, in, AES_BLOCK_SIZE);
 131}
 132
 133static int fallback_init_cip(struct crypto_tfm *tfm)
 134{
 135	const char *name = tfm->__crt_alg->cra_name;
 136	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 137
 138	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
 139						 CRYPTO_ALG_NEED_FALLBACK);
 140
 141	if (IS_ERR(sctx->fallback.cip)) {
 142		pr_err("Allocating AES fallback algorithm %s failed\n",
 143		       name);
 144		return PTR_ERR(sctx->fallback.cip);
 145	}
 146
 147	return 0;
 148}
 149
 150static void fallback_exit_cip(struct crypto_tfm *tfm)
 151{
 152	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 153
 154	crypto_free_cipher(sctx->fallback.cip);
 155	sctx->fallback.cip = NULL;
 156}
 157
 158static struct crypto_alg aes_alg = {
 159	.cra_name		=	"aes",
 160	.cra_driver_name	=	"aes-s390",
 161	.cra_priority		=	300,
 162	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 163					CRYPTO_ALG_NEED_FALLBACK,
 164	.cra_blocksize		=	AES_BLOCK_SIZE,
 165	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 166	.cra_module		=	THIS_MODULE,
 167	.cra_init               =       fallback_init_cip,
 168	.cra_exit               =       fallback_exit_cip,
 169	.cra_u			=	{
 170		.cipher = {
 171			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
 172			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
 173			.cia_setkey		=	aes_set_key,
 174			.cia_encrypt		=	crypto_aes_encrypt,
 175			.cia_decrypt		=	crypto_aes_decrypt,
 176		}
 177	}
 178};
 179
 180static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 181				    unsigned int len)
 182{
 183	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 184
 185	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
 186				    CRYPTO_TFM_REQ_MASK);
 187	crypto_skcipher_set_flags(sctx->fallback.skcipher,
 188				  crypto_skcipher_get_flags(tfm) &
 189				  CRYPTO_TFM_REQ_MASK);
 190	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
 191}
 192
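/*
 * Forward a request to the software fallback tfm. The request context was
 * sized in fallback_init_skcipher() to hold a full skcipher_request for
 * the fallback, so the original request can simply be copied and re-aimed.
 */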
 193static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
 194				   struct skcipher_request *req,
 195				   unsigned long modifier)
 196{
 197	struct skcipher_request *subreq = skcipher_request_ctx(req);
 198
 199	*subreq = *req;
 200	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
 201	return (modifier & CPACF_DECRYPT) ?
 202		crypto_skcipher_decrypt(subreq) :
 203		crypto_skcipher_encrypt(subreq);
 204}
 205
 206static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 207			   unsigned int key_len)
 208{
 209	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 210	unsigned long fc;
 211
 212	/* Pick the correct function code based on the key length */
 213	fc = (key_len == 16) ? CPACF_KM_AES_128 :
 214	     (key_len == 24) ? CPACF_KM_AES_192 :
 215	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 216
 217	/* Check if the function code is available */
 218	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 219	if (!sctx->fc)
 220		return setkey_fallback_skcipher(tfm, in_key, key_len);
 221
 222	sctx->key_len = key_len;
 223	memcpy(sctx->key, in_key, key_len);
 224	return 0;
 225}
 226
 227static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 228{
 229	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 230	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 231	struct skcipher_walk walk;
 232	unsigned int nbytes, n;
 233	int ret;
 234
 235	if (unlikely(!sctx->fc))
 236		return fallback_skcipher_crypt(sctx, req, modifier);
 237
 238	ret = skcipher_walk_virt(&walk, req, false);
 239	while ((nbytes = walk.nbytes) != 0) {
 240		/* only use complete blocks */
 241		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 242		cpacf_km(sctx->fc | modifier, sctx->key,
 243			 walk.dst.virt.addr, walk.src.virt.addr, n);
 244		ret = skcipher_walk_done(&walk, nbytes - n);
 245	}
 246	return ret;
 247}
 248
 249static int ecb_aes_encrypt(struct skcipher_request *req)
 250{
 251	return ecb_aes_crypt(req, 0);
 252}
 253
 254static int ecb_aes_decrypt(struct skcipher_request *req)
 255{
 256	return ecb_aes_crypt(req, CPACF_DECRYPT);
 257}
 258
 259static int fallback_init_skcipher(struct crypto_skcipher *tfm)
 260{
 261	const char *name = crypto_tfm_alg_name(&tfm->base);
 262	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 263
 264	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
 265				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 266
 267	if (IS_ERR(sctx->fallback.skcipher)) {
 268		pr_err("Allocating AES fallback algorithm %s failed\n",
 269		       name);
 270		return PTR_ERR(sctx->fallback.skcipher);
 271	}
 272
 273	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 274				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
 275	return 0;
 276}
 277
 278static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
 279{
 280	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 281
 282	crypto_free_skcipher(sctx->fallback.skcipher);
 283}
 284
 285static struct skcipher_alg ecb_aes_alg = {
 286	.base.cra_name		=	"ecb(aes)",
 287	.base.cra_driver_name	=	"ecb-aes-s390",
 288	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
 289	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 290	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 291	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 292	.base.cra_module	=	THIS_MODULE,
 293	.init			=	fallback_init_skcipher,
 294	.exit			=	fallback_exit_skcipher,
 295	.min_keysize		=	AES_MIN_KEY_SIZE,
 296	.max_keysize		=	AES_MAX_KEY_SIZE,
 297	.setkey			=	ecb_aes_set_key,
 298	.encrypt		=	ecb_aes_encrypt,
 299	.decrypt		=	ecb_aes_decrypt,
 300};
 301
 302static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 303			   unsigned int key_len)
 304{
 305	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 306	unsigned long fc;
 307
 308	/* Pick the correct function code based on the key length */
 309	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
 310	     (key_len == 24) ? CPACF_KMC_AES_192 :
 311	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 312
 313	/* Check if the function code is available */
 314	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
 315	if (!sctx->fc)
 316		return setkey_fallback_skcipher(tfm, in_key, key_len);
 317
 318	sctx->key_len = key_len;
 319	memcpy(sctx->key, in_key, key_len);
 320	return 0;
 321}
 322
 323static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 324{
 325	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 326	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 327	struct skcipher_walk walk;
 328	unsigned int nbytes, n;
 329	int ret;
 330	struct {
 331		u8 iv[AES_BLOCK_SIZE];
 332		u8 key[AES_MAX_KEY_SIZE];
 333	} param;
 334
 335	if (unlikely(!sctx->fc))
 336		return fallback_skcipher_crypt(sctx, req, modifier);
 337
 338	ret = skcipher_walk_virt(&walk, req, false);
 339	if (ret)
 340		return ret;
 341	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
 342	memcpy(param.key, sctx->key, sctx->key_len);
 343	while ((nbytes = walk.nbytes) != 0) {
 344		/* only use complete blocks */
 345		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 346		cpacf_kmc(sctx->fc | modifier, &param,
 347			  walk.dst.virt.addr, walk.src.virt.addr, n);
 348		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
 349		ret = skcipher_walk_done(&walk, nbytes - n);
 350	}
 351	memzero_explicit(&param, sizeof(param));
 352	return ret;
 353}
 354
 355static int cbc_aes_encrypt(struct skcipher_request *req)
 356{
 357	return cbc_aes_crypt(req, 0);
 358}
 359
 360static int cbc_aes_decrypt(struct skcipher_request *req)
 361{
 362	return cbc_aes_crypt(req, CPACF_DECRYPT);
 363}
 364
 365static struct skcipher_alg cbc_aes_alg = {
 366	.base.cra_name		=	"cbc(aes)",
 367	.base.cra_driver_name	=	"cbc-aes-s390",
 368	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 369	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 370	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 371	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 372	.base.cra_module	=	THIS_MODULE,
 373	.init			=	fallback_init_skcipher,
 374	.exit			=	fallback_exit_skcipher,
 375	.min_keysize		=	AES_MIN_KEY_SIZE,
 376	.max_keysize		=	AES_MAX_KEY_SIZE,
 377	.ivsize			=	AES_BLOCK_SIZE,
 378	.setkey			=	cbc_aes_set_key,
 379	.encrypt		=	cbc_aes_encrypt,
 380	.decrypt		=	cbc_aes_decrypt,
 381};
 382
 383static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
 384			       unsigned int len)
 385{
 386	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 387
 388	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
 389	crypto_skcipher_set_flags(xts_ctx->fallback,
 390				  crypto_skcipher_get_flags(tfm) &
 391				  CRYPTO_TFM_REQ_MASK);
 392	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
 393}
 394
 395static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 396			   unsigned int key_len)
 397{
 398	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 399	unsigned long fc;
 400	int err;
 401
 402	err = xts_fallback_setkey(tfm, in_key, key_len);
 403	if (err)
 404		return err;
 405
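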
 406	/* Pick the correct function code based on the key length */
 407	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
 408	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
 409
 410	/* Check if the function code is available */
 411	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 412	if (!xts_ctx->fc)
 413		return 0;
 414
 415	/* Split the XTS key into the two subkeys */
 416	key_len = key_len / 2;
 417	xts_ctx->key_len = key_len;
 418	memcpy(xts_ctx->key, in_key, key_len);
 419	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
 420	return 0;
 421}
 422
 423static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 424{
 425	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 426	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 427	struct skcipher_walk walk;
 428	unsigned int offset, nbytes, n;
 429	int ret;
 430	struct {
 431		u8 key[32];
 432		u8 tweak[16];
 433		u8 block[16];
 434		u8 bit[16];
 435		u8 xts[16];
 436	} pcc_param;
 437	struct {
 438		u8 key[32];
 439		u8 init[16];
 440	} xts_param;
 441
 442	if (req->cryptlen < AES_BLOCK_SIZE)
 443		return -EINVAL;
 444
 445	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
 446		struct skcipher_request *subreq = skcipher_request_ctx(req);
 447
 448		*subreq = *req;
 449		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
 450		return (modifier & CPACF_DECRYPT) ?
 451			crypto_skcipher_decrypt(subreq) :
 452			crypto_skcipher_encrypt(subreq);
 453	}
 454
 455	ret = skcipher_walk_virt(&walk, req, false);
 456	if (ret)
 457		return ret;
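	/*
	 * The CPACF parameter blocks for XTS-128 and XTS-256 differ only in
	 * key size. Right-align the key in the 32-byte field so that
	 * &key[offset] is the start of a correctly laid out block for either
	 * key length. PCC then computes the encrypted tweak into
	 * pcc_param.xts, which seeds the KM operation below via
	 * xts_param.init.
	 */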
 458	offset = xts_ctx->key_len & 0x10;
 459	memset(pcc_param.block, 0, sizeof(pcc_param.block));
 460	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
 461	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 462	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
 463	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
 464	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
 465
 466	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
 467	memcpy(xts_param.init, pcc_param.xts, 16);
 468
 469	while ((nbytes = walk.nbytes) != 0) {
 470		/* only use complete blocks */
 471		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 472		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
 473			 walk.dst.virt.addr, walk.src.virt.addr, n);
 474		ret = skcipher_walk_done(&walk, nbytes - n);
 475	}
 476	memzero_explicit(&pcc_param, sizeof(pcc_param));
 477	memzero_explicit(&xts_param, sizeof(xts_param));
 478	return ret;
 479}
 480
 481static int xts_aes_encrypt(struct skcipher_request *req)
 482{
 483	return xts_aes_crypt(req, 0);
 484}
 485
 486static int xts_aes_decrypt(struct skcipher_request *req)
 487{
 488	return xts_aes_crypt(req, CPACF_DECRYPT);
 489}
 490
 491static int xts_fallback_init(struct crypto_skcipher *tfm)
 492{
 493	const char *name = crypto_tfm_alg_name(&tfm->base);
 494	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 495
 496	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
 497				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 498
 499	if (IS_ERR(xts_ctx->fallback)) {
 500		pr_err("Allocating XTS fallback algorithm %s failed\n",
 501		       name);
 502		return PTR_ERR(xts_ctx->fallback);
 503	}
 504	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 505				    crypto_skcipher_reqsize(xts_ctx->fallback));
 506	return 0;
 507}
 508
 509static void xts_fallback_exit(struct crypto_skcipher *tfm)
 510{
 511	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 512
 513	crypto_free_skcipher(xts_ctx->fallback);
 514}
 515
 516static struct skcipher_alg xts_aes_alg = {
 517	.base.cra_name		=	"xts(aes)",
 518	.base.cra_driver_name	=	"xts-aes-s390",
 519	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 520	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 521	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 522	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
 523	.base.cra_module	=	THIS_MODULE,
 524	.init			=	xts_fallback_init,
 525	.exit			=	xts_fallback_exit,
 526	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 527	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 528	.ivsize			=	AES_BLOCK_SIZE,
 529	.setkey			=	xts_aes_set_key,
 530	.encrypt		=	xts_aes_encrypt,
 531	.decrypt		=	xts_aes_decrypt,
 532};
 533
 534static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 535			       unsigned int key_len)
 536{
 537	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 538	unsigned long fc;
 539	int err;
 540
 541	err = xts_fallback_setkey(tfm, in_key, key_len);
 542	if (err)
 543		return err;
 544
 545	/* Pick the correct function code based on the key length */
 546	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
 547	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
 548
 549	/* Check if the function code is available */
 550	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 551	if (!xts_ctx->fc)
 552		return 0;
 553
 554	/* Store double-key */
 555	memcpy(xts_ctx->keys, in_key, key_len);
 556	xts_ctx->key_len = key_len;
 557	return 0;
 558}
 559
  560static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 561{
 562	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 563	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 564	unsigned int offset, nbytes, n;
 565	struct skcipher_walk walk;
 566	int ret;
 567	struct {
 568		__u8 key[64];
 569		__u8 tweak[16];
 570		__u8 nap[16];
 571	} fxts_param = {
 572		.nap = {0},
 573	};
 574
 575	if (req->cryptlen < AES_BLOCK_SIZE)
 576		return -EINVAL;
 577
 578	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
 579		struct skcipher_request *subreq = skcipher_request_ctx(req);
 580
 581		*subreq = *req;
 582		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
 583		return (modifier & CPACF_DECRYPT) ?
 584			crypto_skcipher_decrypt(subreq) :
 585			crypto_skcipher_encrypt(subreq);
 586	}
 587
 588	ret = skcipher_walk_virt(&walk, req, false);
 589	if (ret)
 590		return ret;
 591
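	/*
	 * As in xts_aes_crypt() above: right-align the shorter double key so
	 * that &key[offset] matches the KM-XTS-*-FULL parameter block layout.
	 */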
 592	offset = xts_ctx->key_len & 0x20;
 593	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
 594	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
 595	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
 596
 597	while ((nbytes = walk.nbytes) != 0) {
 598		/* only use complete blocks */
 599		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 600		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
 601			 walk.dst.virt.addr, walk.src.virt.addr, n);
 602		ret = skcipher_walk_done(&walk, nbytes - n);
 603	}
 604	memzero_explicit(&fxts_param, sizeof(fxts_param));
 605	return ret;
 606}
 607
 608static int fullxts_aes_encrypt(struct skcipher_request *req)
 609{
 610	return fullxts_aes_crypt(req, 0);
 611}
 612
 613static int fullxts_aes_decrypt(struct skcipher_request *req)
 614{
 615	return fullxts_aes_crypt(req, CPACF_DECRYPT);
 616}
 617
 618static struct skcipher_alg fullxts_aes_alg = {
 619	.base.cra_name		=	"xts(aes)",
 620	.base.cra_driver_name	=	"full-xts-aes-s390",
  621	.base.cra_priority	=	403,	/* xts-aes-s390 + 1 */
 622	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 623	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 624	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
 625	.base.cra_module	=	THIS_MODULE,
 626	.init			=	xts_fallback_init,
 627	.exit			=	xts_fallback_exit,
 628	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 629	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 630	.ivsize			=	AES_BLOCK_SIZE,
 631	.setkey			=	fullxts_aes_set_key,
 632	.encrypt		=	fullxts_aes_encrypt,
 633	.decrypt		=	fullxts_aes_decrypt,
 634};
 635
 636static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 637			   unsigned int key_len)
 638{
 639	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 640	unsigned long fc;
 641
 642	/* Pick the correct function code based on the key length */
 643	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
 644	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
 645	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
 646
 647	/* Check if the function code is available */
 648	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
 649	if (!sctx->fc)
 650		return setkey_fallback_skcipher(tfm, in_key, key_len);
 651
 652	sctx->key_len = key_len;
 653	memcpy(sctx->key, in_key, key_len);
 654	return 0;
 655}
 656
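/*
 * Expand the IV into a run of consecutive counter values in the shared
 * ctrblk page so that a single CPACF KMCTR invocation can process many
 * blocks. Returns how many payload bytes' worth of counters were prepared.
 */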
 657static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
 658{
 659	unsigned int i, n;
 660
 661	/* only use complete blocks, max. PAGE_SIZE */
 662	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
 663	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
 664	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
 665		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
 666		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
 667		ctrptr += AES_BLOCK_SIZE;
 668	}
 669	return n;
 670}
 671
 672static int ctr_aes_crypt(struct skcipher_request *req)
 673{
 674	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 675	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 676	u8 buf[AES_BLOCK_SIZE], *ctrptr;
 677	struct skcipher_walk walk;
 678	unsigned int n, nbytes;
 679	int ret, locked;
 680
 681	if (unlikely(!sctx->fc))
 682		return fallback_skcipher_crypt(sctx, req, 0);
 683
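	/*
	 * Take the shared counter page opportunistically: if another request
	 * holds it, process one block per iteration using walk.iv directly
	 * rather than sleeping on the mutex.
	 */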
 684	locked = mutex_trylock(&ctrblk_lock);
 685
 686	ret = skcipher_walk_virt(&walk, req, false);
 687	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 688		n = AES_BLOCK_SIZE;
 689
 690		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 691			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
 692		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
 693		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
 694			    walk.src.virt.addr, n, ctrptr);
 695		if (ctrptr == ctrblk)
 696			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
 697			       AES_BLOCK_SIZE);
 698		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 699		ret = skcipher_walk_done(&walk, nbytes - n);
 700	}
 701	if (locked)
 702		mutex_unlock(&ctrblk_lock);
 703	/*
 704	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 705	 */
 706	if (nbytes) {
 707		memset(buf, 0, AES_BLOCK_SIZE);
 708		memcpy(buf, walk.src.virt.addr, nbytes);
 709		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
 710			    AES_BLOCK_SIZE, walk.iv);
 711		memcpy(walk.dst.virt.addr, buf, nbytes);
 712		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 713		ret = skcipher_walk_done(&walk, 0);
 714	}
 715
 716	return ret;
 717}
 718
 719static struct skcipher_alg ctr_aes_alg = {
 720	.base.cra_name		=	"ctr(aes)",
 721	.base.cra_driver_name	=	"ctr-aes-s390",
 722	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 723	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 724	.base.cra_blocksize	=	1,
 725	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 726	.base.cra_module	=	THIS_MODULE,
 727	.init			=	fallback_init_skcipher,
 728	.exit			=	fallback_exit_skcipher,
 729	.min_keysize		=	AES_MIN_KEY_SIZE,
 730	.max_keysize		=	AES_MAX_KEY_SIZE,
 731	.ivsize			=	AES_BLOCK_SIZE,
 732	.setkey			=	ctr_aes_set_key,
 733	.encrypt		=	ctr_aes_crypt,
 734	.decrypt		=	ctr_aes_crypt,
 735	.chunksize		=	AES_BLOCK_SIZE,
 736};
 737
 738static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
 739			  unsigned int keylen)
 740{
 741	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 742
 743	switch (keylen) {
 744	case AES_KEYSIZE_128:
 745		ctx->fc = CPACF_KMA_GCM_AES_128;
 746		break;
 747	case AES_KEYSIZE_192:
 748		ctx->fc = CPACF_KMA_GCM_AES_192;
 749		break;
 750	case AES_KEYSIZE_256:
 751		ctx->fc = CPACF_KMA_GCM_AES_256;
 752		break;
 753	default:
 754		return -EINVAL;
 755	}
 756
 757	memcpy(ctx->key, key, keylen);
 758	ctx->key_len = keylen;
 759	return 0;
 760}
 761
 762static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 763{
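	/* Tag lengths permitted for GCM by NIST SP 800-38D */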
 764	switch (authsize) {
 765	case 4:
 766	case 8:
 767	case 12:
 768	case 13:
 769	case 14:
 770	case 15:
 771	case 16:
 772		break;
 773	default:
 774		return -EINVAL;
 775	}
 776
 777	return 0;
 778}
 779
 780static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
 781			   unsigned int len)
 782{
 783	memset(gw, 0, sizeof(*gw));
 784	gw->walk_bytes_remain = len;
 785	scatterwalk_start(&gw->walk, sg);
 786}
 787
 788static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
 789{
 790	struct scatterlist *nextsg;
 791
 792	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
 793	while (!gw->walk_bytes) {
 794		nextsg = sg_next(gw->walk.sg);
 795		if (!nextsg)
 796			return 0;
 797		scatterwalk_start(&gw->walk, nextsg);
 798		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
 799						   gw->walk_bytes_remain);
 800	}
 801	gw->walk_ptr = scatterwalk_map(&gw->walk);
 802	return gw->walk_bytes;
 803}
 804
 805static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 806					     unsigned int nbytes)
 807{
 808	gw->walk_bytes_remain -= nbytes;
 809	scatterwalk_unmap(gw->walk_ptr);
 810	scatterwalk_advance(&gw->walk, nbytes);
 811	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 812	gw->walk_ptr = NULL;
 813}
 814
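/*
 * gcm_{in,out}_walk_go() return a pointer to at least minbytesneeded bytes
 * of contiguous data when possible, falling back to the bounce buffer
 * otherwise; the matching *_walk_done() either advances the scatterwalk or
 * drains/refills the buffer by the number of bytes actually consumed.
 */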
 815static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 816{
 817	int n;
 818
 819	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 820		gw->ptr = gw->buf;
 821		gw->nbytes = gw->buf_bytes;
 822		goto out;
 823	}
 824
 825	if (gw->walk_bytes_remain == 0) {
 826		gw->ptr = NULL;
 827		gw->nbytes = 0;
 828		goto out;
 829	}
 830
 831	if (!_gcm_sg_clamp_and_map(gw)) {
 832		gw->ptr = NULL;
 833		gw->nbytes = 0;
 834		goto out;
 835	}
 836
 837	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 838		gw->ptr = gw->walk_ptr;
 839		gw->nbytes = gw->walk_bytes;
 840		goto out;
 841	}
 842
 843	while (1) {
 844		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 845		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 846		gw->buf_bytes += n;
 847		_gcm_sg_unmap_and_advance(gw, n);
 848		if (gw->buf_bytes >= minbytesneeded) {
 849			gw->ptr = gw->buf;
 850			gw->nbytes = gw->buf_bytes;
 851			goto out;
 852		}
 853		if (!_gcm_sg_clamp_and_map(gw)) {
 854			gw->ptr = NULL;
 855			gw->nbytes = 0;
 856			goto out;
 857		}
 858	}
 859
 860out:
 861	return gw->nbytes;
 862}
 863
 864static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 865{
 866	if (gw->walk_bytes_remain == 0) {
 867		gw->ptr = NULL;
 868		gw->nbytes = 0;
 869		goto out;
 870	}
 871
 872	if (!_gcm_sg_clamp_and_map(gw)) {
 873		gw->ptr = NULL;
 874		gw->nbytes = 0;
 875		goto out;
 876	}
 877
 878	if (gw->walk_bytes >= minbytesneeded) {
 879		gw->ptr = gw->walk_ptr;
 880		gw->nbytes = gw->walk_bytes;
 881		goto out;
 882	}
 883
 884	scatterwalk_unmap(gw->walk_ptr);
 885	gw->walk_ptr = NULL;
 886
 887	gw->ptr = gw->buf;
 888	gw->nbytes = sizeof(gw->buf);
 889
 890out:
 891	return gw->nbytes;
 892}
 893
 894static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 895{
 896	if (gw->ptr == NULL)
 897		return 0;
 898
 899	if (gw->ptr == gw->buf) {
 900		int n = gw->buf_bytes - bytesdone;
 901		if (n > 0) {
 902			memmove(gw->buf, gw->buf + bytesdone, n);
 903			gw->buf_bytes = n;
 904		} else
 905			gw->buf_bytes = 0;
 906	} else
 907		_gcm_sg_unmap_and_advance(gw, bytesdone);
 908
 909	return bytesdone;
 910}
 911
 912static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 913{
 914	int i, n;
 915
 916	if (gw->ptr == NULL)
 917		return 0;
 918
 919	if (gw->ptr == gw->buf) {
 920		for (i = 0; i < bytesdone; i += n) {
 921			if (!_gcm_sg_clamp_and_map(gw))
 922				return i;
 923			n = min(gw->walk_bytes, bytesdone - i);
 924			memcpy(gw->walk_ptr, gw->buf + i, n);
 925			_gcm_sg_unmap_and_advance(gw, n);
 926		}
 927	} else
 928		_gcm_sg_unmap_and_advance(gw, bytesdone);
 929
 930	return bytesdone;
 931}
 932
 933static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 934{
 935	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 936	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 937	unsigned int ivsize = crypto_aead_ivsize(tfm);
 938	unsigned int taglen = crypto_aead_authsize(tfm);
 939	unsigned int aadlen = req->assoclen;
 940	unsigned int pclen = req->cryptlen;
 941	int ret = 0;
 942
 943	unsigned int n, len, in_bytes, out_bytes,
 944		     min_bytes, bytes, aad_bytes, pc_bytes;
 945	struct gcm_sg_walk gw_in, gw_out;
 946	u8 tag[GHASH_DIGEST_SIZE];
 947
 948	struct {
 949		u32 _[3];		/* reserved */
 950		u32 cv;			/* Counter Value */
 951		u8 t[GHASH_DIGEST_SIZE];/* Tag */
 952		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
 953		u64 taadl;		/* Total AAD Length */
 954		u64 tpcl;		/* Total Plain-/Cipher-text Length */
 955		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
 956		u8 k[AES_MAX_KEY_SIZE];	/* Key */
 957	} param;
 958
 959	/*
 960	 * encrypt
 961	 *   req->src: aad||plaintext
 962	 *   req->dst: aad||ciphertext||tag
 963	 * decrypt
 964	 *   req->src: aad||ciphertext||tag
 965	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
 966	 * aad, plaintext and ciphertext may be empty.
 967	 */
 968	if (flags & CPACF_DECRYPT)
 969		pclen -= taglen;
 970	len = aadlen + pclen;
 971
 972	memset(&param, 0, sizeof(param));
 973	param.cv = 1;
 974	param.taadl = aadlen * 8;
 975	param.tpcl = pclen * 8;
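	/* J0 = IV || 0^31 || 1 for the 96-bit IV this tfm advertises */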
 976	memcpy(param.j0, req->iv, ivsize);
 977	*(u32 *)(param.j0 + ivsize) = 1;
 978	memcpy(param.k, ctx->key, ctx->key_len);
 979
 980	gcm_walk_start(&gw_in, req->src, len);
 981	gcm_walk_start(&gw_out, req->dst, len);
 982
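	/*
	 * Feed AAD and payload to KMA in chunks as the two walks allow. The
	 * CPACF_KMA_LAAD/LPC flags mark the final AAD and payload chunks so
	 * the instruction can close out the GHASH length block.
	 */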
 983	do {
 984		min_bytes = min_t(unsigned int,
 985				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
 986		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
 987		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
 988		bytes = min(in_bytes, out_bytes);
 989
 990		if (aadlen + pclen <= bytes) {
 991			aad_bytes = aadlen;
 992			pc_bytes = pclen;
 993			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
 994		} else {
 995			if (aadlen <= bytes) {
 996				aad_bytes = aadlen;
 997				pc_bytes = (bytes - aadlen) &
 998					   ~(AES_BLOCK_SIZE - 1);
 999				flags |= CPACF_KMA_LAAD;
1000			} else {
1001				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
1002				pc_bytes = 0;
1003			}
1004		}
1005
1006		if (aad_bytes > 0)
1007			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
1008
1009		cpacf_kma(ctx->fc | flags, &param,
1010			  gw_out.ptr + aad_bytes,
1011			  gw_in.ptr + aad_bytes, pc_bytes,
1012			  gw_in.ptr, aad_bytes);
1013
1014		n = aad_bytes + pc_bytes;
1015		if (gcm_in_walk_done(&gw_in, n) != n)
1016			return -ENOMEM;
1017		if (gcm_out_walk_done(&gw_out, n) != n)
1018			return -ENOMEM;
1019		aadlen -= aad_bytes;
1020		pclen -= pc_bytes;
1021	} while (aadlen + pclen > 0);
1022
1023	if (flags & CPACF_DECRYPT) {
1024		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
1025		if (crypto_memneq(tag, param.t, taglen))
1026			ret = -EBADMSG;
1027	} else
1028		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
1029
1030	memzero_explicit(&param, sizeof(param));
1031	return ret;
1032}
1033
1034static int gcm_aes_encrypt(struct aead_request *req)
1035{
1036	return gcm_aes_crypt(req, CPACF_ENCRYPT);
1037}
1038
1039static int gcm_aes_decrypt(struct aead_request *req)
1040{
1041	return gcm_aes_crypt(req, CPACF_DECRYPT);
1042}
1043
1044static struct aead_alg gcm_aes_aead = {
1045	.setkey			= gcm_aes_setkey,
1046	.setauthsize		= gcm_aes_setauthsize,
1047	.encrypt		= gcm_aes_encrypt,
1048	.decrypt		= gcm_aes_decrypt,
1049
1050	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
1051	.maxauthsize		= GHASH_DIGEST_SIZE,
1052	.chunksize		= AES_BLOCK_SIZE,
1053
1054	.base			= {
1055		.cra_blocksize		= 1,
1056		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
1057		.cra_priority		= 900,
1058		.cra_name		= "gcm(aes)",
1059		.cra_driver_name	= "gcm-aes-s390",
1060		.cra_module		= THIS_MODULE,
1061	},
1062};
1063
1064static struct crypto_alg *aes_s390_alg;
1065static struct skcipher_alg *aes_s390_skcipher_algs[5];
1066static int aes_s390_skciphers_num;
1067static struct aead_alg *aes_s390_aead_alg;
1068
1069static int aes_s390_register_skcipher(struct skcipher_alg *alg)
1070{
1071	int ret;
1072
1073	ret = crypto_register_skcipher(alg);
1074	if (!ret)
1075		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
1076	return ret;
1077}
1078
1079static void aes_s390_fini(void)
1080{
1081	if (aes_s390_alg)
1082		crypto_unregister_alg(aes_s390_alg);
1083	while (aes_s390_skciphers_num--)
1084		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
1085	if (ctrblk)
1086		free_page((unsigned long) ctrblk);
1087
1088	if (aes_s390_aead_alg)
1089		crypto_unregister_aead(aes_s390_aead_alg);
1090}
1091
1092static int __init aes_s390_init(void)
1093{
1094	int ret;
1095
1096	/* Query available functions for KM, KMC, KMCTR and KMA */
1097	cpacf_query(CPACF_KM, &km_functions);
1098	cpacf_query(CPACF_KMC, &kmc_functions);
1099	cpacf_query(CPACF_KMCTR, &kmctr_functions);
1100	cpacf_query(CPACF_KMA, &kma_functions);
1101
1102	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
1103	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
1104	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
1105		ret = crypto_register_alg(&aes_alg);
1106		if (ret)
1107			goto out_err;
1108		aes_s390_alg = &aes_alg;
1109		ret = aes_s390_register_skcipher(&ecb_aes_alg);
1110		if (ret)
1111			goto out_err;
1112	}
1113
1114	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1115	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1116	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1117		ret = aes_s390_register_skcipher(&cbc_aes_alg);
1118		if (ret)
1119			goto out_err;
1120	}
1121
1122	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
1123	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
1124		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
1125		if (ret)
1126			goto out_err;
1127	}
1128
1129	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1130	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1131		ret = aes_s390_register_skcipher(&xts_aes_alg);
1132		if (ret)
1133			goto out_err;
1134	}
1135
1136	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1137	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1138	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1139		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1140		if (!ctrblk) {
1141			ret = -ENOMEM;
1142			goto out_err;
1143		}
1144		ret = aes_s390_register_skcipher(&ctr_aes_alg);
1145		if (ret)
1146			goto out_err;
1147	}
1148
1149	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1150	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1151	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1152		ret = crypto_register_aead(&gcm_aes_aead);
1153		if (ret)
1154			goto out_err;
1155		aes_s390_aead_alg = &gcm_aes_aead;
1156	}
1157
1158	return 0;
1159out_err:
1160	aes_s390_fini();
1161	return ret;
1162}
1163
1164module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
1165module_exit(aes_s390_fini);
1166
1167MODULE_ALIAS_CRYPTO("aes-all");
1168
1169MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1170MODULE_LICENSE("GPL");
1171MODULE_IMPORT_NS("CRYPTO_INTERNAL");
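
/*
 * Editor's illustration -- not part of aes_s390.c. A minimal sketch of how
 * kernel code might drive one of the skciphers registered above; the crypto
 * core resolves "xts(aes)" to the highest-priority provider, i.e.
 * full-xts-aes-s390 (403) when the CPU supports it, else xts-aes-s390 (402),
 * else a software implementation. The function name and error handling are
 * illustrative only.
 */
#include <linux/crypto.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_xts_encrypt(u8 *buf, unsigned int len,
			       const u8 *key, unsigned int keylen,
			       u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* XTS takes a double key: 32 bytes (AES-128) or 64 bytes (AES-256) */
	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* Encrypt buf in place; len must be at least AES_BLOCK_SIZE */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}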
v5.9
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Cryptographic API.
   4 *
   5 * s390 implementation of the AES Cipher Algorithm.
   6 *
   7 * s390 Version:
   8 *   Copyright IBM Corp. 2005, 2017
   9 *   Author(s): Jan Glauber (jang@de.ibm.com)
  10 *		Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback
  11 *		Patrick Steuer <patrick.steuer@de.ibm.com>
  12 *		Harald Freudenberger <freude@de.ibm.com>
  13 *
  14 * Derived from "crypto/aes_generic.c"
  15 */
  16
  17#define KMSG_COMPONENT "aes_s390"
  18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19
  20#include <crypto/aes.h>
  21#include <crypto/algapi.h>
  22#include <crypto/ghash.h>
  23#include <crypto/internal/aead.h>
 
  24#include <crypto/internal/skcipher.h>
  25#include <crypto/scatterwalk.h>
  26#include <linux/err.h>
  27#include <linux/module.h>
  28#include <linux/cpufeature.h>
  29#include <linux/init.h>
  30#include <linux/mutex.h>
  31#include <linux/fips.h>
  32#include <linux/string.h>
  33#include <crypto/xts.h>
  34#include <asm/cpacf.h>
  35
  36static u8 *ctrblk;
  37static DEFINE_MUTEX(ctrblk_lock);
  38
  39static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
  40		    kma_functions;
  41
  42struct s390_aes_ctx {
  43	u8 key[AES_MAX_KEY_SIZE];
  44	int key_len;
  45	unsigned long fc;
  46	union {
  47		struct crypto_skcipher *skcipher;
  48		struct crypto_cipher *cip;
  49	} fallback;
  50};
  51
  52struct s390_xts_ctx {
  53	u8 key[32];
  54	u8 pcc_key[32];
 
 
 
 
 
  55	int key_len;
  56	unsigned long fc;
  57	struct crypto_skcipher *fallback;
  58};
  59
  60struct gcm_sg_walk {
  61	struct scatter_walk walk;
  62	unsigned int walk_bytes;
  63	u8 *walk_ptr;
  64	unsigned int walk_bytes_remain;
  65	u8 buf[AES_BLOCK_SIZE];
  66	unsigned int buf_bytes;
  67	u8 *ptr;
  68	unsigned int nbytes;
  69};
  70
  71static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
  72		unsigned int key_len)
  73{
  74	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  75
  76	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
  77	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
  78			CRYPTO_TFM_REQ_MASK);
  79
  80	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
  81}
  82
  83static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  84		       unsigned int key_len)
  85{
  86	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
  87	unsigned long fc;
  88
  89	/* Pick the correct function code based on the key length */
  90	fc = (key_len == 16) ? CPACF_KM_AES_128 :
  91	     (key_len == 24) ? CPACF_KM_AES_192 :
  92	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
  93
  94	/* Check if the function code is available */
  95	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
  96	if (!sctx->fc)
  97		return setkey_fallback_cip(tfm, in_key, key_len);
  98
  99	sctx->key_len = key_len;
 100	memcpy(sctx->key, in_key, key_len);
 101	return 0;
 102}
 103
 104static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 105{
 106	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 107
 108	if (unlikely(!sctx->fc)) {
 109		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
 110		return;
 111	}
 112	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 113}
 114
 115static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 116{
 117	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 118
 119	if (unlikely(!sctx->fc)) {
 120		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
 121		return;
 122	}
 123	cpacf_km(sctx->fc | CPACF_DECRYPT,
 124		 &sctx->key, out, in, AES_BLOCK_SIZE);
 125}
 126
 127static int fallback_init_cip(struct crypto_tfm *tfm)
 128{
 129	const char *name = tfm->__crt_alg->cra_name;
 130	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 131
 132	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
 133						 CRYPTO_ALG_NEED_FALLBACK);
 134
 135	if (IS_ERR(sctx->fallback.cip)) {
 136		pr_err("Allocating AES fallback algorithm %s failed\n",
 137		       name);
 138		return PTR_ERR(sctx->fallback.cip);
 139	}
 140
 141	return 0;
 142}
 143
 144static void fallback_exit_cip(struct crypto_tfm *tfm)
 145{
 146	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 147
 148	crypto_free_cipher(sctx->fallback.cip);
 149	sctx->fallback.cip = NULL;
 150}
 151
 152static struct crypto_alg aes_alg = {
 153	.cra_name		=	"aes",
 154	.cra_driver_name	=	"aes-s390",
 155	.cra_priority		=	300,
 156	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
 157					CRYPTO_ALG_NEED_FALLBACK,
 158	.cra_blocksize		=	AES_BLOCK_SIZE,
 159	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
 160	.cra_module		=	THIS_MODULE,
 161	.cra_init               =       fallback_init_cip,
 162	.cra_exit               =       fallback_exit_cip,
 163	.cra_u			=	{
 164		.cipher = {
 165			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
 166			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
 167			.cia_setkey		=	aes_set_key,
 168			.cia_encrypt		=	crypto_aes_encrypt,
 169			.cia_decrypt		=	crypto_aes_decrypt,
 170		}
 171	}
 172};
 173
 174static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
 175				    unsigned int len)
 176{
 177	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 178
 179	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
 180				    CRYPTO_TFM_REQ_MASK);
 181	crypto_skcipher_set_flags(sctx->fallback.skcipher,
 182				  crypto_skcipher_get_flags(tfm) &
 183				  CRYPTO_TFM_REQ_MASK);
 184	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
 185}
 186
 187static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
 188				   struct skcipher_request *req,
 189				   unsigned long modifier)
 190{
 191	struct skcipher_request *subreq = skcipher_request_ctx(req);
 192
 193	*subreq = *req;
 194	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
 195	return (modifier & CPACF_DECRYPT) ?
 196		crypto_skcipher_decrypt(subreq) :
 197		crypto_skcipher_encrypt(subreq);
 198}
 199
 200static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 201			   unsigned int key_len)
 202{
 203	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 204	unsigned long fc;
 205
 206	/* Pick the correct function code based on the key length */
 207	fc = (key_len == 16) ? CPACF_KM_AES_128 :
 208	     (key_len == 24) ? CPACF_KM_AES_192 :
 209	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
 210
 211	/* Check if the function code is available */
 212	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 213	if (!sctx->fc)
 214		return setkey_fallback_skcipher(tfm, in_key, key_len);
 215
 216	sctx->key_len = key_len;
 217	memcpy(sctx->key, in_key, key_len);
 218	return 0;
 219}
 220
 221static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 222{
 223	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 224	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 225	struct skcipher_walk walk;
 226	unsigned int nbytes, n;
 227	int ret;
 228
 229	if (unlikely(!sctx->fc))
 230		return fallback_skcipher_crypt(sctx, req, modifier);
 231
 232	ret = skcipher_walk_virt(&walk, req, false);
 233	while ((nbytes = walk.nbytes) != 0) {
 234		/* only use complete blocks */
 235		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 236		cpacf_km(sctx->fc | modifier, sctx->key,
 237			 walk.dst.virt.addr, walk.src.virt.addr, n);
 238		ret = skcipher_walk_done(&walk, nbytes - n);
 239	}
 240	return ret;
 241}
 242
 243static int ecb_aes_encrypt(struct skcipher_request *req)
 244{
 245	return ecb_aes_crypt(req, 0);
 246}
 247
 248static int ecb_aes_decrypt(struct skcipher_request *req)
 249{
 250	return ecb_aes_crypt(req, CPACF_DECRYPT);
 251}
 252
 253static int fallback_init_skcipher(struct crypto_skcipher *tfm)
 254{
 255	const char *name = crypto_tfm_alg_name(&tfm->base);
 256	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 257
 258	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
 259				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 260
 261	if (IS_ERR(sctx->fallback.skcipher)) {
 262		pr_err("Allocating AES fallback algorithm %s failed\n",
 263		       name);
 264		return PTR_ERR(sctx->fallback.skcipher);
 265	}
 266
 267	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 268				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
 269	return 0;
 270}
 271
 272static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
 273{
 274	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 275
 276	crypto_free_skcipher(sctx->fallback.skcipher);
 277}
 278
 279static struct skcipher_alg ecb_aes_alg = {
 280	.base.cra_name		=	"ecb(aes)",
 281	.base.cra_driver_name	=	"ecb-aes-s390",
 282	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
 283	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 284	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 285	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 286	.base.cra_module	=	THIS_MODULE,
 287	.init			=	fallback_init_skcipher,
 288	.exit			=	fallback_exit_skcipher,
 289	.min_keysize		=	AES_MIN_KEY_SIZE,
 290	.max_keysize		=	AES_MAX_KEY_SIZE,
 291	.setkey			=	ecb_aes_set_key,
 292	.encrypt		=	ecb_aes_encrypt,
 293	.decrypt		=	ecb_aes_decrypt,
 294};
 295
 296static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 297			   unsigned int key_len)
 298{
 299	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 300	unsigned long fc;
 301
 302	/* Pick the correct function code based on the key length */
 303	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
 304	     (key_len == 24) ? CPACF_KMC_AES_192 :
 305	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 306
 307	/* Check if the function code is available */
 308	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
 309	if (!sctx->fc)
 310		return setkey_fallback_skcipher(tfm, in_key, key_len);
 311
 312	sctx->key_len = key_len;
 313	memcpy(sctx->key, in_key, key_len);
 314	return 0;
 315}
 316
 317static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 318{
 319	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 320	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 321	struct skcipher_walk walk;
 322	unsigned int nbytes, n;
 323	int ret;
 324	struct {
 325		u8 iv[AES_BLOCK_SIZE];
 326		u8 key[AES_MAX_KEY_SIZE];
 327	} param;
 328
 329	if (unlikely(!sctx->fc))
 330		return fallback_skcipher_crypt(sctx, req, modifier);
 331
 332	ret = skcipher_walk_virt(&walk, req, false);
 333	if (ret)
 334		return ret;
 335	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
 336	memcpy(param.key, sctx->key, sctx->key_len);
 337	while ((nbytes = walk.nbytes) != 0) {
 338		/* only use complete blocks */
 339		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 340		cpacf_kmc(sctx->fc | modifier, &param,
 341			  walk.dst.virt.addr, walk.src.virt.addr, n);
 342		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
 343		ret = skcipher_walk_done(&walk, nbytes - n);
 344	}
 345	memzero_explicit(&param, sizeof(param));
 346	return ret;
 347}
 348
 349static int cbc_aes_encrypt(struct skcipher_request *req)
 350{
 351	return cbc_aes_crypt(req, 0);
 352}
 353
 354static int cbc_aes_decrypt(struct skcipher_request *req)
 355{
 356	return cbc_aes_crypt(req, CPACF_DECRYPT);
 357}
 358
 359static struct skcipher_alg cbc_aes_alg = {
 360	.base.cra_name		=	"cbc(aes)",
 361	.base.cra_driver_name	=	"cbc-aes-s390",
 362	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 363	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 364	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 365	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 366	.base.cra_module	=	THIS_MODULE,
 367	.init			=	fallback_init_skcipher,
 368	.exit			=	fallback_exit_skcipher,
 369	.min_keysize		=	AES_MIN_KEY_SIZE,
 370	.max_keysize		=	AES_MAX_KEY_SIZE,
 371	.ivsize			=	AES_BLOCK_SIZE,
 372	.setkey			=	cbc_aes_set_key,
 373	.encrypt		=	cbc_aes_encrypt,
 374	.decrypt		=	cbc_aes_decrypt,
 375};
 376
 377static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
 378			       unsigned int len)
 379{
 380	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 381
 382	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
 383	crypto_skcipher_set_flags(xts_ctx->fallback,
 384				  crypto_skcipher_get_flags(tfm) &
 385				  CRYPTO_TFM_REQ_MASK);
 386	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
 387}
 388
 389static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 390			   unsigned int key_len)
 391{
 392	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 393	unsigned long fc;
 394	int err;
 395
 396	err = xts_fallback_setkey(tfm, in_key, key_len);
 397	if (err)
 398		return err;
 399
 400	/* In fips mode only 128 bit or 256 bit keys are valid */
 401	if (fips_enabled && key_len != 32 && key_len != 64)
 402		return -EINVAL;
 403
 404	/* Pick the correct function code based on the key length */
 405	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
 406	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
 407
 408	/* Check if the function code is available */
 409	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
 410	if (!xts_ctx->fc)
 411		return 0;
 412
 413	/* Split the XTS key into the two subkeys */
 414	key_len = key_len / 2;
 415	xts_ctx->key_len = key_len;
 416	memcpy(xts_ctx->key, in_key, key_len);
 417	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
 418	return 0;
 419}
 420
 421static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
 422{
 423	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 424	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 425	struct skcipher_walk walk;
 426	unsigned int offset, nbytes, n;
 427	int ret;
 428	struct {
 429		u8 key[32];
 430		u8 tweak[16];
 431		u8 block[16];
 432		u8 bit[16];
 433		u8 xts[16];
 434	} pcc_param;
 435	struct {
 436		u8 key[32];
 437		u8 init[16];
 438	} xts_param;
 439
 440	if (req->cryptlen < AES_BLOCK_SIZE)
 441		return -EINVAL;
 442
 443	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
 444		struct skcipher_request *subreq = skcipher_request_ctx(req);
 445
 446		*subreq = *req;
 447		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
 448		return (modifier & CPACF_DECRYPT) ?
 449			crypto_skcipher_decrypt(subreq) :
 450			crypto_skcipher_encrypt(subreq);
 451	}
 452
 453	ret = skcipher_walk_virt(&walk, req, false);
 454	if (ret)
 455		return ret;
 456	offset = xts_ctx->key_len & 0x10;
 457	memset(pcc_param.block, 0, sizeof(pcc_param.block));
 458	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
 459	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
 460	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
 461	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
 462	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
 463
 464	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
 465	memcpy(xts_param.init, pcc_param.xts, 16);
 466
 467	while ((nbytes = walk.nbytes) != 0) {
 468		/* only use complete blocks */
 469		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 470		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
 471			 walk.dst.virt.addr, walk.src.virt.addr, n);
 472		ret = skcipher_walk_done(&walk, nbytes - n);
 473	}
 474	memzero_explicit(&pcc_param, sizeof(pcc_param));
 475	memzero_explicit(&xts_param, sizeof(xts_param));
 476	return ret;
 477}
 478
 479static int xts_aes_encrypt(struct skcipher_request *req)
 480{
 481	return xts_aes_crypt(req, 0);
 482}
 483
 484static int xts_aes_decrypt(struct skcipher_request *req)
 485{
 486	return xts_aes_crypt(req, CPACF_DECRYPT);
 487}
 488
 489static int xts_fallback_init(struct crypto_skcipher *tfm)
 490{
 491	const char *name = crypto_tfm_alg_name(&tfm->base);
 492	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 493
 494	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
 495				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
 496
 497	if (IS_ERR(xts_ctx->fallback)) {
 498		pr_err("Allocating XTS fallback algorithm %s failed\n",
 499		       name);
 500		return PTR_ERR(xts_ctx->fallback);
 501	}
 502	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
 503				    crypto_skcipher_reqsize(xts_ctx->fallback));
 504	return 0;
 505}
 506
 507static void xts_fallback_exit(struct crypto_skcipher *tfm)
 508{
 509	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
 510
 511	crypto_free_skcipher(xts_ctx->fallback);
 512}
 513
 514static struct skcipher_alg xts_aes_alg = {
 515	.base.cra_name		=	"xts(aes)",
 516	.base.cra_driver_name	=	"xts-aes-s390",
 517	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 518	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 519	.base.cra_blocksize	=	AES_BLOCK_SIZE,
 520	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
 521	.base.cra_module	=	THIS_MODULE,
 522	.init			=	xts_fallback_init,
 523	.exit			=	xts_fallback_exit,
 524	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 525	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 526	.ivsize			=	AES_BLOCK_SIZE,
 527	.setkey			=	xts_aes_set_key,
 528	.encrypt		=	xts_aes_encrypt,
 529	.decrypt		=	xts_aes_decrypt,
 530};
 531
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 532static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 533			   unsigned int key_len)
 534{
 535	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 536	unsigned long fc;
 537
 538	/* Pick the correct function code based on the key length */
 539	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
 540	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
 541	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
 542
 543	/* Check if the function code is available */
 544	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
 545	if (!sctx->fc)
 546		return setkey_fallback_skcipher(tfm, in_key, key_len);
 547
 548	sctx->key_len = key_len;
 549	memcpy(sctx->key, in_key, key_len);
 550	return 0;
 551}
 552
 553static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
 554{
 555	unsigned int i, n;
 556
 557	/* only use complete blocks, max. PAGE_SIZE */
 558	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
 559	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
 560	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
 561		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
 562		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
 563		ctrptr += AES_BLOCK_SIZE;
 564	}
 565	return n;
 566}
 567
 568static int ctr_aes_crypt(struct skcipher_request *req)
 569{
 570	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 571	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
 572	u8 buf[AES_BLOCK_SIZE], *ctrptr;
 573	struct skcipher_walk walk;
 574	unsigned int n, nbytes;
 575	int ret, locked;
 576
 577	if (unlikely(!sctx->fc))
 578		return fallback_skcipher_crypt(sctx, req, 0);
 579
 580	locked = mutex_trylock(&ctrblk_lock);
 581
 582	ret = skcipher_walk_virt(&walk, req, false);
 583	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 584		n = AES_BLOCK_SIZE;
 585
 586		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 587			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
 588		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
 589		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
 590			    walk.src.virt.addr, n, ctrptr);
 591		if (ctrptr == ctrblk)
 592			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
 593			       AES_BLOCK_SIZE);
 594		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 595		ret = skcipher_walk_done(&walk, nbytes - n);
 596	}
 597	if (locked)
 598		mutex_unlock(&ctrblk_lock);
 599	/*
 600	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 601	 */
 602	if (nbytes) {
 603		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
 
 
 604			    AES_BLOCK_SIZE, walk.iv);
 605		memcpy(walk.dst.virt.addr, buf, nbytes);
 606		crypto_inc(walk.iv, AES_BLOCK_SIZE);
 607		ret = skcipher_walk_done(&walk, 0);
 608	}
 609
 610	return ret;
 611}
 612
 613static struct skcipher_alg ctr_aes_alg = {
 614	.base.cra_name		=	"ctr(aes)",
 615	.base.cra_driver_name	=	"ctr-aes-s390",
 616	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 617	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
 618	.base.cra_blocksize	=	1,
 619	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
 620	.base.cra_module	=	THIS_MODULE,
 621	.init			=	fallback_init_skcipher,
 622	.exit			=	fallback_exit_skcipher,
 623	.min_keysize		=	AES_MIN_KEY_SIZE,
 624	.max_keysize		=	AES_MAX_KEY_SIZE,
 625	.ivsize			=	AES_BLOCK_SIZE,
 626	.setkey			=	ctr_aes_set_key,
 627	.encrypt		=	ctr_aes_crypt,
 628	.decrypt		=	ctr_aes_crypt,
 629	.chunksize		=	AES_BLOCK_SIZE,
 630};
 631
 632static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
 633			  unsigned int keylen)
 634{
 635	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 636
 637	switch (keylen) {
 638	case AES_KEYSIZE_128:
 639		ctx->fc = CPACF_KMA_GCM_AES_128;
 640		break;
 641	case AES_KEYSIZE_192:
 642		ctx->fc = CPACF_KMA_GCM_AES_192;
 643		break;
 644	case AES_KEYSIZE_256:
 645		ctx->fc = CPACF_KMA_GCM_AES_256;
 646		break;
 647	default:
 648		return -EINVAL;
 649	}
 650
 651	memcpy(ctx->key, key, keylen);
 652	ctx->key_len = keylen;
 653	return 0;
 654}
 655
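/*
 * Allow the tag lengths permitted for GCM by SP 800-38D: 12 to 16
 * bytes, plus the short tags of 4 and 8 bytes.
 */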
 656static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 657{
 658	switch (authsize) {
 659	case 4:
 660	case 8:
 661	case 12:
 662	case 13:
 663	case 14:
 664	case 15:
 665	case 16:
 666		break;
 667	default:
 668		return -EINVAL;
 669	}
 670
 671	return 0;
 672}
 673
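/*
 * The gcm_sg_walk helpers below iterate over the src/dst
 * scatterlists.  Pieces smaller than the requested minimum are
 * collected in (or, on the output side, staged in) the one-block
 * buffer gw->buf, so that cpacf_kma() is always handed suitably
 * sized chunks.
 */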
 674static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
 675			   unsigned int len)
 676{
 677	memset(gw, 0, sizeof(*gw));
 678	gw->walk_bytes_remain = len;
 679	scatterwalk_start(&gw->walk, sg);
 680}
 681
 682static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
 683{
 684	struct scatterlist *nextsg;
 685
 686	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
 687	while (!gw->walk_bytes) {
 688		nextsg = sg_next(gw->walk.sg);
 689		if (!nextsg)
 690			return 0;
 691		scatterwalk_start(&gw->walk, nextsg);
 692		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
 693						   gw->walk_bytes_remain);
 694	}
 695	gw->walk_ptr = scatterwalk_map(&gw->walk);
 696	return gw->walk_bytes;
 697}
 698
 699static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
 700					     unsigned int nbytes)
 701{
 702	gw->walk_bytes_remain -= nbytes;
 703	scatterwalk_unmap(&gw->walk);
 704	scatterwalk_advance(&gw->walk, nbytes);
 705	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 706	gw->walk_ptr = NULL;
 707}
 708
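/*
 * Return the next readable input chunk of at least minbytesneeded
 * bytes in gw->ptr/gw->nbytes, gathering bytes across scatterlist
 * entries into gw->buf when a single entry is too short.  Returns
 * 0 once the input is exhausted.
 */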
 709static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 710{
 711	int n;
 712
 713	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
 714		gw->ptr = gw->buf;
 715		gw->nbytes = gw->buf_bytes;
 716		goto out;
 717	}
 718
 719	if (gw->walk_bytes_remain == 0) {
 720		gw->ptr = NULL;
 721		gw->nbytes = 0;
 722		goto out;
 723	}
 724
 725	if (!_gcm_sg_clamp_and_map(gw)) {
 726		gw->ptr = NULL;
 727		gw->nbytes = 0;
 728		goto out;
 729	}
 730
 731	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
 732		gw->ptr = gw->walk_ptr;
 733		gw->nbytes = gw->walk_bytes;
 734		goto out;
 735	}
 736
 737	while (1) {
 738		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
 739		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
 740		gw->buf_bytes += n;
 741		_gcm_sg_unmap_and_advance(gw, n);
 742		if (gw->buf_bytes >= minbytesneeded) {
 743			gw->ptr = gw->buf;
 744			gw->nbytes = gw->buf_bytes;
 745			goto out;
 746		}
 747		if (!_gcm_sg_clamp_and_map(gw)) {
 748			gw->ptr = NULL;
 749			gw->nbytes = 0;
 750			goto out;
 751		}
 752	}
 753
 754out:
 755	return gw->nbytes;
 756}
 757
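/*
 * Return the next writable output chunk.  If the current
 * scatterlist entry is shorter than minbytesneeded, hand out
 * gw->buf instead; gcm_out_walk_done() below then copies the
 * generated bytes from the buffer back into the scatterlist.
 */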
 758static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 759{
 760	if (gw->walk_bytes_remain == 0) {
 761		gw->ptr = NULL;
 762		gw->nbytes = 0;
 763		goto out;
 764	}
 765
 766	if (!_gcm_sg_clamp_and_map(gw)) {
 767		gw->ptr = NULL;
 768		gw->nbytes = 0;
 769		goto out;
 770	}
 771
 772	if (gw->walk_bytes >= minbytesneeded) {
 773		gw->ptr = gw->walk_ptr;
 774		gw->nbytes = gw->walk_bytes;
 775		goto out;
 776	}
 777
 778	scatterwalk_unmap(&gw->walk);
 779	gw->walk_ptr = NULL;
 780
 781	gw->ptr = gw->buf;
 782	gw->nbytes = sizeof(gw->buf);
 783
 784out:
 785	return gw->nbytes;
 786}
 787
 788static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 789{
 790	if (gw->ptr == NULL)
 791		return 0;
 792
 793	if (gw->ptr == gw->buf) {
 794		int n = gw->buf_bytes - bytesdone;
 795		if (n > 0) {
 796			memmove(gw->buf, gw->buf + bytesdone, n);
 797			gw->buf_bytes = n;
 798		} else
 799			gw->buf_bytes = 0;
 800	} else
 801		_gcm_sg_unmap_and_advance(gw, bytesdone);
 802
 803	return bytesdone;
 804}
 805
 806static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
 807{
 808	int i, n;
 809
 810	if (gw->ptr == NULL)
 811		return 0;
 812
 813	if (gw->ptr == gw->buf) {
 814		for (i = 0; i < bytesdone; i += n) {
 815			if (!_gcm_sg_clamp_and_map(gw))
 816				return i;
 817			n = min(gw->walk_bytes, bytesdone - i);
 818			memcpy(gw->walk_ptr, gw->buf + i, n);
 819			_gcm_sg_unmap_and_advance(gw, n);
 820		}
 821	} else
 822		_gcm_sg_unmap_and_advance(gw, bytesdone);
 823
 824	return bytesdone;
 825}
 826
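/*
 * One-pass GCM through the CPACF KMA instruction.  With the fixed
 * 12 byte IV, the initial counter block is J0 = IV || 0^31 || 1
 * exactly as in SP 800-38D, so j0 can be filled in directly and
 * the counter value cv starts at 1.
 */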
 827static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
 828{
 829	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 830	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
 831	unsigned int ivsize = crypto_aead_ivsize(tfm);
 832	unsigned int taglen = crypto_aead_authsize(tfm);
 833	unsigned int aadlen = req->assoclen;
 834	unsigned int pclen = req->cryptlen;
 835	int ret = 0;
 836
 837	unsigned int n, len, in_bytes, out_bytes,
 838		     min_bytes, bytes, aad_bytes, pc_bytes;
 839	struct gcm_sg_walk gw_in, gw_out;
 840	u8 tag[GHASH_DIGEST_SIZE];
 841
 842	struct {
 843		u32 _[3];		/* reserved */
 844		u32 cv;			/* Counter Value */
 845		u8 t[GHASH_DIGEST_SIZE];/* Tag */
 846		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
 847		u64 taadl;		/* Total AAD Length */
 848		u64 tpcl;		/* Total Plain-/Cipher-text Length */
 849		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
 850		u8 k[AES_MAX_KEY_SIZE];	/* Key */
 851	} param;
 852
 853	/*
 854	 * encrypt
 855	 *   req->src: aad||plaintext
 856	 *   req->dst: aad||ciphertext||tag
 857	 * decrypt
 858	 *   req->src: aad||ciphertext||tag
 859	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
 860	 * aad, plaintext and ciphertext may be empty.
 861	 */
 862	if (flags & CPACF_DECRYPT)
 863		pclen -= taglen;
 864	len = aadlen + pclen;
 865
 866	memset(&param, 0, sizeof(param));
 867	param.cv = 1;
 868	param.taadl = aadlen * 8;
 869	param.tpcl = pclen * 8;
 870	memcpy(param.j0, req->iv, ivsize);
 871	*(u32 *)(param.j0 + ivsize) = 1;
 872	memcpy(param.k, ctx->key, ctx->key_len);
 873
 874	gcm_walk_start(&gw_in, req->src, len);
 875	gcm_walk_start(&gw_out, req->dst, len);
 876
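	/*
	 * Feed KMA the largest piece available in both walks per
	 * iteration; AAD must be consumed before text.  The LAAD and
	 * LPC flags signal the last portion of AAD and of plain-/
	 * ciphertext respectively, so the instruction can finalize
	 * the tag.
	 */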
 877	do {
 878		min_bytes = min_t(unsigned int,
 879				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
 880		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
 881		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
 882		bytes = min(in_bytes, out_bytes);
 883
 884		if (aadlen + pclen <= bytes) {
 885			aad_bytes = aadlen;
 886			pc_bytes = pclen;
 887			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
 888		} else {
 889			if (aadlen <= bytes) {
 890				aad_bytes = aadlen;
 891				pc_bytes = (bytes - aadlen) &
 892					   ~(AES_BLOCK_SIZE - 1);
 893				flags |= CPACF_KMA_LAAD;
 894			} else {
 895				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
 896				pc_bytes = 0;
 897			}
 898		}
 899
 900		if (aad_bytes > 0)
 901			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
 902
 903		cpacf_kma(ctx->fc | flags, &param,
 904			  gw_out.ptr + aad_bytes,
 905			  gw_in.ptr + aad_bytes, pc_bytes,
 906			  gw_in.ptr, aad_bytes);
 907
 908		n = aad_bytes + pc_bytes;
 909		if (gcm_in_walk_done(&gw_in, n) != n)
 910			return -ENOMEM;
 911		if (gcm_out_walk_done(&gw_out, n) != n)
 912			return -ENOMEM;
 913		aadlen -= aad_bytes;
 914		pclen -= pc_bytes;
 915	} while (aadlen + pclen > 0);
 916
 917	if (flags & CPACF_DECRYPT) {
 918		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
 919		if (crypto_memneq(tag, param.t, taglen))
 920			ret = -EBADMSG;
 921	} else
 922		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
 923
 924	memzero_explicit(&param, sizeof(param));
 925	return ret;
 926}
 927
 928static int gcm_aes_encrypt(struct aead_request *req)
 929{
 930	return gcm_aes_crypt(req, CPACF_ENCRYPT);
 931}
 932
 933static int gcm_aes_decrypt(struct aead_request *req)
 934{
 935	return gcm_aes_crypt(req, CPACF_DECRYPT);
 936}
 937
 938static struct aead_alg gcm_aes_aead = {
 939	.setkey			= gcm_aes_setkey,
 940	.setauthsize		= gcm_aes_setauthsize,
 941	.encrypt		= gcm_aes_encrypt,
 942	.decrypt		= gcm_aes_decrypt,
 943
 944	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
 945	.maxauthsize		= GHASH_DIGEST_SIZE,
 946	.chunksize		= AES_BLOCK_SIZE,
 947
 948	.base			= {
 949		.cra_blocksize		= 1,
 950		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
 951		.cra_priority		= 900,
 952		.cra_name		= "gcm(aes)",
 953		.cra_driver_name	= "gcm-aes-s390",
 954		.cra_module		= THIS_MODULE,
 955	},
 956};
 957
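/*
 * Consumers reach these implementations through the generic crypto
 * API; a minimal usage sketch (error handling omitted):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, 32);
 *	crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 *
 * "gcm-aes-s390" wins the selection whenever its priority exceeds
 * that of the generic implementation.
 */

/* Bookkeeping of what got registered, for the aes_s390_fini() unwind. */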
 958static struct crypto_alg *aes_s390_alg;
 959static struct skcipher_alg *aes_s390_skcipher_algs[4];
 960static int aes_s390_skciphers_num;
 961static struct aead_alg *aes_s390_aead_alg;
 962
 963static int aes_s390_register_skcipher(struct skcipher_alg *alg)
 964{
 965	int ret;
 966
 967	ret = crypto_register_skcipher(alg);
 968	if (!ret)
 969		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
 970	return ret;
 971}
 972
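/*
 * Tear down whatever aes_s390_init() managed to register.  This is
 * also the error path of the init function, so each step must
 * tolerate a partially completed setup.
 */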
 973static void aes_s390_fini(void)
 974{
 975	if (aes_s390_alg)
 976		crypto_unregister_alg(aes_s390_alg);
 977	while (aes_s390_skciphers_num--)
 978		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
 979	if (ctrblk)
 980		free_page((unsigned long) ctrblk);
 981
 982	if (aes_s390_aead_alg)
 983		crypto_unregister_aead(aes_s390_aead_alg);
 984}
 985
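/*
 * Query the CPACF facilities once at module load and register only
 * those algorithms whose function codes are available on this
 * machine; anything else is left to the generic implementations.
 */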
 986static int __init aes_s390_init(void)
 987{
 988	int ret;
 989
 990	/* Query available functions for KM, KMC, KMCTR and KMA */
 991	cpacf_query(CPACF_KM, &km_functions);
 992	cpacf_query(CPACF_KMC, &kmc_functions);
 993	cpacf_query(CPACF_KMCTR, &kmctr_functions);
 994	cpacf_query(CPACF_KMA, &kma_functions);
 995
 996	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
 997	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
 998	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
 999		ret = crypto_register_alg(&aes_alg);
1000		if (ret)
1001			goto out_err;
1002		aes_s390_alg = &aes_alg;
1003		ret = aes_s390_register_skcipher(&ecb_aes_alg);
1004		if (ret)
1005			goto out_err;
1006	}
1007
1008	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1009	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1010	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1011		ret = aes_s390_register_skcipher(&cbc_aes_alg);
1012		if (ret)
1013			goto out_err;
1014	}
1015
1016	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1017	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1018		ret = aes_s390_register_skcipher(&xts_aes_alg);
1019		if (ret)
1020			goto out_err;
1021	}
1022
1023	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1024	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1025	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1026		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1027		if (!ctrblk) {
1028			ret = -ENOMEM;
1029			goto out_err;
1030		}
1031		ret = aes_s390_register_skcipher(&ctr_aes_alg);
1032		if (ret)
1033			goto out_err;
1034	}
1035
1036	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1037	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1038	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1039		ret = crypto_register_aead(&gcm_aes_aead);
1040		if (ret)
1041			goto out_err;
1042		aes_s390_aead_alg = &gcm_aes_aead;
1043	}
1044
1045	return 0;
1046out_err:
1047	aes_s390_fini();
1048	return ret;
1049}
1050
1051module_cpu_feature_match(MSA, aes_s390_init);
1052module_exit(aes_s390_fini);
1053
1054MODULE_ALIAS_CRYPTO("aes-all");
1055
1056MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1057MODULE_LICENSE("GPL");