// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2023
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE	16
#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE
#define PAES_256_PROTKEY_SIZE	(32 + 32)	/* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE	(32 + 32 + 32)	/* k1 + k2 + verification pattern */

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct paes_protkey {
	u32 type;
	u32 len;
	u8 protkey[PXTS_256_PROTKEY_SIZE];
};

struct key_blob {
	/*
	 * Small keys will be stored in the keybuf. Larger keys are
	 * stored in extra allocated memory. In both cases, key
	 * points to the memory where the key is stored.
	 * The code distinguishes by checking keylen against
	 * sizeof(keybuf). See the two following helper functions.
	 */
	u8 *key;
	u8 keybuf[128];
	unsigned int keylen;
};

/*
 * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
 * information.
 * @returns the size of the clearkey token
 */
static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
{
	struct clrkey_token {
		u8  type;
		u8  res0[3];
		u8  version;
		u8  res1[3];
		u32 keytype;
		u32 len;
		u8 key[];
	} __packed *token = (struct clrkey_token *)dest;

	token->type = 0x00;
	token->version = 0x02;
	token->keytype = (cklen - 8) >> 3;
	token->len = cklen;
	memcpy(token->key, ck, cklen);

	return sizeof(*token) + cklen;
}

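/*
 * Prepare the key_blob kb from the given raw key material:
 * a 16, 24 or 32 byte value is wrapped into a pkey clear key
 * token inside keybuf, any other length is treated as an opaque
 * key blob and copied as-is (allocated separately if it does
 * not fit into keybuf).
 */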
static inline int _key_to_kb(struct key_blob *kb,
			     const u8 *key,
			     unsigned int keylen)
{
	switch (keylen) {
	case 16:
	case 24:
	case 32:
		/* clear key value, prepare pkey clear key token in keybuf */
		memset(kb->keybuf, 0, sizeof(kb->keybuf));
		kb->keylen = make_clrkey_token(key, keylen, kb->keybuf);
		kb->key = kb->keybuf;
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf))
			kb->key = kb->keybuf;
		else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

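/*
 * XTS variant of _key_to_kb(): a 32 or 64 byte clear key is split
 * into two halves and each half is wrapped into its own clear key
 * token, any other key material is copied unchanged.
 */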
static inline int _xts_key_to_kb(struct key_blob *kb,
				 const u8 *key,
				 unsigned int keylen)
{
	size_t cklen = keylen / 2;

	memset(kb->keybuf, 0, sizeof(kb->keybuf));

	switch (keylen) {
	case 32:
	case 64:
		/* clear key value, prepare pkey clear key tokens in keybuf */
		kb->key = kb->keybuf;
		kb->keylen  = make_clrkey_token(key, cklen, kb->key);
		kb->keylen += make_clrkey_token(key + cklen, cklen,
						kb->key + kb->keylen);
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf)) {
			kb->key = kb->keybuf;
		} else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

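/* Free separately allocated key memory and wipe the inline key buffer. */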
static inline void _free_kb_keybuf(struct key_blob *kb)
{
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
		kfree_sensitive(kb->key);
		kb->key = NULL;
	}
	memzero_explicit(kb->keybuf, sizeof(kb->keybuf));
}

struct s390_paes_ctx {
	struct key_blob kb;
	struct paes_protkey pk;
	spinlock_t pk_lock;
	unsigned long fc;
};

struct s390_pxts_ctx {
	struct key_blob kb;
	struct paes_protkey pk[2];
	spinlock_t pk_lock;
	unsigned long fc;
};

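/* Convert a key blob into a protected key via the pkey layer. */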
static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen,
				      struct paes_protkey *pk)
{
	int i, rc = -EIO;

	/* try three times in case of busy card */
	for (i = 0; rc && i < 3; i++) {
		if (rc == -EBUSY && in_task()) {
			if (msleep_interruptible(1000))
				return -EINTR;
		}
		rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len,
				      &pk->type);
	}

	return rc;
}

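/*
 * (Re)derive the protected key from the key blob stored in the
 * context and install it in ctx->pk under the pk_lock.
 */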
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
	struct paes_protkey pk;
	int rc;

	pk.len = sizeof(pk.protkey);
	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk, &pk, sizeof(pk));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ecb_paes_set_key(ctx);
}

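/*
 * Common ECB en/decrypt path: copy the current protected key into
 * the parameter block, process all complete blocks with the KM
 * instruction and, if KM completes only partially because the
 * protected key has become invalid, convert the key blob again
 * and retry with the fresh protected key.
 */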
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, &param,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return rc;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
	.base.cra_name		=	"ecb(paes)",
	.base.cra_driver_name	=	"ecb-paes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
	.init			=	ecb_paes_init,
	.exit			=	ecb_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.setkey			=	ecb_paes_set_key,
	.encrypt		=	ecb_paes_encrypt,
	.decrypt		=	ecb_paes_decrypt,
};

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __cbc_paes_set_key(ctx);
}

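/*
 * Common CBC en/decrypt path: like the ECB path, but using the KMC
 * instruction with the IV carried in the parameter block and copied
 * back to the walk after each processed chunk.
 */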
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | modifier, &param,
			      walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k) {
			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return rc;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
	.base.cra_name		=	"cbc(paes)",
	.base.cra_driver_name	=	"cbc-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
	.init			=	cbc_paes_init,
	.exit			=	cbc_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_paes_set_key,
	.encrypt		=	cbc_paes_encrypt,
	.decrypt		=	cbc_paes_decrypt,
};

static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

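/*
 * Convert the XTS key material: either two separate AES key blobs
 * (split in the middle of kb) or one full AES-XTS key blob, depending
 * on the protected key type reported by the pkey layer.
 */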
static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
	struct paes_protkey pk0, pk1;
	size_t split_keylen;
	int rc;

	pk0.len = sizeof(pk0.protkey);
	pk1.len = sizeof(pk1.protkey);

	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0);
	if (rc)
		return rc;

	switch (pk0.type) {
	case PKEY_KEYTYPE_AES_128:
	case PKEY_KEYTYPE_AES_256:
		/* second keytoken required */
		if (ctx->kb.keylen % 2)
			return -EINVAL;
		split_keylen = ctx->kb.keylen / 2;

		rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen,
					 split_keylen, &pk1);
		if (rc)
			return rc;

		if (pk0.type != pk1.type)
			return -EINVAL;
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
	case PKEY_KEYTYPE_AES_XTS_256:
		/* single key */
		pk1.type = 0;
		break;
	default:
		/* unsupported protected keytype */
		return -EINVAL;
	}

	spin_lock_bh(&ctx->pk_lock);
	ctx->pk[0] = pk0;
	ctx->pk[1] = pk1;
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __xts_paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk[0].type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KM_PXTS_128;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KM_PXTS_256;
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
		fc = CPACF_KM_PXTS_128_FULL;
		break;
	case PKEY_KEYTYPE_AES_XTS_256:
		fc = CPACF_KM_PXTS_256_FULL;
		break;
	default:
		fc = 0;
		break;
	}

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int in_keylen)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len;
	int rc;

	if ((in_keylen == 32 || in_keylen == 64) &&
	    xts_verify_key(tfm, in_key, in_keylen))
		return -EINVAL;

	_free_kb_keybuf(&ctx->kb);
	rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen);
	if (rc)
		return rc;

	rc = __xts_paes_set_key(ctx);
	if (rc)
		return rc;

	/*
	 * With a single protected key (e.g. full AES-XTS) it is not
	 * possible to check whether k1 and k2 are the same.
	 */
	if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 ||
	    ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256)
		return 0;
	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
	return xts_verify_key(tfm, ckey, 2*ckey_len);
}

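/*
 * XTS path for full AES-XTS protected keys: a single protected key
 * covers both k1 and k2 and is processed with the full-XTS KM
 * function codes.
 */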
static int paes_xts_crypt_full(struct skcipher_request *req,
			       unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int keylen, offset, nbytes, n, k;
	struct {
		u8 key[64];
		u8 tweak[16];
		u8 nap[16];
		u8 wkvp[32];
	} fxts_param = {
		.nap = {0},
	};
	struct skcipher_walk walk;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen);
	memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
	       sizeof(fxts_param.wkvp));
	spin_unlock_bh(&ctx->pk_lock);
	memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak));
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(fxts_param.key + offset, ctx->pk[0].protkey,
			       keylen);
			memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
			       sizeof(fxts_param.wkvp));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return rc;
}

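/*
 * XTS path for two separate protected AES keys: derive the initial
 * XTS parameter with the PCC instruction using the second key, then
 * process complete blocks with KM using the first key.
 */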
static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int keylen, offset, nbytes, n, k;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
		u8 init[16];
	} xts_param;
	struct skcipher_walk walk;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

	memset(&pcc_param, 0, sizeof(pcc_param));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	spin_lock_bh(&ctx->pk_lock);
	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
	spin_unlock_bh(&ctx->pk_lock);
	cpacf_pcc(ctx->fc, pcc_param.key + offset);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(xts_param.key + offset,
			       ctx->pk[0].protkey, keylen);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return rc;
}

static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (ctx->fc) {
	case CPACF_KM_PXTS_128:
	case CPACF_KM_PXTS_256:
		return paes_xts_crypt(req, modifier);
	case CPACF_KM_PXTS_128_FULL:
	case CPACF_KM_PXTS_256_FULL:
		return paes_xts_crypt_full(req, modifier);
	default:
		return -EINVAL;
	}
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
	.base.cra_name		=	"xts(paes)",
	.base.cra_driver_name	=	"xts-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_pxts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
	.init			=	xts_paes_init,
	.exit			=	xts_paes_exit,
	.min_keysize		=	2 * PAES_MIN_KEYSIZE,
	.max_keysize		=	2 * PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_paes_set_key,
	.encrypt		=	xts_paes_encrypt,
	.decrypt		=	xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KMCTR_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ctr_paes_set_key(ctx);
}

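/*
 * Fill the shared counter block page with consecutive counter values
 * derived from iv, covering up to PAGE_SIZE bytes worth of complete
 * blocks.
 */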
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

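/*
 * CTR mode: when the ctrblk mutex could be taken, process many blocks
 * per KMCTR invocation using the shared counter block page, otherwise
 * fall back to one block at a time; a trailing partial block is
 * handled via a local buffer.
 */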
static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc, locked;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	locked = mutex_trylock(&ctrblk_lock);

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
				walk.src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx)) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				return skcipher_walk_done(&walk, -EIO);
			}
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, &param, buf,
					buf, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		rc = skcipher_walk_done(&walk, nbytes);
	}

	return rc;
}

static struct skcipher_alg ctr_paes_alg = {
	.base.cra_name		=	"ctr(paes)",
	.base.cra_driver_name	=	"ctr-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
	.init			=	ctr_paes_init,
	.exit			=	ctr_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_paes_set_key,
	.encrypt		=	ctr_paes_crypt,
	.decrypt		=	ctr_paes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

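/* Unregister an algorithm only if it was actually registered. */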
static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	if (!list_empty(&alg->base.cra_list))
		crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}

static int __init paes_s390_init(void)
{
	int rc;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		rc = crypto_register_skcipher(&ecb_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		rc = crypto_register_skcipher(&cbc_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		rc = crypto_register_skcipher(&xts_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			rc = -ENOMEM;
			goto out_err;
		}
		rc = crypto_register_skcipher(&ctr_paes_alg);
		if (rc)
			goto out_err;
	}

	return 0;
out_err:
	paes_s390_fini();
	return rc;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("ecb(paes)");
MODULE_ALIAS_CRYPTO("cbc(paes)");
MODULE_ALIAS_CRYPTO("ctr(paes)");
MODULE_ALIAS_CRYPTO("xts(paes)");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");