// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017,2020
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *              Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE 16
#define PAES_MAX_KEYSIZE 320

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct key_blob {
        /*
         * Small keys are stored in the keybuf. Larger keys are
         * stored in extra allocated memory. In both cases, key
         * points to the memory where the key is stored.
         * The code distinguishes between the two by checking keylen
         * against sizeof(keybuf). See the two following helper
         * functions.
         */
        u8 *key;
        u8 keybuf[128];
        unsigned int keylen;
};

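/*
 * Copy raw key material into a key blob. A 16, 24 or 32 byte value
 * is treated as an AES clear key and wrapped into a pkey clear key
 * token in keybuf; anything else (e.g. a CCA secure key blob) is
 * passed through unmodified for the pkey layer to handle.
 *
 * Illustrative layout of the generated token for a 16 byte clear
 * key (the values follow from the code below; version 0x02 is what
 * the pkey layer calls TOKVER_CLEAR_KEY):
 *
 *      byte  0      type    0x00
 *      byte  4      version 0x02
 *      bytes 8-11   keytype (16 - 8) >> 3 = 1 (AES-128)
 *      bytes 12-15  len     16
 *      bytes 16-31  the clear key value itself
 *
 * so kb->keylen ends up as sizeof(header) + 16 = 32 bytes.
 */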
static inline int _key_to_kb(struct key_blob *kb,
                             const u8 *key,
                             unsigned int keylen)
{
        struct clearkey_header {
                u8  type;
                u8  res0[3];
                u8  version;
                u8  res1[3];
                u32 keytype;
                u32 len;
        } __packed * h;

        switch (keylen) {
        case 16:
        case 24:
        case 32:
                /* clear key value, prepare pkey clear key token in keybuf */
                memset(kb->keybuf, 0, sizeof(kb->keybuf));
                h = (struct clearkey_header *) kb->keybuf;
                h->version = 0x02; /* TOKVER_CLEAR_KEY */
                h->keytype = (keylen - 8) >> 3;
                h->len = keylen;
                memcpy(kb->keybuf + sizeof(*h), key, keylen);
                kb->keylen = sizeof(*h) + keylen;
                kb->key = kb->keybuf;
                break;
        default:
                /* other key material, let pkey handle this */
                if (keylen <= sizeof(kb->keybuf))
                        kb->key = kb->keybuf;
                else {
                        kb->key = kmalloc(keylen, GFP_KERNEL);
                        if (!kb->key)
                                return -ENOMEM;
                }
                memcpy(kb->key, key, keylen);
                kb->keylen = keylen;
                break;
        }

        return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
        if (kb->key && kb->key != kb->keybuf
            && kb->keylen > sizeof(kb->keybuf)) {
                kfree(kb->key);
                kb->key = NULL;
        }
}

struct s390_paes_ctx {
        struct key_blob kb;
        struct pkey_protkey pk;
        spinlock_t pk_lock;
        unsigned long fc;
};

struct s390_pxts_ctx {
        struct key_blob kb[2];
        struct pkey_protkey pk[2];
        spinlock_t pk_lock;
        unsigned long fc;
};

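/*
 * Convert a key blob into a protected key. The pkey layer may fail
 * transiently (e.g. -EAGAIN while a crypto card is temporarily
 * unavailable), so the conversion is attempted up to three times.
 * Between attempts the code sleeps for a second, but only when
 * running in task context where sleeping is allowed.
 */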
static inline int __paes_keyblob2pkey(struct key_blob *kb,
                                      struct pkey_protkey *pk)
{
        int i, ret;

        /* try three times in case of failure */
        for (i = 0; i < 3; i++) {
                if (i > 0 && ret == -EAGAIN && in_task())
                        if (msleep_interruptible(1000))
                                return -EINTR;
                ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk);
                if (ret == 0)
                        break;
        }

        return ret;
}

static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
        int ret;
        struct pkey_protkey pkey;

        ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
        if (ret)
                return ret;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(&ctx->pk, &pkey, sizeof(pkey));
        spin_unlock_bh(&ctx->pk_lock);

        return 0;
}
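
/*
 * The protected key in the context can become stale at any time, for
 * instance when the wrapping key changes across a suspend/resume or
 * a guest migration. The crypt functions therefore keep a private
 * copy of the protected key in an on-stack parameter block and
 * reconvert the key blob whenever CPACF reports a failure. pk_lock
 * serializes updates of ctx->pk against readers taking such a copy.
 */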

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
        int rc;
        unsigned long fc;

        rc = __paes_convert_key(ctx);
        if (rc)
                return rc;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __ecb_paes_set_key(ctx);
}

static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret;
        struct {
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_km(ctx->fc | modifier, &param,
                             walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
                if (k < n) {
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
}
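
/*
 * Note on the k < n case above: cpacf_km() returns the number of
 * bytes it actually processed. When the protected key has become
 * invalid, the instruction stops before completing the request, k
 * falls short of n, and the loop reconverts the key blob, refreshes
 * the on-stack parameter block and retries the remaining bytes.
 */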

static int ecb_paes_encrypt(struct skcipher_request *req)
{
        return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
        return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
        .base.cra_name          = "ecb(paes)",
        .base.cra_driver_name   = "ecb-paes-s390",
        .base.cra_priority      = 401,  /* combo: aes + ecb + 1 */
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct s390_paes_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_list          = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
        .init                   = ecb_paes_init,
        .exit                   = ecb_paes_exit,
        .min_keysize            = PAES_MIN_KEYSIZE,
        .max_keysize            = PAES_MAX_KEYSIZE,
        .setkey                 = ecb_paes_set_key,
        .encrypt                = ecb_paes_encrypt,
        .decrypt                = ecb_paes_decrypt,
};
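
/*
 * Illustrative user space use of this tfm via AF_ALG (a sketch, not
 * part of this driver): "blob" and "bloblen" stand for a paes key
 * blob as accepted by _key_to_kb(), e.g. a secure key produced
 * through the pkey ioctl API.
 *
 *      int tfmfd, opfd;
 *      struct sockaddr_alg sa = {
 *              .salg_family = AF_ALG,
 *              .salg_type   = "skcipher",
 *              .salg_name   = "ecb(paes)",
 *      };
 *
 *      tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *      bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *      setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, blob, bloblen);
 *      opfd = accept(tfmfd, NULL, 0);
 *
 * Cipher requests are then issued on opfd with sendmsg() (using an
 * ALG_SET_OP control message) and read back with read()/recv().
 */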

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
        int rc;
        unsigned long fc;

        rc = __paes_convert_key(ctx);
        if (rc)
                return rc;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __cbc_paes_set_key(ctx);
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_kmc(ctx->fc | modifier, &param,
                              walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k) {
                        memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
                        ret = skcipher_walk_done(&walk, nbytes - k);
                }
                if (k < n) {
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        return ret;
}
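
/*
 * For KMC the chaining value (the CBC IV) lives in the parameter
 * block and is advanced by the instruction itself, which is why the
 * loop above copies param.iv back into walk.iv after every chunk:
 * the walker's IV must track the chaining state so that the request
 * completes with the correct final IV.
 */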

static int cbc_paes_encrypt(struct skcipher_request *req)
{
        return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
        return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
        .base.cra_name          = "cbc(paes)",
        .base.cra_driver_name   = "cbc-paes-s390",
        .base.cra_priority      = 402,  /* ecb-paes-s390 + 1 */
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct s390_paes_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_list          = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
        .init                   = cbc_paes_init,
        .exit                   = cbc_paes_exit,
        .min_keysize            = PAES_MIN_KEYSIZE,
        .max_keysize            = PAES_MAX_KEYSIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = cbc_paes_set_key,
        .encrypt                = cbc_paes_encrypt,
        .decrypt                = cbc_paes_decrypt,
};

static int xts_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb[0].key = NULL;
        ctx->kb[1].key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb[0]);
        _free_kb_keybuf(&ctx->kb[1]);
}

static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
        struct pkey_protkey pkey0, pkey1;

        if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
            __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
                return -EINVAL;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
        memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
        spin_unlock_bh(&ctx->pk_lock);

        return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
        unsigned long fc;

        if (__xts_paes_convert_key(ctx))
                return -EINVAL;

        if (ctx->pk[0].type != ctx->pk[1].type)
                return -EINVAL;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
             (ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
             CPACF_KM_PXTS_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

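/*
 * The XTS key material is the concatenation of two equally sized
 * key blobs, one per XTS key. As a sketch: for a clear key
 * XTS-AES-256 setup, user space passes 64 bytes, each 32 byte half
 * is wrapped into its own clear key token by _key_to_kb(), and both
 * blobs are converted to protected keys of type PKEY_KEYTYPE_AES_256.
 */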
static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int xts_key_len)
{
        int rc;
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 ckey[2 * AES_MAX_KEY_SIZE];
        unsigned int ckey_len, key_len;

        if (xts_key_len % 2)
                return -EINVAL;

        key_len = xts_key_len / 2;

        _free_kb_keybuf(&ctx->kb[0]);
        _free_kb_keybuf(&ctx->kb[1]);
        rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
        if (rc)
                return rc;
        rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
        if (rc)
                return rc;

        rc = __xts_paes_set_key(ctx);
        if (rc)
                return rc;

        /*
         * xts_verify_key() verifies that the key length is not odd
         * and makes sure that the two keys are not the same. This
         * check can be done on the two protected keys as well.
         */
        ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
                AES_KEYSIZE_128 : AES_KEYSIZE_256;
        memcpy(ckey, ctx->pk[0].protkey, ckey_len);
        memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
        return xts_verify_key(tfm, ckey, 2*ckey_len);
}

static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int keylen, offset, nbytes, n, k;
        int ret;
        struct {
                u8 key[MAXPROTKEYSIZE];        /* key + verification pattern */
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[MAXPROTKEYSIZE];        /* key + verification pattern */
                u8 init[16];
        } xts_param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
        offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

        memset(&pcc_param, 0, sizeof(pcc_param));
        memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
        spin_lock_bh(&ctx->pk_lock);
        memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
        memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
        spin_unlock_bh(&ctx->pk_lock);
        cpacf_pcc(ctx->fc, pcc_param.key + offset);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk.nbytes) != 0) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
                             walk.dst.virt.addr, walk.src.virt.addr, n);
                if (k)
                        ret = skcipher_walk_done(&walk, nbytes - k);
                if (k < n) {
                        if (__xts_paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(xts_param.key + offset,
                               ctx->pk[0].protkey, keylen);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }

        return ret;
}
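
/*
 * Note on keylen/offset above: the parameter block handed to PCC/KM
 * starts at "key + offset" and must contain the protected key
 * immediately followed by the tweak/init field. A protected AES-128
 * key occupies only 48 of the 64 reserved bytes, so it is placed
 * right-aligned (offset 16) to stay adjacent to the field behind it.
 * cpacf_pcc() computes the initial XTS parameter from the tweak with
 * the second key; the result seeds xts_param.init for the KM calls.
 */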

static int xts_paes_encrypt(struct skcipher_request *req)
{
        return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
        return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
        .base.cra_name          = "xts(paes)",
        .base.cra_driver_name   = "xts-paes-s390",
        .base.cra_priority      = 402,  /* ecb-paes-s390 + 1 */
        .base.cra_blocksize     = AES_BLOCK_SIZE,
        .base.cra_ctxsize       = sizeof(struct s390_pxts_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_list          = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
        .init                   = xts_paes_init,
        .exit                   = xts_paes_exit,
        .min_keysize            = 2 * PAES_MIN_KEYSIZE,
        .max_keysize            = 2 * PAES_MAX_KEYSIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = xts_paes_set_key,
        .encrypt                = xts_paes_encrypt,
        .decrypt                = xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->kb.key = NULL;
        spin_lock_init(&ctx->pk_lock);

        return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
        int rc;
        unsigned long fc;

        rc = __paes_convert_key(ctx);
        if (rc)
                return rc;

        /* Pick the correct function code based on the protected key type */
        fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
             (ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
             CPACF_KMCTR_PAES_256 : 0;

        /* Check if the function code is available */
        ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

        return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                            unsigned int key_len)
{
        int rc;
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

        _free_kb_keybuf(&ctx->kb);
        rc = _key_to_kb(&ctx->kb, in_key, key_len);
        if (rc)
                return rc;

        return __ctr_paes_set_key(ctx);
}

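/*
 * Fill a page sized buffer with ascending counter values so that a
 * single KMCTR invocation can process many blocks at once. For
 * example, nbytes = 100 rounds down to n = 96, i.e. six counter
 * blocks: the current IV followed by five incremented copies.
 */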
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}

static int ctr_paes_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        struct skcipher_walk walk;
        unsigned int nbytes, n, k;
        int ret, locked;
        struct {
                u8 key[MAXPROTKEYSIZE];
        } param;

        ret = skcipher_walk_virt(&walk, req, false);
        if (ret)
                return ret;

        spin_lock_bh(&ctx->pk_lock);
        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
        spin_unlock_bh(&ctx->pk_lock);

        locked = mutex_trylock(&ctrblk_lock);

        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk.iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
                k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
                                walk.src.virt.addr, n, ctrptr);
                if (k) {
                        if (ctrptr == ctrblk)
                                memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(walk.iv, AES_BLOCK_SIZE);
                        ret = skcipher_walk_done(&walk, nbytes - k);
                }
                if (k < n) {
                        if (__paes_convert_key(ctx)) {
                                if (locked)
                                        mutex_unlock(&ctrblk_lock);
                                return skcipher_walk_done(&walk, -EIO);
                        }
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
        }
        if (locked)
                mutex_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                while (1) {
                        if (cpacf_kmctr(ctx->fc, &param, buf,
                                        walk.src.virt.addr, AES_BLOCK_SIZE,
                                        walk.iv) == AES_BLOCK_SIZE)
                                break;
                        if (__paes_convert_key(ctx))
                                return skcipher_walk_done(&walk, -EIO);
                        spin_lock_bh(&ctx->pk_lock);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
                        spin_unlock_bh(&ctx->pk_lock);
                }
                memcpy(walk.dst.virt.addr, buf, nbytes);
                crypto_inc(walk.iv, AES_BLOCK_SIZE);
                ret = skcipher_walk_done(&walk, 0);
        }

        return ret;
}
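
/*
 * CTR mode XORs a keystream over the data, so encryption and
 * decryption are the same operation and both alg callbacks below
 * point at ctr_paes_crypt(). ctrblk is a single page shared by all
 * tfms; the mutex_trylock() above means a contending request simply
 * falls back to one counter block at a time instead of blocking.
 */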

static struct skcipher_alg ctr_paes_alg = {
        .base.cra_name          = "ctr(paes)",
        .base.cra_driver_name   = "ctr-paes-s390",
        .base.cra_priority      = 402,  /* ecb-paes-s390 + 1 */
        .base.cra_blocksize     = 1,
        .base.cra_ctxsize       = sizeof(struct s390_paes_ctx),
        .base.cra_module        = THIS_MODULE,
        .base.cra_list          = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
        .init                   = ctr_paes_init,
        .exit                   = ctr_paes_exit,
        .min_keysize            = PAES_MIN_KEYSIZE,
        .max_keysize            = PAES_MAX_KEYSIZE,
        .ivsize                 = AES_BLOCK_SIZE,
        .setkey                 = ctr_paes_set_key,
        .encrypt                = ctr_paes_crypt,
        .decrypt                = ctr_paes_crypt,
        .chunksize              = AES_BLOCK_SIZE,
};

static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        if (!list_empty(&alg->base.cra_list))
                crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
        __crypto_unregister_skcipher(&ctr_paes_alg);
        __crypto_unregister_skcipher(&xts_paes_alg);
        __crypto_unregister_skcipher(&cbc_paes_alg);
        __crypto_unregister_skcipher(&ecb_paes_alg);
        if (ctrblk)
                free_page((unsigned long) ctrblk);
}

static int __init paes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC and KMCTR */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);

        if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
                ret = crypto_register_skcipher(&ecb_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
                ret = crypto_register_skcipher(&cbc_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
                ret = crypto_register_skcipher(&xts_paes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = crypto_register_skcipher(&ctr_paes_alg);
                if (ret)
                        goto out_err;
        }

        return 0;
out_err:
        paes_s390_fini();
        return ret;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("paes");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");