1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Cryptographic API.
4 *
5 * s390 implementation of the AES Cipher Algorithm.
6 *
7 * s390 Version:
8 * Copyright IBM Corp. 2005, 2017
9 * Author(s): Jan Glauber (jang@de.ibm.com)
10 * Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
11 * Patrick Steuer <patrick.steuer@de.ibm.com>
12 * Harald Freudenberger <freude@de.ibm.com>
13 *
14 * Derived from "crypto/aes_generic.c"
15 */
16
17#define KMSG_COMPONENT "aes_s390"
18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19
20#include <crypto/aes.h>
21#include <crypto/algapi.h>
22#include <crypto/ghash.h>
23#include <crypto/internal/aead.h>
24#include <crypto/internal/cipher.h>
25#include <crypto/internal/skcipher.h>
26#include <crypto/scatterwalk.h>
27#include <linux/err.h>
28#include <linux/module.h>
29#include <linux/cpufeature.h>
30#include <linux/init.h>
31#include <linux/mutex.h>
32#include <linux/fips.h>
33#include <linux/string.h>
34#include <crypto/xts.h>
35#include <asm/cpacf.h>
36
37static u8 *ctrblk;
38static DEFINE_MUTEX(ctrblk_lock);
39
40static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
41 kma_functions;
42
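/*
 * Per-tfm context for the AES cipher and skcipher modes: the raw key and
 * its length, the CPACF function code selected at setkey time (0 means the
 * machine does not provide the needed instruction and the software
 * fallback below is used instead), and the fallback transform itself.
 */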
43struct s390_aes_ctx {
44 u8 key[AES_MAX_KEY_SIZE];
45 int key_len;
46 unsigned long fc;
47 union {
48 struct crypto_skcipher *skcipher;
49 struct crypto_cipher *cip;
50 } fallback;
51};
52
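/*
 * Per-tfm context for xts(aes): the XTS key is split at setkey time into
 * the data-encryption key (key, first half) and the tweak-derivation key
 * (pcc_key, second half). key_len holds the length of each half; fc is the
 * CPACF KM XTS function code, 0 if requests have to go to the fallback.
 */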
53struct s390_xts_ctx {
54 u8 key[32];
55 u8 pcc_key[32];
56 int key_len;
57 unsigned long fc;
58 struct crypto_skcipher *fallback;
59};
60
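/*
 * State for walking the source/destination scatterlists in gcm(aes). The
 * walker hands out either a direct pointer into the current scatterlist
 * segment or, when the needed bytes straddle a segment boundary, the
 * bounce buffer buf; ptr/nbytes describe the chunk currently usable by KMA.
 */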
61struct gcm_sg_walk {
62 struct scatter_walk walk;
63 unsigned int walk_bytes;
64 u8 *walk_ptr;
65 unsigned int walk_bytes_remain;
66 u8 buf[AES_BLOCK_SIZE];
67 unsigned int buf_bytes;
68 u8 *ptr;
69 unsigned int nbytes;
70};
71
72static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
73 unsigned int key_len)
74{
75 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
76
77 sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
78 sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
79 CRYPTO_TFM_REQ_MASK);
80
81 return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
82}
83
84static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
85 unsigned int key_len)
86{
87 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
88 unsigned long fc;
89
90 /* Pick the correct function code based on the key length */
91 fc = (key_len == 16) ? CPACF_KM_AES_128 :
92 (key_len == 24) ? CPACF_KM_AES_192 :
93 (key_len == 32) ? CPACF_KM_AES_256 : 0;
94
95 /* Check if the function code is available */
96 sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
97 if (!sctx->fc)
98 return setkey_fallback_cip(tfm, in_key, key_len);
99
100 sctx->key_len = key_len;
101 memcpy(sctx->key, in_key, key_len);
102 return 0;
103}
104
105static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
106{
107 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
108
109 if (unlikely(!sctx->fc)) {
110 crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
111 return;
112 }
113 cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
114}
115
116static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
117{
118 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
119
120 if (unlikely(!sctx->fc)) {
121 crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
122 return;
123 }
124 cpacf_km(sctx->fc | CPACF_DECRYPT,
125 &sctx->key, out, in, AES_BLOCK_SIZE);
126}
127
128static int fallback_init_cip(struct crypto_tfm *tfm)
129{
130 const char *name = tfm->__crt_alg->cra_name;
131 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
132
133 sctx->fallback.cip = crypto_alloc_cipher(name, 0,
134 CRYPTO_ALG_NEED_FALLBACK);
135
136 if (IS_ERR(sctx->fallback.cip)) {
137 pr_err("Allocating AES fallback algorithm %s failed\n",
138 name);
139 return PTR_ERR(sctx->fallback.cip);
140 }
141
142 return 0;
143}
144
145static void fallback_exit_cip(struct crypto_tfm *tfm)
146{
147 struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
148
149 crypto_free_cipher(sctx->fallback.cip);
150 sctx->fallback.cip = NULL;
151}
152
153static struct crypto_alg aes_alg = {
154 .cra_name = "aes",
155 .cra_driver_name = "aes-s390",
156 .cra_priority = 300,
157 .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
158 CRYPTO_ALG_NEED_FALLBACK,
159 .cra_blocksize = AES_BLOCK_SIZE,
160 .cra_ctxsize = sizeof(struct s390_aes_ctx),
161 .cra_module = THIS_MODULE,
162 .cra_init = fallback_init_cip,
163 .cra_exit = fallback_exit_cip,
164 .cra_u = {
165 .cipher = {
166 .cia_min_keysize = AES_MIN_KEY_SIZE,
167 .cia_max_keysize = AES_MAX_KEY_SIZE,
168 .cia_setkey = aes_set_key,
169 .cia_encrypt = crypto_aes_encrypt,
170 .cia_decrypt = crypto_aes_decrypt,
171 }
172 }
173};
174
175static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
176 unsigned int len)
177{
178 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
179
180 crypto_skcipher_clear_flags(sctx->fallback.skcipher,
181 CRYPTO_TFM_REQ_MASK);
182 crypto_skcipher_set_flags(sctx->fallback.skcipher,
183 crypto_skcipher_get_flags(tfm) &
184 CRYPTO_TFM_REQ_MASK);
185 return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
186}
187
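/*
 * Run a request on the software fallback: the original request is copied
 * into the request context (sized for this in fallback_init_skcipher()),
 * retargeted at the fallback tfm and submitted as encrypt or decrypt
 * depending on the CPACF modifier.
 */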
188static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
189 struct skcipher_request *req,
190 unsigned long modifier)
191{
192 struct skcipher_request *subreq = skcipher_request_ctx(req);
193
194 *subreq = *req;
195 skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
196 return (modifier & CPACF_DECRYPT) ?
197 crypto_skcipher_decrypt(subreq) :
198 crypto_skcipher_encrypt(subreq);
199}
200
201static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
202 unsigned int key_len)
203{
204 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
205 unsigned long fc;
206
207 /* Pick the correct function code based on the key length */
208 fc = (key_len == 16) ? CPACF_KM_AES_128 :
209 (key_len == 24) ? CPACF_KM_AES_192 :
210 (key_len == 32) ? CPACF_KM_AES_256 : 0;
211
212 /* Check if the function code is available */
213 sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
214 if (!sctx->fc)
215 return setkey_fallback_skcipher(tfm, in_key, key_len);
216
217 sctx->key_len = key_len;
218 memcpy(sctx->key, in_key, key_len);
219 return 0;
220}
221
222static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
223{
224 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
225 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
226 struct skcipher_walk walk;
227 unsigned int nbytes, n;
228 int ret;
229
230 if (unlikely(!sctx->fc))
231 return fallback_skcipher_crypt(sctx, req, modifier);
232
233 ret = skcipher_walk_virt(&walk, req, false);
234 while ((nbytes = walk.nbytes) != 0) {
235 /* only use complete blocks */
236 n = nbytes & ~(AES_BLOCK_SIZE - 1);
237 cpacf_km(sctx->fc | modifier, sctx->key,
238 walk.dst.virt.addr, walk.src.virt.addr, n);
239 ret = skcipher_walk_done(&walk, nbytes - n);
240 }
241 return ret;
242}
243
244static int ecb_aes_encrypt(struct skcipher_request *req)
245{
246 return ecb_aes_crypt(req, 0);
247}
248
249static int ecb_aes_decrypt(struct skcipher_request *req)
250{
251 return ecb_aes_crypt(req, CPACF_DECRYPT);
252}
253
254static int fallback_init_skcipher(struct crypto_skcipher *tfm)
255{
256 const char *name = crypto_tfm_alg_name(&tfm->base);
257 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
258
259 sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
260 CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
261
262 if (IS_ERR(sctx->fallback.skcipher)) {
263 pr_err("Allocating AES fallback algorithm %s failed\n",
264 name);
265 return PTR_ERR(sctx->fallback.skcipher);
266 }
267
268 crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
269 crypto_skcipher_reqsize(sctx->fallback.skcipher));
270 return 0;
271}
272
273static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
274{
275 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
276
277 crypto_free_skcipher(sctx->fallback.skcipher);
278}
279
280static struct skcipher_alg ecb_aes_alg = {
281 .base.cra_name = "ecb(aes)",
282 .base.cra_driver_name = "ecb-aes-s390",
283 .base.cra_priority = 401, /* combo: aes + ecb + 1 */
284 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
285 .base.cra_blocksize = AES_BLOCK_SIZE,
286 .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
287 .base.cra_module = THIS_MODULE,
288 .init = fallback_init_skcipher,
289 .exit = fallback_exit_skcipher,
290 .min_keysize = AES_MIN_KEY_SIZE,
291 .max_keysize = AES_MAX_KEY_SIZE,
292 .setkey = ecb_aes_set_key,
293 .encrypt = ecb_aes_encrypt,
294 .decrypt = ecb_aes_decrypt,
295};
296
297static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
298 unsigned int key_len)
299{
300 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
301 unsigned long fc;
302
303 /* Pick the correct function code based on the key length */
304 fc = (key_len == 16) ? CPACF_KMC_AES_128 :
305 (key_len == 24) ? CPACF_KMC_AES_192 :
306 (key_len == 32) ? CPACF_KMC_AES_256 : 0;
307
308 /* Check if the function code is available */
309 sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
310 if (!sctx->fc)
311 return setkey_fallback_skcipher(tfm, in_key, key_len);
312
313 sctx->key_len = key_len;
314 memcpy(sctx->key, in_key, key_len);
315 return 0;
316}
317
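/*
 * CBC via CPACF KMC: the parameter block is the chaining value (IV)
 * followed by the key. KMC updates the chaining value in the parameter
 * block, so it is copied back to walk.iv after each chunk and the whole
 * parameter block is wiped when done.
 */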
318static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
319{
320 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
321 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
322 struct skcipher_walk walk;
323 unsigned int nbytes, n;
324 int ret;
325 struct {
326 u8 iv[AES_BLOCK_SIZE];
327 u8 key[AES_MAX_KEY_SIZE];
328 } param;
329
330 if (unlikely(!sctx->fc))
331 return fallback_skcipher_crypt(sctx, req, modifier);
332
333 ret = skcipher_walk_virt(&walk, req, false);
334 if (ret)
335 return ret;
336 memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
337 memcpy(param.key, sctx->key, sctx->key_len);
338 while ((nbytes = walk.nbytes) != 0) {
339 /* only use complete blocks */
340 n = nbytes & ~(AES_BLOCK_SIZE - 1);
341 cpacf_kmc(sctx->fc | modifier, &param,
342 walk.dst.virt.addr, walk.src.virt.addr, n);
343 memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
344 ret = skcipher_walk_done(&walk, nbytes - n);
345 }
346 memzero_explicit(&param, sizeof(param));
347 return ret;
348}
349
350static int cbc_aes_encrypt(struct skcipher_request *req)
351{
352 return cbc_aes_crypt(req, 0);
353}
354
355static int cbc_aes_decrypt(struct skcipher_request *req)
356{
357 return cbc_aes_crypt(req, CPACF_DECRYPT);
358}
359
360static struct skcipher_alg cbc_aes_alg = {
361 .base.cra_name = "cbc(aes)",
362 .base.cra_driver_name = "cbc-aes-s390",
363 .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
364 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
365 .base.cra_blocksize = AES_BLOCK_SIZE,
366 .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
367 .base.cra_module = THIS_MODULE,
368 .init = fallback_init_skcipher,
369 .exit = fallback_exit_skcipher,
370 .min_keysize = AES_MIN_KEY_SIZE,
371 .max_keysize = AES_MAX_KEY_SIZE,
372 .ivsize = AES_BLOCK_SIZE,
373 .setkey = cbc_aes_set_key,
374 .encrypt = cbc_aes_encrypt,
375 .decrypt = cbc_aes_decrypt,
376};
377
378static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
379 unsigned int len)
380{
381 struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
382
383 crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
384 crypto_skcipher_set_flags(xts_ctx->fallback,
385 crypto_skcipher_get_flags(tfm) &
386 CRYPTO_TFM_REQ_MASK);
387 return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
388}
389
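/*
 * The fallback tfm is keyed unconditionally: it still has to serve
 * requests for key lengths without a KM XTS function code (e.g. 48-byte
 * keys) and, in xts_aes_crypt(), lengths that are not a multiple of the
 * block size.
 */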
390static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
391 unsigned int key_len)
392{
393 struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
394 unsigned long fc;
395 int err;
396
397 err = xts_fallback_setkey(tfm, in_key, key_len);
398 if (err)
399 return err;
400
401 /* Pick the correct function code based on the key length */
402 fc = (key_len == 32) ? CPACF_KM_XTS_128 :
403 (key_len == 64) ? CPACF_KM_XTS_256 : 0;
404
405 /* Check if the function code is available */
406 xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
407 if (!xts_ctx->fc)
408 return 0;
409
410 /* Split the XTS key into the two subkeys */
411 key_len = key_len / 2;
412 xts_ctx->key_len = key_len;
413 memcpy(xts_ctx->key, in_key, key_len);
414 memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
415 return 0;
416}
417
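/*
 * XTS via CPACF: PCC derives the initial tweak from pcc_key and the IV
 * (result in pcc_param.xts), which then seeds the "init" field of the KM
 * parameter block. The key_len & 0x10 offset shifts the start of the
 * parameter block so that its key field matches the shorter layout used
 * with 128-bit XTS keys. Requests the hardware path cannot handle (no
 * function code, or a length that is not block aligned) go to the
 * fallback skcipher.
 */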
418static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
419{
420 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
421 struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
422 struct skcipher_walk walk;
423 unsigned int offset, nbytes, n;
424 int ret;
425 struct {
426 u8 key[32];
427 u8 tweak[16];
428 u8 block[16];
429 u8 bit[16];
430 u8 xts[16];
431 } pcc_param;
432 struct {
433 u8 key[32];
434 u8 init[16];
435 } xts_param;
436
437 if (req->cryptlen < AES_BLOCK_SIZE)
438 return -EINVAL;
439
440 if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
441 struct skcipher_request *subreq = skcipher_request_ctx(req);
442
443 *subreq = *req;
444 skcipher_request_set_tfm(subreq, xts_ctx->fallback);
445 return (modifier & CPACF_DECRYPT) ?
446 crypto_skcipher_decrypt(subreq) :
447 crypto_skcipher_encrypt(subreq);
448 }
449
450 ret = skcipher_walk_virt(&walk, req, false);
451 if (ret)
452 return ret;
453 offset = xts_ctx->key_len & 0x10;
454 memset(pcc_param.block, 0, sizeof(pcc_param.block));
455 memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
456 memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
457 memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
458 memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
459 cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
460
461 memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
462 memcpy(xts_param.init, pcc_param.xts, 16);
463
464 while ((nbytes = walk.nbytes) != 0) {
465 /* only use complete blocks */
466 n = nbytes & ~(AES_BLOCK_SIZE - 1);
467 cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
468 walk.dst.virt.addr, walk.src.virt.addr, n);
469 ret = skcipher_walk_done(&walk, nbytes - n);
470 }
471 memzero_explicit(&pcc_param, sizeof(pcc_param));
472 memzero_explicit(&xts_param, sizeof(xts_param));
473 return ret;
474}
475
476static int xts_aes_encrypt(struct skcipher_request *req)
477{
478 return xts_aes_crypt(req, 0);
479}
480
481static int xts_aes_decrypt(struct skcipher_request *req)
482{
483 return xts_aes_crypt(req, CPACF_DECRYPT);
484}
485
486static int xts_fallback_init(struct crypto_skcipher *tfm)
487{
488 const char *name = crypto_tfm_alg_name(&tfm->base);
489 struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
490
491 xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
492 CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
493
494 if (IS_ERR(xts_ctx->fallback)) {
495 pr_err("Allocating XTS fallback algorithm %s failed\n",
496 name);
497 return PTR_ERR(xts_ctx->fallback);
498 }
499 crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
500 crypto_skcipher_reqsize(xts_ctx->fallback));
501 return 0;
502}
503
504static void xts_fallback_exit(struct crypto_skcipher *tfm)
505{
506 struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
507
508 crypto_free_skcipher(xts_ctx->fallback);
509}
510
511static struct skcipher_alg xts_aes_alg = {
512 .base.cra_name = "xts(aes)",
513 .base.cra_driver_name = "xts-aes-s390",
514 .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
515 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
516 .base.cra_blocksize = AES_BLOCK_SIZE,
517 .base.cra_ctxsize = sizeof(struct s390_xts_ctx),
518 .base.cra_module = THIS_MODULE,
519 .init = xts_fallback_init,
520 .exit = xts_fallback_exit,
521 .min_keysize = 2 * AES_MIN_KEY_SIZE,
522 .max_keysize = 2 * AES_MAX_KEY_SIZE,
523 .ivsize = AES_BLOCK_SIZE,
524 .setkey = xts_aes_set_key,
525 .encrypt = xts_aes_encrypt,
526 .decrypt = xts_aes_decrypt,
527};
528
529static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
530 unsigned int key_len)
531{
532 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
533 unsigned long fc;
534
535 /* Pick the correct function code based on the key length */
536 fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
537 (key_len == 24) ? CPACF_KMCTR_AES_192 :
538 (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
539
540 /* Check if the function code is available */
541 sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
542 if (!sctx->fc)
543 return setkey_fallback_skcipher(tfm, in_key, key_len);
544
545 sctx->key_len = key_len;
546 memcpy(sctx->key, in_key, key_len);
547 return 0;
548}
549
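/*
 * Fill ctrptr with consecutive counter blocks, starting from iv, for as
 * many complete blocks as fit into nbytes (capped at one page). Returns
 * the number of bytes worth of counters prepared.
 */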
550static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
551{
552 unsigned int i, n;
553
554 /* only use complete blocks, max. PAGE_SIZE */
555 memcpy(ctrptr, iv, AES_BLOCK_SIZE);
556 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
557 for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
558 memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
559 crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
560 ctrptr += AES_BLOCK_SIZE;
561 }
562 return n;
563}
564
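/*
 * CTR mode: when the global ctrblk page can be locked, a run of counter
 * values is precomputed with __ctrblk_init() so that a single KMCTR call
 * covers many blocks; otherwise one block is processed per call using
 * walk.iv directly. A trailing partial block is padded into a stack
 * buffer, processed as a full block, and only nbytes are copied to the
 * destination.
 */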
565static int ctr_aes_crypt(struct skcipher_request *req)
566{
567 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
568 struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
569 u8 buf[AES_BLOCK_SIZE], *ctrptr;
570 struct skcipher_walk walk;
571 unsigned int n, nbytes;
572 int ret, locked;
573
574 if (unlikely(!sctx->fc))
575 return fallback_skcipher_crypt(sctx, req, 0);
576
577 locked = mutex_trylock(&ctrblk_lock);
578
579 ret = skcipher_walk_virt(&walk, req, false);
580 while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
581 n = AES_BLOCK_SIZE;
582
583 if (nbytes >= 2*AES_BLOCK_SIZE && locked)
584 n = __ctrblk_init(ctrblk, walk.iv, nbytes);
585 ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
586 cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
587 walk.src.virt.addr, n, ctrptr);
588 if (ctrptr == ctrblk)
589 memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
590 AES_BLOCK_SIZE);
591 crypto_inc(walk.iv, AES_BLOCK_SIZE);
592 ret = skcipher_walk_done(&walk, nbytes - n);
593 }
594 if (locked)
595 mutex_unlock(&ctrblk_lock);
596 /*
597 * final block may be < AES_BLOCK_SIZE, copy only nbytes
598 */
599 if (nbytes) {
600 memset(buf, 0, AES_BLOCK_SIZE);
601 memcpy(buf, walk.src.virt.addr, nbytes);
602 cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
603 AES_BLOCK_SIZE, walk.iv);
604 memcpy(walk.dst.virt.addr, buf, nbytes);
605 crypto_inc(walk.iv, AES_BLOCK_SIZE);
606 ret = skcipher_walk_done(&walk, 0);
607 }
608
609 return ret;
610}
611
612static struct skcipher_alg ctr_aes_alg = {
613 .base.cra_name = "ctr(aes)",
614 .base.cra_driver_name = "ctr-aes-s390",
615 .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
616 .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
617 .base.cra_blocksize = 1,
618 .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
619 .base.cra_module = THIS_MODULE,
620 .init = fallback_init_skcipher,
621 .exit = fallback_exit_skcipher,
622 .min_keysize = AES_MIN_KEY_SIZE,
623 .max_keysize = AES_MAX_KEY_SIZE,
624 .ivsize = AES_BLOCK_SIZE,
625 .setkey = ctr_aes_set_key,
626 .encrypt = ctr_aes_crypt,
627 .decrypt = ctr_aes_crypt,
628 .chunksize = AES_BLOCK_SIZE,
629};
630
631static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
632 unsigned int keylen)
633{
634 struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
635
636 switch (keylen) {
637 case AES_KEYSIZE_128:
638 ctx->fc = CPACF_KMA_GCM_AES_128;
639 break;
640 case AES_KEYSIZE_192:
641 ctx->fc = CPACF_KMA_GCM_AES_192;
642 break;
643 case AES_KEYSIZE_256:
644 ctx->fc = CPACF_KMA_GCM_AES_256;
645 break;
646 default:
647 return -EINVAL;
648 }
649
650 memcpy(ctx->key, key, keylen);
651 ctx->key_len = keylen;
652 return 0;
653}
654
655static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
656{
657 switch (authsize) {
658 case 4:
659 case 8:
660 case 12:
661 case 13:
662 case 14:
663 case 15:
664 case 16:
665 break;
666 default:
667 return -EINVAL;
668 }
669
670 return 0;
671}
672
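/*
 * Scatterlist walk helpers for gcm(aes): gcm_in_walk_go() and
 * gcm_out_walk_go() hand KMA a pointer/length to work on, collecting data
 * in gw->buf when the needed bytes straddle a segment boundary; the
 * corresponding *_done() helpers account for what was actually processed,
 * copying buffered output back into the destination scatterlist.
 */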
673static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
674 unsigned int len)
675{
676 memset(gw, 0, sizeof(*gw));
677 gw->walk_bytes_remain = len;
678 scatterwalk_start(&gw->walk, sg);
679}
680
681static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
682{
683 struct scatterlist *nextsg;
684
685 gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
686 while (!gw->walk_bytes) {
687 nextsg = sg_next(gw->walk.sg);
688 if (!nextsg)
689 return 0;
690 scatterwalk_start(&gw->walk, nextsg);
691 gw->walk_bytes = scatterwalk_clamp(&gw->walk,
692 gw->walk_bytes_remain);
693 }
694 gw->walk_ptr = scatterwalk_map(&gw->walk);
695 return gw->walk_bytes;
696}
697
698static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
699 unsigned int nbytes)
700{
701 gw->walk_bytes_remain -= nbytes;
702 scatterwalk_unmap(gw->walk_ptr);
703 scatterwalk_advance(&gw->walk, nbytes);
704 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
705 gw->walk_ptr = NULL;
706}
707
708static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
709{
710 int n;
711
712 if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
713 gw->ptr = gw->buf;
714 gw->nbytes = gw->buf_bytes;
715 goto out;
716 }
717
718 if (gw->walk_bytes_remain == 0) {
719 gw->ptr = NULL;
720 gw->nbytes = 0;
721 goto out;
722 }
723
724 if (!_gcm_sg_clamp_and_map(gw)) {
725 gw->ptr = NULL;
726 gw->nbytes = 0;
727 goto out;
728 }
729
730 if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
731 gw->ptr = gw->walk_ptr;
732 gw->nbytes = gw->walk_bytes;
733 goto out;
734 }
735
736 while (1) {
737 n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
738 memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
739 gw->buf_bytes += n;
740 _gcm_sg_unmap_and_advance(gw, n);
741 if (gw->buf_bytes >= minbytesneeded) {
742 gw->ptr = gw->buf;
743 gw->nbytes = gw->buf_bytes;
744 goto out;
745 }
746 if (!_gcm_sg_clamp_and_map(gw)) {
747 gw->ptr = NULL;
748 gw->nbytes = 0;
749 goto out;
750 }
751 }
752
753out:
754 return gw->nbytes;
755}
756
757static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
758{
759 if (gw->walk_bytes_remain == 0) {
760 gw->ptr = NULL;
761 gw->nbytes = 0;
762 goto out;
763 }
764
765 if (!_gcm_sg_clamp_and_map(gw)) {
766 gw->ptr = NULL;
767 gw->nbytes = 0;
768 goto out;
769 }
770
771 if (gw->walk_bytes >= minbytesneeded) {
772 gw->ptr = gw->walk_ptr;
773 gw->nbytes = gw->walk_bytes;
774 goto out;
775 }
776
777 scatterwalk_unmap(gw->walk_ptr);
778 gw->walk_ptr = NULL;
779
780 gw->ptr = gw->buf;
781 gw->nbytes = sizeof(gw->buf);
782
783out:
784 return gw->nbytes;
785}
786
787static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
788{
789 if (gw->ptr == NULL)
790 return 0;
791
792 if (gw->ptr == gw->buf) {
793 int n = gw->buf_bytes - bytesdone;
794 if (n > 0) {
795 memmove(gw->buf, gw->buf + bytesdone, n);
796 gw->buf_bytes = n;
797 } else
798 gw->buf_bytes = 0;
799 } else
800 _gcm_sg_unmap_and_advance(gw, bytesdone);
801
802 return bytesdone;
803}
804
805static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
806{
807 int i, n;
808
809 if (gw->ptr == NULL)
810 return 0;
811
812 if (gw->ptr == gw->buf) {
813 for (i = 0; i < bytesdone; i += n) {
814 if (!_gcm_sg_clamp_and_map(gw))
815 return i;
816 n = min(gw->walk_bytes, bytesdone - i);
817 memcpy(gw->walk_ptr, gw->buf + i, n);
818 _gcm_sg_unmap_and_advance(gw, n);
819 }
820 } else
821 _gcm_sg_unmap_and_advance(gw, bytesdone);
822
823 return bytesdone;
824}
825
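/*
 * gcm(aes) via CPACF KMA. The parameter block below carries the counter
 * value, the running tag, the hash subkey, the total AAD and text
 * lengths, J0 and the key. The main loop feeds KMA chunks of AAD and
 * plain-/ciphertext as the scatterlist walkers provide them, setting the
 * LAAD/LPC flags once the last AAD respectively text bytes are included;
 * on decryption the computed tag is compared against the tag stored
 * behind the ciphertext in req->src.
 */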
826static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
827{
828 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
829 struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
830 unsigned int ivsize = crypto_aead_ivsize(tfm);
831 unsigned int taglen = crypto_aead_authsize(tfm);
832 unsigned int aadlen = req->assoclen;
833 unsigned int pclen = req->cryptlen;
834 int ret = 0;
835
836 unsigned int n, len, in_bytes, out_bytes,
837 min_bytes, bytes, aad_bytes, pc_bytes;
838 struct gcm_sg_walk gw_in, gw_out;
839 u8 tag[GHASH_DIGEST_SIZE];
840
841 struct {
842 u32 _[3]; /* reserved */
843 u32 cv; /* Counter Value */
844 u8 t[GHASH_DIGEST_SIZE];/* Tag */
845 u8 h[AES_BLOCK_SIZE]; /* Hash-subkey */
846 u64 taadl; /* Total AAD Length */
847 u64 tpcl; /* Total Plain-/Cipher-text Length */
848 u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
849 u8 k[AES_MAX_KEY_SIZE]; /* Key */
850 } param;
851
852 /*
853 * encrypt
854 * req->src: aad||plaintext
855 * req->dst: aad||ciphertext||tag
856 * decrypt
857 * req->src: aad||ciphertext||tag
858 * req->dst: aad||plaintext, return 0 or -EBADMSG
859 * aad, plaintext and ciphertext may be empty.
860 */
861 if (flags & CPACF_DECRYPT)
862 pclen -= taglen;
863 len = aadlen + pclen;
864
865 memset(&param, 0, sizeof(param));
866 param.cv = 1;
867 param.taadl = aadlen * 8;
868 param.tpcl = pclen * 8;
869 memcpy(param.j0, req->iv, ivsize);
870 *(u32 *)(param.j0 + ivsize) = 1;
871 memcpy(param.k, ctx->key, ctx->key_len);
872
873 gcm_walk_start(&gw_in, req->src, len);
874 gcm_walk_start(&gw_out, req->dst, len);
875
876 do {
877 min_bytes = min_t(unsigned int,
878 aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
879 in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
880 out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
881 bytes = min(in_bytes, out_bytes);
882
883 if (aadlen + pclen <= bytes) {
884 aad_bytes = aadlen;
885 pc_bytes = pclen;
886 flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
887 } else {
888 if (aadlen <= bytes) {
889 aad_bytes = aadlen;
890 pc_bytes = (bytes - aadlen) &
891 ~(AES_BLOCK_SIZE - 1);
892 flags |= CPACF_KMA_LAAD;
893 } else {
894 aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
895 pc_bytes = 0;
896 }
897 }
898
899 if (aad_bytes > 0)
900 memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
901
902 cpacf_kma(ctx->fc | flags, &param,
903 gw_out.ptr + aad_bytes,
904 gw_in.ptr + aad_bytes, pc_bytes,
905 gw_in.ptr, aad_bytes);
906
907 n = aad_bytes + pc_bytes;
908 if (gcm_in_walk_done(&gw_in, n) != n)
909 return -ENOMEM;
910 if (gcm_out_walk_done(&gw_out, n) != n)
911 return -ENOMEM;
912 aadlen -= aad_bytes;
913 pclen -= pc_bytes;
914 } while (aadlen + pclen > 0);
915
916 if (flags & CPACF_DECRYPT) {
917 scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
918 if (crypto_memneq(tag, param.t, taglen))
919 ret = -EBADMSG;
920 } else
921 scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
922
923 memzero_explicit(&param, sizeof(param));
924 return ret;
925}
926
927static int gcm_aes_encrypt(struct aead_request *req)
928{
929 return gcm_aes_crypt(req, CPACF_ENCRYPT);
930}
931
932static int gcm_aes_decrypt(struct aead_request *req)
933{
934 return gcm_aes_crypt(req, CPACF_DECRYPT);
935}
936
937static struct aead_alg gcm_aes_aead = {
938 .setkey = gcm_aes_setkey,
939 .setauthsize = gcm_aes_setauthsize,
940 .encrypt = gcm_aes_encrypt,
941 .decrypt = gcm_aes_decrypt,
942
943 .ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
944 .maxauthsize = GHASH_DIGEST_SIZE,
945 .chunksize = AES_BLOCK_SIZE,
946
947 .base = {
948 .cra_blocksize = 1,
949 .cra_ctxsize = sizeof(struct s390_aes_ctx),
950 .cra_priority = 900,
951 .cra_name = "gcm(aes)",
952 .cra_driver_name = "gcm-aes-s390",
953 .cra_module = THIS_MODULE,
954 },
955};
956
957static struct crypto_alg *aes_s390_alg;
958static struct skcipher_alg *aes_s390_skcipher_algs[4];
959static int aes_s390_skciphers_num;
960static struct aead_alg *aes_s390_aead_alg;
961
962static int aes_s390_register_skcipher(struct skcipher_alg *alg)
963{
964 int ret;
965
966 ret = crypto_register_skcipher(alg);
967 if (!ret)
968 aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
969 return ret;
970}
971
972static void aes_s390_fini(void)
973{
974 if (aes_s390_alg)
975 crypto_unregister_alg(aes_s390_alg);
976 while (aes_s390_skciphers_num--)
977 crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
978 if (ctrblk)
979 free_page((unsigned long) ctrblk);
980
981 if (aes_s390_aead_alg)
982 crypto_unregister_aead(aes_s390_aead_alg);
983}
984
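/*
 * Module init: query the CPACF facility masks once and register only
 * those algorithms for which the machine provides at least one matching
 * function code; anything not registered here keeps being served by the
 * generic software implementations.
 */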
985static int __init aes_s390_init(void)
986{
987 int ret;
988
989 /* Query available functions for KM, KMC, KMCTR and KMA */
990 cpacf_query(CPACF_KM, &km_functions);
991 cpacf_query(CPACF_KMC, &kmc_functions);
992 cpacf_query(CPACF_KMCTR, &kmctr_functions);
993 cpacf_query(CPACF_KMA, &kma_functions);
994
995 if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
996 cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
997 cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
998 ret = crypto_register_alg(&aes_alg);
999 if (ret)
1000 goto out_err;
1001 aes_s390_alg = &aes_alg;
1002 ret = aes_s390_register_skcipher(&ecb_aes_alg);
1003 if (ret)
1004 goto out_err;
1005 }
1006
1007 if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1008 cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1009 cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1010 ret = aes_s390_register_skcipher(&cbc_aes_alg);
1011 if (ret)
1012 goto out_err;
1013 }
1014
1015 if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1016 cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1017 ret = aes_s390_register_skcipher(&xts_aes_alg);
1018 if (ret)
1019 goto out_err;
1020 }
1021
1022 if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1023 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1024 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1025 ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1026 if (!ctrblk) {
1027 ret = -ENOMEM;
1028 goto out_err;
1029 }
1030 ret = aes_s390_register_skcipher(&ctr_aes_alg);
1031 if (ret)
1032 goto out_err;
1033 }
1034
1035 if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1036 cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1037 cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1038 ret = crypto_register_aead(&gcm_aes_aead);
1039 if (ret)
1040 goto out_err;
1041 aes_s390_aead_alg = &gcm_aes_aead;
1042 }
1043
1044 return 0;
1045out_err:
1046 aes_s390_fini();
1047 return ret;
1048}
1049
1050module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
1051module_exit(aes_s390_fini);
1052
1053MODULE_ALIAS_CRYPTO("aes-all");
1054
1055MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1056MODULE_LICENSE("GPL");
1057MODULE_IMPORT_NS(CRYPTO_INTERNAL);
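/*
 * Illustrative only (not part of this driver): a minimal sketch of how a
 * kernel user might exercise one of the skciphers registered above through
 * the generic crypto API; on machines with CPACF the "ctr(aes)" request
 * below would typically resolve to ctr-aes-s390 by priority. Identifiers
 * local to the sketch (do_ctr_aes_demo and its parameters) are made up for
 * the example.
 *
 *	static int do_ctr_aes_demo(const u8 *key, unsigned int keylen,
 *				   u8 *buf, unsigned int len, u8 *iv)
 *	{
 *		struct crypto_skcipher *tfm;
 *		struct skcipher_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *		err = crypto_skcipher_setkey(tfm, key, keylen);
 *		if (err)
 *			goto out_free_tfm;
 *		req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *		sg_init_one(&sg, buf, len);
 *		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					      crypto_req_done, &wait);
 *		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *		skcipher_request_free(req);
 *	out_free_tfm:
 *		crypto_free_skcipher(tfm);
 *		return err;
 *	}
 */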