1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Intel IXP4xx NPE-C crypto driver
4 *
5 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
6 */
7
8#include <linux/platform_device.h>
9#include <linux/dma-mapping.h>
10#include <linux/dmapool.h>
11#include <linux/crypto.h>
12#include <linux/kernel.h>
13#include <linux/rtnetlink.h>
14#include <linux/interrupt.h>
15#include <linux/spinlock.h>
16#include <linux/gfp.h>
17#include <linux/module.h>
18#include <linux/of.h>
19
20#include <crypto/ctr.h>
21#include <crypto/internal/des.h>
22#include <crypto/aes.h>
23#include <crypto/hmac.h>
24#include <crypto/sha1.h>
25#include <crypto/algapi.h>
26#include <crypto/internal/aead.h>
27#include <crypto/internal/skcipher.h>
28#include <crypto/authenc.h>
29#include <crypto/scatterwalk.h>
30
31#include <linux/soc/ixp4xx/npe.h>
32#include <linux/soc/ixp4xx/qmgr.h>
33
34/* Intermittent includes, delete this after v5.14-rc1 */
35#include <linux/soc/ixp4xx/cpu.h>
36
37#define MAX_KEYLEN 32
38
39/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
40#define NPE_CTX_LEN 80
41#define AES_BLOCK128 16
42
43#define NPE_OP_HASH_VERIFY 0x01
44#define NPE_OP_CCM_ENABLE 0x04
45#define NPE_OP_CRYPT_ENABLE 0x08
46#define NPE_OP_HASH_ENABLE 0x10
47#define NPE_OP_NOT_IN_PLACE 0x20
48#define NPE_OP_HMAC_DISABLE 0x40
49#define NPE_OP_CRYPT_ENCRYPT 0x80
50
51#define NPE_OP_CCM_GEN_MIC 0xcc
52#define NPE_OP_HASH_GEN_ICV 0x50
53#define NPE_OP_ENC_GEN_KEY 0xc9
54
55#define MOD_ECB 0x0000
56#define MOD_CTR 0x1000
57#define MOD_CBC_ENC 0x2000
58#define MOD_CBC_DEC 0x3000
59#define MOD_CCM_ENC 0x4000
60#define MOD_CCM_DEC 0x5000
61
62#define KEYLEN_128 4
63#define KEYLEN_192 6
64#define KEYLEN_256 8
65
66#define CIPH_DECR 0x0000
67#define CIPH_ENCR 0x0400
68
69#define MOD_DES 0x0000
70#define MOD_TDEA2 0x0100
71#define MOD_3DES 0x0200
72#define MOD_AES 0x0800
73#define MOD_AES128 (0x0800 | KEYLEN_128)
74#define MOD_AES192 (0x0900 | KEYLEN_192)
75#define MOD_AES256 (0x0a00 | KEYLEN_256)
76
77#define MAX_IVLEN 16
78#define NPE_QLEN 16
79/* Space for registering when the first
80 * NPE_QLEN crypt_ctl are busy */
81#define NPE_QLEN_TOTAL 64
82
83#define CTL_FLAG_UNUSED 0x0000
84#define CTL_FLAG_USED 0x1000
85#define CTL_FLAG_PERFORM_ABLK 0x0001
86#define CTL_FLAG_GEN_ICV 0x0002
87#define CTL_FLAG_GEN_REVAES 0x0004
88#define CTL_FLAG_PERFORM_AEAD 0x0008
89#define CTL_FLAG_MASK 0x000f
90
91#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
92
93#define MD5_DIGEST_SIZE 16
94
95struct buffer_desc {
96 u32 phys_next;
97#ifdef __ARMEB__
98 u16 buf_len;
99 u16 pkt_len;
100#else
101 u16 pkt_len;
102 u16 buf_len;
103#endif
104 dma_addr_t phys_addr;
105 u32 __reserved[4];
106 struct buffer_desc *next;
107 enum dma_data_direction dir;
108};
109
110struct crypt_ctl {
111#ifdef __ARMEB__
112 u8 mode; /* NPE_OP_* operation mode */
113 u8 init_len;
114 u16 reserved;
115#else
116 u16 reserved;
117 u8 init_len;
118 u8 mode; /* NPE_OP_* operation mode */
119#endif
120 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
121 u32 icv_rev_aes; /* icv or rev aes */
122 u32 src_buf;
123 u32 dst_buf;
124#ifdef __ARMEB__
125 u16 auth_offs; /* Authentication start offset */
126 u16 auth_len; /* Authentication data length */
127 u16 crypt_offs; /* Cryption start offset */
128 u16 crypt_len; /* Cryption data length */
129#else
130 u16 auth_len; /* Authentication data length */
131 u16 auth_offs; /* Authentication start offset */
132 u16 crypt_len; /* Cryption data length */
133 u16 crypt_offs; /* Cryption start offset */
134#endif
135 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
136 u32 crypto_ctx; /* NPE Crypto Param structure address */
137
138 /* Used by Host: 4*4 bytes*/
139 unsigned int ctl_flags;
140 union {
141 struct skcipher_request *ablk_req;
142 struct aead_request *aead_req;
143 struct crypto_tfm *tfm;
144 } data;
145 struct buffer_desc *regist_buf;
146 u8 *regist_ptr;
147};
148
149struct ablk_ctx {
150 struct buffer_desc *src;
151 struct buffer_desc *dst;
152 u8 iv[MAX_IVLEN];
153 bool encrypt;
154 struct skcipher_request fallback_req; // keep at the end
155};
156
157struct aead_ctx {
158 struct buffer_desc *src;
159 struct buffer_desc *dst;
160 struct scatterlist ivlist;
161 /* used when the hmac is not on one sg entry */
162 u8 *hmac_virt;
163 int encrypt;
164};
165
166struct ix_hash_algo {
167 u32 cfgword;
168 unsigned char *icv;
169};
170
171struct ix_sa_dir {
172 unsigned char *npe_ctx;
173 dma_addr_t npe_ctx_phys;
174 int npe_ctx_idx;
175 u8 npe_mode;
176};
177
178struct ixp_ctx {
179 struct ix_sa_dir encrypt;
180 struct ix_sa_dir decrypt;
181 int authkey_len;
182 u8 authkey[MAX_KEYLEN];
183 int enckey_len;
184 u8 enckey[MAX_KEYLEN];
185 u8 salt[MAX_IVLEN];
186 u8 nonce[CTR_RFC3686_NONCE_SIZE];
187 unsigned int salted;
188 atomic_t configuring;
189 struct completion completion;
190 struct crypto_skcipher *fallback_tfm;
191};
192
193struct ixp_alg {
194 struct skcipher_alg crypto;
195 const struct ix_hash_algo *hash;
196 u32 cfg_enc;
197 u32 cfg_dec;
198
199 int registered;
200};
201
202struct ixp_aead_alg {
203 struct aead_alg crypto;
204 const struct ix_hash_algo *hash;
205 u32 cfg_enc;
206 u32 cfg_dec;
207
208 int registered;
209};
210
211static const struct ix_hash_algo hash_alg_md5 = {
212 .cfgword = 0xAA010004,
213 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
214 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
215};
216
217static const struct ix_hash_algo hash_alg_sha1 = {
218 .cfgword = 0x00000005,
219 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
220 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
221};
222
223static struct npe *npe_c;
224
225static unsigned int send_qid;
226static unsigned int recv_qid;
227static struct dma_pool *buffer_pool;
228static struct dma_pool *ctx_pool;
229
230static struct crypt_ctl *crypt_virt;
231static dma_addr_t crypt_phys;
232
233static int support_aes = 1;
234
235static struct platform_device *pdev;
236
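/*
 * crypt_virt/crypt_phys describe one coherent array of crypt_ctl
 * descriptors; these helpers translate between a descriptor's kernel
 * address and the DMA address that is passed through the queue manager.
 */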
237static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
238{
239 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
240}
241
242static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
243{
244 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
245}
246
247static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
248{
249 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
250}
251
252static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
253{
254 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
255}
256
257static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
258{
259 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
260}
261
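/*
 * Lazily allocate the coherent descriptor array the first time a
 * descriptor is requested; called under desc_lock, hence GFP_ATOMIC.
 */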
262static int setup_crypt_desc(void)
263{
264 struct device *dev = &pdev->dev;
265
266 BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
267 IS_ENABLED(CONFIG_64BIT)) &&
268 sizeof(struct crypt_ctl) != 64);
269 crypt_virt = dma_alloc_coherent(dev,
270 NPE_QLEN * sizeof(struct crypt_ctl),
271 &crypt_phys, GFP_ATOMIC);
272 if (!crypt_virt)
273 return -ENOMEM;
274 return 0;
275}
276
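/*
 * Hand out descriptors from the first NPE_QLEN slots in round-robin
 * order, protected by desc_lock. Returns NULL if the next slot is
 * still owned by the NPE.
 */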
277static DEFINE_SPINLOCK(desc_lock);
278static struct crypt_ctl *get_crypt_desc(void)
279{
280 int i;
281 static int idx;
282 unsigned long flags;
283
284 spin_lock_irqsave(&desc_lock, flags);
285
286 if (unlikely(!crypt_virt))
287 setup_crypt_desc();
288 if (unlikely(!crypt_virt)) {
289 spin_unlock_irqrestore(&desc_lock, flags);
290 return NULL;
291 }
292 i = idx;
293 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
294 if (++idx >= NPE_QLEN)
295 idx = 0;
296 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
297 spin_unlock_irqrestore(&desc_lock, flags);
298 return crypt_virt + i;
299 } else {
300 spin_unlock_irqrestore(&desc_lock, flags);
301 return NULL;
302 }
303}
304
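/*
 * Like get_crypt_desc(), but falls back to the slots above NPE_QLEN
 * that are reserved for key/ICV setup descriptors, so configuration
 * can still make progress when all regular slots are busy.
 */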
305static DEFINE_SPINLOCK(emerg_lock);
306static struct crypt_ctl *get_crypt_desc_emerg(void)
307{
308 int i;
309 static int idx = NPE_QLEN;
310 struct crypt_ctl *desc;
311 unsigned long flags;
312
313 desc = get_crypt_desc();
314 if (desc)
315 return desc;
316 if (unlikely(!crypt_virt))
317 return NULL;
318
319 spin_lock_irqsave(&emerg_lock, flags);
320 i = idx;
321 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
322 if (++idx >= NPE_QLEN_TOTAL)
323 idx = NPE_QLEN;
324 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
325 spin_unlock_irqrestore(&emerg_lock, flags);
326 return crypt_virt + i;
327 } else {
328 spin_unlock_irqrestore(&emerg_lock, flags);
329 return NULL;
330 }
331}
332
333static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
334 dma_addr_t phys)
335{
336 while (buf) {
337 struct buffer_desc *buf1;
338 u32 phys1;
339
340 buf1 = buf->next;
341 phys1 = buf->phys_next;
342 dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
343 dma_pool_free(buffer_pool, buf, phys);
344 buf = buf1;
345 phys = phys1;
346 }
347}
348
349static struct tasklet_struct crypto_done_tasklet;
350
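/*
 * The ICV was computed into a bounce buffer because it did not fit
 * into the final buffer segment; copy it back into the destination
 * scatterlist on encryption and free the bounce buffer.
 */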
351static void finish_scattered_hmac(struct crypt_ctl *crypt)
352{
353 struct aead_request *req = crypt->data.aead_req;
354 struct aead_ctx *req_ctx = aead_request_ctx(req);
355 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
356 int authsize = crypto_aead_authsize(tfm);
357 int decryptlen = req->assoclen + req->cryptlen - authsize;
358
359 if (req_ctx->encrypt) {
360 scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
361 decryptlen, authsize, 1);
362 }
363 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
364}
365
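/*
 * Handle one completed descriptor popped off the receive queue. Bit 0
 * of the returned address flags an authentication failure; everything
 * else is dispatched on the descriptor's ctl_flags.
 */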
366static void one_packet(dma_addr_t phys)
367{
368 struct device *dev = &pdev->dev;
369 struct crypt_ctl *crypt;
370 struct ixp_ctx *ctx;
371 int failed;
372
373 failed = phys & 0x1 ? -EBADMSG : 0;
374 phys &= ~0x3;
375 crypt = crypt_phys2virt(phys);
376
377 switch (crypt->ctl_flags & CTL_FLAG_MASK) {
378 case CTL_FLAG_PERFORM_AEAD: {
379 struct aead_request *req = crypt->data.aead_req;
380 struct aead_ctx *req_ctx = aead_request_ctx(req);
381
382 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
383 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
384 if (req_ctx->hmac_virt)
385 finish_scattered_hmac(crypt);
386
387 aead_request_complete(req, failed);
388 break;
389 }
390 case CTL_FLAG_PERFORM_ABLK: {
391 struct skcipher_request *req = crypt->data.ablk_req;
392 struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
393 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
394 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
395 unsigned int offset;
396
397 if (ivsize > 0) {
398 offset = req->cryptlen - ivsize;
399 if (req_ctx->encrypt) {
400 scatterwalk_map_and_copy(req->iv, req->dst,
401 offset, ivsize, 0);
402 } else {
403 memcpy(req->iv, req_ctx->iv, ivsize);
404 memzero_explicit(req_ctx->iv, ivsize);
405 }
406 }
407
408 if (req_ctx->dst)
409 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
410
411 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
412 skcipher_request_complete(req, failed);
413 break;
414 }
415 case CTL_FLAG_GEN_ICV:
416 ctx = crypto_tfm_ctx(crypt->data.tfm);
417 dma_pool_free(ctx_pool, crypt->regist_ptr,
418 crypt->regist_buf->phys_addr);
419 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
420 if (atomic_dec_and_test(&ctx->configuring))
421 complete(&ctx->completion);
422 break;
423 case CTL_FLAG_GEN_REVAES:
424 ctx = crypto_tfm_ctx(crypt->data.tfm);
425 *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
426 if (atomic_dec_and_test(&ctx->configuring))
427 complete(&ctx->completion);
428 break;
429 default:
430 BUG();
431 }
432 crypt->ctl_flags = CTL_FLAG_UNUSED;
433}
434
435static void irqhandler(void *_unused)
436{
437 tasklet_schedule(&crypto_done_tasklet);
438}
439
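/*
 * Tasklet: process up to four completions per run, then reschedule;
 * stops as soon as the receive queue is empty.
 */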
440static void crypto_done_action(unsigned long arg)
441{
442 int i;
443
444 for (i = 0; i < 4; i++) {
445 dma_addr_t phys = qmgr_get_entry(recv_qid);
446 if (!phys)
447 return;
448 one_packet(phys);
449 }
450 tasklet_schedule(&crypto_done_tasklet);
451}
452
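/*
 * One-time setup: resolve the NPE and queue IDs (from the device tree
 * or the legacy hardcoded values), check that the NPE runs firmware
 * with crypto support, then create the DMA pools and request the
 * send/receive queues.
 */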
453static int init_ixp_crypto(struct device *dev)
454{
455 struct device_node *np = dev->of_node;
456 u32 msg[2] = { 0, 0 };
457 int ret = -ENODEV;
458 u32 npe_id;
459
460 dev_info(dev, "probing...\n");
461
462 /* Locate the NPE and queue manager to use from device tree */
463 if (IS_ENABLED(CONFIG_OF) && np) {
464 struct of_phandle_args queue_spec;
465 struct of_phandle_args npe_spec;
466
467 ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
468 1, 0, &npe_spec);
469 if (ret) {
470 dev_err(dev, "no NPE engine specified\n");
471 return -ENODEV;
472 }
473 npe_id = npe_spec.args[0];
    of_node_put(npe_spec.np);
474
475 ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
476 &queue_spec);
477 if (ret) {
478 dev_err(dev, "no rx queue phandle\n");
479 return -ENODEV;
480 }
481 recv_qid = queue_spec.args[0];
    of_node_put(queue_spec.np);
482
483 ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
484 &queue_spec);
485 if (ret) {
486 dev_err(dev, "no txready queue phandle\n");
487 return -ENODEV;
488 }
489 send_qid = queue_spec.args[0];
    of_node_put(queue_spec.np);
490 } else {
491 /*
492 * Hardcoded engine when using platform data, this goes away
493 * when we switch to using DT only.
494 */
495 npe_id = 2;
496 send_qid = 29;
497 recv_qid = 30;
498 }
499
500 npe_c = npe_request(npe_id);
501 if (!npe_c)
502 return ret;
503
504 if (!npe_running(npe_c)) {
505 ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
506 if (ret)
507 goto npe_release;
508 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
509 goto npe_error;
510 } else {
511 if (npe_send_message(npe_c, msg, "STATUS_MSG"))
512 goto npe_error;
513
514 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
515 goto npe_error;
516 }
517
518 switch ((msg[1] >> 16) & 0xff) {
519 case 3:
520 dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
521 support_aes = 0;
522 break;
523 case 4:
524 case 5:
525 support_aes = 1;
526 break;
527 default:
528 dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
529 ret = -ENODEV;
530 goto npe_release;
531 }
532 /* buffer_pool will also be used to sometimes store the hmac,
533 * so assure it is large enough
534 */
535 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
536 buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
537 32, 0);
538 ret = -ENOMEM;
539 if (!buffer_pool)
540 goto err;
541
542 ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
543 if (!ctx_pool)
544 goto err;
545
546 ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
547 "ixp_crypto:out", NULL);
548 if (ret)
549 goto err;
550 ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
551 "ixp_crypto:in", NULL);
552 if (ret) {
553 qmgr_release_queue(send_qid);
554 goto err;
555 }
556 qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
557 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
558
559 qmgr_enable_irq(recv_qid);
560 return 0;
561
562npe_error:
563 dev_err(dev, "%s not responding\n", npe_name(npe_c));
564 ret = -EIO;
565err:
566 dma_pool_destroy(ctx_pool);
567 dma_pool_destroy(buffer_pool);
568npe_release:
569 npe_release(npe_c);
570 return ret;
571}
572
573static void release_ixp_crypto(struct device *dev)
574{
575 qmgr_disable_irq(recv_qid);
576 tasklet_kill(&crypto_done_tasklet);
577
578 qmgr_release_queue(send_qid);
579 qmgr_release_queue(recv_qid);
580
581 dma_pool_destroy(ctx_pool);
582 dma_pool_destroy(buffer_pool);
583
584 npe_release(npe_c);
585
586 if (crypt_virt)
587 dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
588 crypt_virt, crypt_phys);
589}
590
591static void reset_sa_dir(struct ix_sa_dir *dir)
592{
593 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
594 dir->npe_ctx_idx = 0;
595 dir->npe_mode = 0;
596}
597
598static int init_sa_dir(struct ix_sa_dir *dir)
599{
600 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
601 if (!dir->npe_ctx)
602 return -ENOMEM;
603
604 reset_sa_dir(dir);
605 return 0;
606}
607
608static void free_sa_dir(struct ix_sa_dir *dir)
609{
610 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
611 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
612}
613
614static int init_tfm(struct crypto_tfm *tfm)
615{
616 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
617 int ret;
618
619 atomic_set(&ctx->configuring, 0);
620 ret = init_sa_dir(&ctx->encrypt);
621 if (ret)
622 return ret;
623 ret = init_sa_dir(&ctx->decrypt);
624 if (ret)
625 free_sa_dir(&ctx->encrypt);
626
627 return ret;
628}
629
630static int init_tfm_ablk(struct crypto_skcipher *tfm)
631{
632 struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
633 struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
634 const char *name = crypto_tfm_alg_name(ctfm);
635
636 ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
637 if (IS_ERR(ctx->fallback_tfm)) {
638 pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
639 name, PTR_ERR(ctx->fallback_tfm));
640 return PTR_ERR(ctx->fallback_tfm);
641 }
642
643 pr_info("Fallback for %s is %s\n",
644 crypto_tfm_alg_driver_name(&tfm->base),
645 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
646 );
647
648 crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
649 return init_tfm(crypto_skcipher_tfm(tfm));
650}
651
652static int init_tfm_aead(struct crypto_aead *tfm)
653{
654 crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
655 return init_tfm(crypto_aead_tfm(tfm));
656}
657
658static void exit_tfm(struct crypto_tfm *tfm)
659{
660 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
661
662 free_sa_dir(&ctx->encrypt);
663 free_sa_dir(&ctx->decrypt);
664}
665
666static void exit_tfm_ablk(struct crypto_skcipher *tfm)
667{
668 struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
669 struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
670
671 crypto_free_skcipher(ctx->fallback_tfm);
672 exit_tfm(crypto_skcipher_tfm(tfm));
673}
674
675static void exit_tfm_aead(struct crypto_aead *tfm)
676{
677 exit_tfm(crypto_aead_tfm(tfm));
678}
679
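/*
 * Precompute one half of the HMAC state: XOR the key with the ipad or
 * opad byte and ask the NPE to hash the padded block, storing the
 * result at 'target' inside the per-direction NPE context. Completion
 * is accounted for via ctx->configuring.
 */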
680static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
681 int init_len, u32 ctx_addr, const u8 *key,
682 int key_len)
683{
684 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
685 struct crypt_ctl *crypt;
686 struct buffer_desc *buf;
687 int i;
688 u8 *pad;
689 dma_addr_t pad_phys, buf_phys;
690
691 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
692 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
693 if (!pad)
694 return -ENOMEM;
695 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
696 if (!buf) {
697 dma_pool_free(ctx_pool, pad, pad_phys);
698 return -ENOMEM;
699 }
700 crypt = get_crypt_desc_emerg();
701 if (!crypt) {
702 dma_pool_free(ctx_pool, pad, pad_phys);
703 dma_pool_free(buffer_pool, buf, buf_phys);
704 return -EAGAIN;
705 }
706
707 memcpy(pad, key, key_len);
708 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
709 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
710 pad[i] ^= xpad;
711
712 crypt->data.tfm = tfm;
713 crypt->regist_ptr = pad;
714 crypt->regist_buf = buf;
715
716 crypt->auth_offs = 0;
717 crypt->auth_len = HMAC_PAD_BLOCKLEN;
718 crypt->crypto_ctx = ctx_addr;
719 crypt->src_buf = buf_phys;
720 crypt->icv_rev_aes = target;
721 crypt->mode = NPE_OP_HASH_GEN_ICV;
722 crypt->init_len = init_len;
723 crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
724
725 buf->next = NULL;
726 buf->buf_len = HMAC_PAD_BLOCKLEN;
727 buf->pkt_len = 0;
728 buf->phys_addr = pad_phys;
729
730 atomic_inc(&ctx->configuring);
731 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
732 BUG_ON(qmgr_stat_overflow(send_qid));
733 return 0;
734}
735
736static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
737 const u8 *key, int key_len, unsigned int digest_len)
738{
739 u32 itarget, otarget, npe_ctx_addr;
740 unsigned char *cinfo;
741 int init_len, ret = 0;
742 u32 cfgword;
743 struct ix_sa_dir *dir;
744 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
745 const struct ix_hash_algo *algo;
746
747 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
748 cinfo = dir->npe_ctx + dir->npe_ctx_idx;
749 algo = ix_hash(tfm);
750
751 /* write cfg word to cryptinfo */
752 cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
753#ifndef __ARMEB__
754 cfgword ^= 0xAA000000; /* change the "byte swap" flags */
755#endif
756 *(__be32 *)cinfo = cpu_to_be32(cfgword);
757 cinfo += sizeof(cfgword);
758
759 /* write ICV to cryptinfo */
760 memcpy(cinfo, algo->icv, digest_len);
761 cinfo += digest_len;
762
763 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
764 + sizeof(algo->cfgword);
765 otarget = itarget + digest_len;
766 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
767 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
768
769 dir->npe_ctx_idx += init_len;
770 dir->npe_mode |= NPE_OP_HASH_ENABLE;
771
772 if (!encrypt)
773 dir->npe_mode |= NPE_OP_HASH_VERIFY;
774
775 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
776 init_len, npe_ctx_addr, key, key_len);
777 if (ret)
778 return ret;
779 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
780 init_len, npe_ctx_addr, key, key_len);
781}
782
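/*
 * Ask the NPE to derive the AES decryption (reverse) key schedule into
 * the decrypt context; the temporarily set CIPH_ENCR bit is cleared
 * again when the descriptor completes (CTL_FLAG_GEN_REVAES).
 */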
783static int gen_rev_aes_key(struct crypto_tfm *tfm)
784{
785 struct crypt_ctl *crypt;
786 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
787 struct ix_sa_dir *dir = &ctx->decrypt;
788
789 crypt = get_crypt_desc_emerg();
790 if (!crypt)
791 return -EAGAIN;
792
793 *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
794
795 crypt->data.tfm = tfm;
796 crypt->crypt_offs = 0;
797 crypt->crypt_len = AES_BLOCK128;
798 crypt->src_buf = 0;
799 crypt->crypto_ctx = dir->npe_ctx_phys;
800 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
801 crypt->mode = NPE_OP_ENC_GEN_KEY;
802 crypt->init_len = dir->npe_ctx_idx;
803 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
804
805 atomic_inc(&ctx->configuring);
806 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
807 BUG_ON(qmgr_stat_overflow(send_qid));
808 return 0;
809}
810
811static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
812 int key_len)
813{
814 u8 *cinfo;
815 u32 cipher_cfg;
816 u32 keylen_cfg = 0;
817 struct ix_sa_dir *dir;
818 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
819 int err;
820
821 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
822 cinfo = dir->npe_ctx;
823
824 if (encrypt) {
825 cipher_cfg = cipher_cfg_enc(tfm);
826 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
827 } else {
828 cipher_cfg = cipher_cfg_dec(tfm);
829 }
830 if (cipher_cfg & MOD_AES) {
831 switch (key_len) {
832 case 16:
833 keylen_cfg = MOD_AES128;
834 break;
835 case 24:
836 keylen_cfg = MOD_AES192;
837 break;
838 case 32:
839 keylen_cfg = MOD_AES256;
840 break;
841 default:
842 return -EINVAL;
843 }
844 cipher_cfg |= keylen_cfg;
845 } else {
846 err = crypto_des_verify_key(tfm, key);
847 if (err)
848 return err;
849 }
850 /* write cfg word to cryptinfo */
851 *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
852 cinfo += sizeof(cipher_cfg);
853
854 /* write cipher key to cryptinfo */
855 memcpy(cinfo, key, key_len);
856 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
857 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
858 memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
859 key_len = DES3_EDE_KEY_SIZE;
860 }
861 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
862 dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
863 if ((cipher_cfg & MOD_AES) && !encrypt)
864 return gen_rev_aes_key(tfm);
865
866 return 0;
867}
868
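/*
 * DMA-map each scatterlist segment and link a buffer_desc from
 * buffer_pool for it onto the chain behind *buf; the tail of the
 * resulting chain is returned.
 */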
869static struct buffer_desc *chainup_buffers(struct device *dev,
870 struct scatterlist *sg, unsigned int nbytes,
871 struct buffer_desc *buf, gfp_t flags,
872 enum dma_data_direction dir)
873{
874 for (; nbytes > 0; sg = sg_next(sg)) {
875 unsigned int len = min(nbytes, sg->length);
876 struct buffer_desc *next_buf;
877 dma_addr_t next_buf_phys;
878 void *ptr;
879
880 nbytes -= len;
881 ptr = sg_virt(sg);
882 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
883 if (!next_buf) {
884 buf = NULL;
885 break;
886 }
887 sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
888 buf->next = next_buf;
889 buf->phys_next = next_buf_phys;
890 buf = next_buf;
891
892 buf->phys_addr = sg_dma_address(sg);
893 buf->buf_len = len;
894 buf->dir = dir;
895 }
896 buf->next = NULL;
897 buf->phys_next = 0;
898 return buf;
899}
900
901static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
902 unsigned int key_len)
903{
904 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
905 int ret;
906
907 init_completion(&ctx->completion);
908 atomic_inc(&ctx->configuring);
909
910 reset_sa_dir(&ctx->encrypt);
911 reset_sa_dir(&ctx->decrypt);
912
913 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
914 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
915
916 ret = setup_cipher(&tfm->base, 0, key, key_len);
917 if (ret)
918 goto out;
919 ret = setup_cipher(&tfm->base, 1, key, key_len);
920out:
921 if (!atomic_dec_and_test(&ctx->configuring))
922 wait_for_completion(&ctx->completion);
923 if (ret)
924 return ret;
925 crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
926 crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
927
928 return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
929}
930
931static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
932 unsigned int key_len)
933{
934 return verify_skcipher_des3_key(tfm, key) ?:
935 ablk_setkey(tfm, key, key_len);
936}
937
938static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
939 unsigned int key_len)
940{
941 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
942
943 /* the nonce is stored in bytes at end of key */
944 if (key_len < CTR_RFC3686_NONCE_SIZE)
945 return -EINVAL;
946
947 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
948 CTR_RFC3686_NONCE_SIZE);
949
950 key_len -= CTR_RFC3686_NONCE_SIZE;
951 return ablk_setkey(tfm, key, key_len);
952}
953
954static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
955{
956 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
957 struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
958 struct ablk_ctx *rctx = skcipher_request_ctx(areq);
959 int err;
960
961 skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
962 skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
963 areq->base.complete, areq->base.data);
964 skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
965 areq->cryptlen, areq->iv);
966 if (encrypt)
967 err = crypto_skcipher_encrypt(&rctx->fallback_req);
968 else
969 err = crypto_skcipher_decrypt(&rctx->fallback_req);
970 return err;
971}
972
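/*
 * Core skcipher path: requests whose source or destination scatterlist
 * has more than one entry go to the software fallback; otherwise build
 * a crypt_ctl descriptor plus buffer chain(s) and queue it to the NPE.
 * The result comes back through the receive queue and one_packet().
 */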
973static int ablk_perform(struct skcipher_request *req, int encrypt)
974{
975 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
976 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
977 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
978 struct ix_sa_dir *dir;
979 struct crypt_ctl *crypt;
980 unsigned int nbytes = req->cryptlen;
981 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
982 struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
983 struct buffer_desc src_hook;
984 struct device *dev = &pdev->dev;
985 unsigned int offset;
986 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
987 GFP_KERNEL : GFP_ATOMIC;
988
989 if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
990 return ixp4xx_cipher_fallback(req, encrypt);
991
992 if (qmgr_stat_full(send_qid))
993 return -EAGAIN;
994 if (atomic_read(&ctx->configuring))
995 return -EAGAIN;
996
997 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
998 req_ctx->encrypt = encrypt;
999
1000 crypt = get_crypt_desc();
1001 if (!crypt)
1002 return -ENOMEM;
1003
1004 crypt->data.ablk_req = req;
1005 crypt->crypto_ctx = dir->npe_ctx_phys;
1006 crypt->mode = dir->npe_mode;
1007 crypt->init_len = dir->npe_ctx_idx;
1008
1009 crypt->crypt_offs = 0;
1010 crypt->crypt_len = nbytes;
1011
1012 BUG_ON(ivsize && !req->iv);
1013 memcpy(crypt->iv, req->iv, ivsize);
1014 if (ivsize > 0 && !encrypt) {
1015 offset = req->cryptlen - ivsize;
1016 scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
1017 }
1018 if (req->src != req->dst) {
1019 struct buffer_desc dst_hook;
1020
1021 crypt->mode |= NPE_OP_NOT_IN_PLACE;
1022 /* This was never tested by Intel
1023 * for more than one dst buffer, I think. */
1024 req_ctx->dst = NULL;
1025 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
1026 flags, DMA_FROM_DEVICE))
1027 goto free_buf_dest;
1028 src_direction = DMA_TO_DEVICE;
1029 req_ctx->dst = dst_hook.next;
1030 crypt->dst_buf = dst_hook.phys_next;
1031 } else {
1032 req_ctx->dst = NULL;
1033 }
1034 req_ctx->src = NULL;
1035 if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
1036 src_direction))
1037 goto free_buf_src;
1038
1039 req_ctx->src = src_hook.next;
1040 crypt->src_buf = src_hook.phys_next;
1041 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
1042 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1043 BUG_ON(qmgr_stat_overflow(send_qid));
1044 return -EINPROGRESS;
1045
1046free_buf_src:
1047 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1048free_buf_dest:
1049 if (req->src != req->dst)
1050 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1051
1052 crypt->ctl_flags = CTL_FLAG_UNUSED;
1053 return -ENOMEM;
1054}
1055
1056static int ablk_encrypt(struct skcipher_request *req)
1057{
1058 return ablk_perform(req, 1);
1059}
1060
1061static int ablk_decrypt(struct skcipher_request *req)
1062{
1063 return ablk_perform(req, 0);
1064}
1065
1066static int ablk_rfc3686_crypt(struct skcipher_request *req)
1067{
1068 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1069 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
1070 u8 iv[CTR_RFC3686_BLOCK_SIZE];
1071 u8 *info = req->iv;
1072 int ret;
1073
1074 /* set up counter block */
1075 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1076 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
1077
1078 /* initialize counter portion of counter block */
1079 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1080 cpu_to_be32(1);
1081
1082 req->iv = iv;
1083 ret = ablk_perform(req, 1);
1084 req->iv = info;
1085 return ret;
1086}
1087
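/*
 * Core AEAD path: chain up the source (and, when not in place, the
 * destination) buffers covering assoc data and payload, point
 * icv_rev_aes at the authentication tag and queue the descriptor. A
 * bounce buffer is used when the tag straddles buffer boundaries.
 */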
1088static int aead_perform(struct aead_request *req, int encrypt,
1089 int cryptoffset, int eff_cryptlen, u8 *iv)
1090{
1091 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1092 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1093 unsigned int ivsize = crypto_aead_ivsize(tfm);
1094 unsigned int authsize = crypto_aead_authsize(tfm);
1095 struct ix_sa_dir *dir;
1096 struct crypt_ctl *crypt;
1097 unsigned int cryptlen;
1098 struct buffer_desc *buf, src_hook;
1099 struct aead_ctx *req_ctx = aead_request_ctx(req);
1100 struct device *dev = &pdev->dev;
1101 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1102 GFP_KERNEL : GFP_ATOMIC;
1103 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
1104 unsigned int lastlen;
1105
1106 if (qmgr_stat_full(send_qid))
1107 return -EAGAIN;
1108 if (atomic_read(&ctx->configuring))
1109 return -EAGAIN;
1110
1111 if (encrypt) {
1112 dir = &ctx->encrypt;
1113 cryptlen = req->cryptlen;
1114 } else {
1115 dir = &ctx->decrypt;
1116 /* req->cryptlen includes the authsize when decrypting */
1117 cryptlen = req->cryptlen - authsize;
1118 eff_cryptlen -= authsize;
1119 }
1120 crypt = get_crypt_desc();
1121 if (!crypt)
1122 return -ENOMEM;
1123
1124 crypt->data.aead_req = req;
1125 crypt->crypto_ctx = dir->npe_ctx_phys;
1126 crypt->mode = dir->npe_mode;
1127 crypt->init_len = dir->npe_ctx_idx;
1128
1129 crypt->crypt_offs = cryptoffset;
1130 crypt->crypt_len = eff_cryptlen;
1131
1132 crypt->auth_offs = 0;
1133 crypt->auth_len = req->assoclen + cryptlen;
1134 BUG_ON(ivsize && !req->iv);
1135 memcpy(crypt->iv, req->iv, ivsize);
1136
1137 buf = chainup_buffers(dev, req->src, crypt->auth_len,
1138 &src_hook, flags, src_direction);
1139 req_ctx->src = src_hook.next;
1140 crypt->src_buf = src_hook.phys_next;
1141 if (!buf)
1142 goto free_buf_src;
1143
1144 lastlen = buf->buf_len;
1145 if (lastlen >= authsize)
1146 crypt->icv_rev_aes = buf->phys_addr +
1147 buf->buf_len - authsize;
1148
1149 req_ctx->dst = NULL;
1150
1151 if (req->src != req->dst) {
1152 struct buffer_desc dst_hook;
1153
1154 crypt->mode |= NPE_OP_NOT_IN_PLACE;
1155 src_direction = DMA_TO_DEVICE;
1156
1157 buf = chainup_buffers(dev, req->dst, crypt->auth_len,
1158 &dst_hook, flags, DMA_FROM_DEVICE);
1159 req_ctx->dst = dst_hook.next;
1160 crypt->dst_buf = dst_hook.phys_next;
1161
1162 if (!buf)
1163 goto free_buf_dst;
1164
1165 if (encrypt) {
1166 lastlen = buf->buf_len;
1167 if (lastlen >= authsize)
1168 crypt->icv_rev_aes = buf->phys_addr +
1169 buf->buf_len - authsize;
1170 }
1171 }
1172
1173 if (unlikely(lastlen < authsize)) {
1174 dma_addr_t dma;
1175 /* The 12 hmac bytes are scattered,
1176 * we need to copy them into a safe buffer */
1177 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
1178 if (unlikely(!req_ctx->hmac_virt))
1179 goto free_buf_dst;
1180 crypt->icv_rev_aes = dma;
1181 if (!encrypt) {
1182 scatterwalk_map_and_copy(req_ctx->hmac_virt,
1183 req->src, cryptlen, authsize, 0);
1184 }
1185 req_ctx->encrypt = encrypt;
1186 } else {
1187 req_ctx->hmac_virt = NULL;
1188 }
1189
1190 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1191 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1192 BUG_ON(qmgr_stat_overflow(send_qid));
1193 return -EINPROGRESS;
1194
1195free_buf_dst:
1196 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1197free_buf_src:
1198 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1199 crypt->ctl_flags = CTL_FLAG_UNUSED;
1200 return -ENOMEM;
1201}
1202
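/*
 * Program both NPE context directions with the current cipher and HMAC
 * keys and wait for the resulting configuration descriptors (ICV
 * precompute, reverse AES key) to complete.
 */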
1203static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1204{
1205 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1206 unsigned int digest_len = crypto_aead_maxauthsize(tfm);
1207 int ret;
1208
1209 if (!ctx->enckey_len && !ctx->authkey_len)
1210 return 0;
1211 init_completion(&ctx->completion);
1212 atomic_inc(&ctx->configuring);
1213
1214 reset_sa_dir(&ctx->encrypt);
1215 reset_sa_dir(&ctx->decrypt);
1216
1217 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1218 if (ret)
1219 goto out;
1220 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1221 if (ret)
1222 goto out;
1223 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1224 ctx->authkey_len, digest_len);
1225 if (ret)
1226 goto out;
1227 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1228 ctx->authkey_len, digest_len);
1229out:
1230 if (!atomic_dec_and_test(&ctx->configuring))
1231 wait_for_completion(&ctx->completion);
1232 return ret;
1233}
1234
1235static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1236{
1237 int max = crypto_aead_maxauthsize(tfm) >> 2;
1238
1239 if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1240 return -EINVAL;
1241 return aead_setup(tfm, authsize);
1242}
1243
1244static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1245 unsigned int keylen)
1246{
1247 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1248 struct crypto_authenc_keys keys;
1249
1250 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1251 goto badkey;
1252
1253 if (keys.authkeylen > sizeof(ctx->authkey))
1254 goto badkey;
1255
1256 if (keys.enckeylen > sizeof(ctx->enckey))
1257 goto badkey;
1258
1259 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1260 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1261 ctx->authkey_len = keys.authkeylen;
1262 ctx->enckey_len = keys.enckeylen;
1263
1264 memzero_explicit(&keys, sizeof(keys));
1265 return aead_setup(tfm, crypto_aead_authsize(tfm));
1266badkey:
1267 memzero_explicit(&keys, sizeof(keys));
1268 return -EINVAL;
1269}
1270
1271static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1272 unsigned int keylen)
1273{
1274 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1275 struct crypto_authenc_keys keys;
1276 int err;
1277
1278 err = crypto_authenc_extractkeys(&keys, key, keylen);
1279 if (unlikely(err))
1280 goto badkey;
1281
1282 err = -EINVAL;
1283 if (keys.authkeylen > sizeof(ctx->authkey))
1284 goto badkey;
1285
1286 err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1287 if (err)
1288 goto badkey;
1289
1290 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1291 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1292 ctx->authkey_len = keys.authkeylen;
1293 ctx->enckey_len = keys.enckeylen;
1294
1295 memzero_explicit(&keys, sizeof(keys));
1296 return aead_setup(tfm, crypto_aead_authsize(tfm));
1297badkey:
1298 memzero_explicit(&keys, sizeof(keys));
1299 return err;
1300}
1301
1302static int aead_encrypt(struct aead_request *req)
1303{
1304 return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
1305}
1306
1307static int aead_decrypt(struct aead_request *req)
1308{
1309 return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
1310}
1311
1312static struct ixp_alg ixp4xx_algos[] = {
1313{
1314 .crypto = {
1315 .base.cra_name = "cbc(des)",
1316 .base.cra_blocksize = DES_BLOCK_SIZE,
1317
1318 .min_keysize = DES_KEY_SIZE,
1319 .max_keysize = DES_KEY_SIZE,
1320 .ivsize = DES_BLOCK_SIZE,
1321 },
1322 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1323 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1324
1325}, {
1326 .crypto = {
1327 .base.cra_name = "ecb(des)",
1328 .base.cra_blocksize = DES_BLOCK_SIZE,
1329 .min_keysize = DES_KEY_SIZE,
1330 .max_keysize = DES_KEY_SIZE,
1331 },
1332 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1333 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1334}, {
1335 .crypto = {
1336 .base.cra_name = "cbc(des3_ede)",
1337 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1338
1339 .min_keysize = DES3_EDE_KEY_SIZE,
1340 .max_keysize = DES3_EDE_KEY_SIZE,
1341 .ivsize = DES3_EDE_BLOCK_SIZE,
1342 .setkey = ablk_des3_setkey,
1343 },
1344 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1345 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1346}, {
1347 .crypto = {
1348 .base.cra_name = "ecb(des3_ede)",
1349 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1350
1351 .min_keysize = DES3_EDE_KEY_SIZE,
1352 .max_keysize = DES3_EDE_KEY_SIZE,
1353 .setkey = ablk_des3_setkey,
1354 },
1355 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1356 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1357}, {
1358 .crypto = {
1359 .base.cra_name = "cbc(aes)",
1360 .base.cra_blocksize = AES_BLOCK_SIZE,
1361
1362 .min_keysize = AES_MIN_KEY_SIZE,
1363 .max_keysize = AES_MAX_KEY_SIZE,
1364 .ivsize = AES_BLOCK_SIZE,
1365 },
1366 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1367 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1368}, {
1369 .crypto = {
1370 .base.cra_name = "ecb(aes)",
1371 .base.cra_blocksize = AES_BLOCK_SIZE,
1372
1373 .min_keysize = AES_MIN_KEY_SIZE,
1374 .max_keysize = AES_MAX_KEY_SIZE,
1375 },
1376 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1377 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1378}, {
1379 .crypto = {
1380 .base.cra_name = "ctr(aes)",
1381 .base.cra_blocksize = 1,
1382
1383 .min_keysize = AES_MIN_KEY_SIZE,
1384 .max_keysize = AES_MAX_KEY_SIZE,
1385 .ivsize = AES_BLOCK_SIZE,
1386 },
1387 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1388 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1389}, {
1390 .crypto = {
1391 .base.cra_name = "rfc3686(ctr(aes))",
1392 .base.cra_blocksize = 1,
1393
1394 .min_keysize = AES_MIN_KEY_SIZE,
1395 .max_keysize = AES_MAX_KEY_SIZE,
1396 .ivsize = AES_BLOCK_SIZE,
1397 .setkey = ablk_rfc3686_setkey,
1398 .encrypt = ablk_rfc3686_crypt,
1399 .decrypt = ablk_rfc3686_crypt,
1400 },
1401 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1402 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1403} };
1404
1405static struct ixp_aead_alg ixp4xx_aeads[] = {
1406{
1407 .crypto = {
1408 .base = {
1409 .cra_name = "authenc(hmac(md5),cbc(des))",
1410 .cra_blocksize = DES_BLOCK_SIZE,
1411 },
1412 .ivsize = DES_BLOCK_SIZE,
1413 .maxauthsize = MD5_DIGEST_SIZE,
1414 },
1415 .hash = &hash_alg_md5,
1416 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1417 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1418}, {
1419 .crypto = {
1420 .base = {
1421 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1422 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1423 },
1424 .ivsize = DES3_EDE_BLOCK_SIZE,
1425 .maxauthsize = MD5_DIGEST_SIZE,
1426 .setkey = des3_aead_setkey,
1427 },
1428 .hash = &hash_alg_md5,
1429 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1430 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1431}, {
1432 .crypto = {
1433 .base = {
1434 .cra_name = "authenc(hmac(sha1),cbc(des))",
1435 .cra_blocksize = DES_BLOCK_SIZE,
1436 },
1437 .ivsize = DES_BLOCK_SIZE,
1438 .maxauthsize = SHA1_DIGEST_SIZE,
1439 },
1440 .hash = &hash_alg_sha1,
1441 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1442 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1443}, {
1444 .crypto = {
1445 .base = {
1446 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1447 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1448 },
1449 .ivsize = DES3_EDE_BLOCK_SIZE,
1450 .maxauthsize = SHA1_DIGEST_SIZE,
1451 .setkey = des3_aead_setkey,
1452 },
1453 .hash = &hash_alg_sha1,
1454 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1455 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1456}, {
1457 .crypto = {
1458 .base = {
1459 .cra_name = "authenc(hmac(md5),cbc(aes))",
1460 .cra_blocksize = AES_BLOCK_SIZE,
1461 },
1462 .ivsize = AES_BLOCK_SIZE,
1463 .maxauthsize = MD5_DIGEST_SIZE,
1464 },
1465 .hash = &hash_alg_md5,
1466 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1467 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1468}, {
1469 .crypto = {
1470 .base = {
1471 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1472 .cra_blocksize = AES_BLOCK_SIZE,
1473 },
1474 .ivsize = AES_BLOCK_SIZE,
1475 .maxauthsize = SHA1_DIGEST_SIZE,
1476 },
1477 .hash = &hash_alg_sha1,
1478 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1479 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1480} };
1481
1482#define IXP_POSTFIX "-ixp4xx"
1483
1484static int ixp_crypto_probe(struct platform_device *_pdev)
1485{
1486 struct device *dev = &_pdev->dev;
1487 int num = ARRAY_SIZE(ixp4xx_algos);
1488 int i, err;
1489
1490 pdev = _pdev;
1491
1492 err = init_ixp_crypto(dev);
1493 if (err)
1494 return err;
1495
1496 for (i = 0; i < num; i++) {
1497 struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
1498
1499 if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1500 "%s"IXP_POSTFIX, cra->base.cra_name) >=
1501 CRYPTO_MAX_ALG_NAME)
1502 continue;
1503 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1504 continue;
1505
1506 /* block ciphers */
1507 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1508 CRYPTO_ALG_ASYNC |
1509 CRYPTO_ALG_ALLOCATES_MEMORY |
1510 CRYPTO_ALG_NEED_FALLBACK;
1511 if (!cra->setkey)
1512 cra->setkey = ablk_setkey;
1513 if (!cra->encrypt)
1514 cra->encrypt = ablk_encrypt;
1515 if (!cra->decrypt)
1516 cra->decrypt = ablk_decrypt;
1517 cra->init = init_tfm_ablk;
1518 cra->exit = exit_tfm_ablk;
1519
1520 cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1521 cra->base.cra_module = THIS_MODULE;
1522 cra->base.cra_alignmask = 3;
1523 cra->base.cra_priority = 300;
1524 if (crypto_register_skcipher(cra))
1525 dev_err(&pdev->dev, "Failed to register '%s'\n",
1526 cra->base.cra_name);
1527 else
1528 ixp4xx_algos[i].registered = 1;
1529 }
1530
1531 for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1532 struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
1533
1534 if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1535 "%s"IXP_POSTFIX, cra->base.cra_name) >=
1536 CRYPTO_MAX_ALG_NAME)
1537 continue;
1538 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1539 continue;
1540
1541 /* authenc */
1542 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1543 CRYPTO_ALG_ASYNC |
1544 CRYPTO_ALG_ALLOCATES_MEMORY;
1545 cra->setkey = cra->setkey ?: aead_setkey;
1546 cra->setauthsize = aead_setauthsize;
1547 cra->encrypt = aead_encrypt;
1548 cra->decrypt = aead_decrypt;
1549 cra->init = init_tfm_aead;
1550 cra->exit = exit_tfm_aead;
1551
1552 cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1553 cra->base.cra_module = THIS_MODULE;
1554 cra->base.cra_alignmask = 3;
1555 cra->base.cra_priority = 300;
1556
1557 if (crypto_register_aead(cra))
1558 dev_err(&pdev->dev, "Failed to register '%s'\n",
1559 cra->base.cra_driver_name);
1560 else
1561 ixp4xx_aeads[i].registered = 1;
1562 }
1563 return 0;
1564}
1565
1566static void ixp_crypto_remove(struct platform_device *pdev)
1567{
1568 int num = ARRAY_SIZE(ixp4xx_algos);
1569 int i;
1570
1571 for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1572 if (ixp4xx_aeads[i].registered)
1573 crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
1574 }
1575
1576 for (i = 0; i < num; i++) {
1577 if (ixp4xx_algos[i].registered)
1578 crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
1579 }
1580 release_ixp_crypto(&pdev->dev);
1581}
1582static const struct of_device_id ixp4xx_crypto_of_match[] = {
1583 {
1584 .compatible = "intel,ixp4xx-crypto",
1585 },
1586 {},
1587};
1588
1589static struct platform_driver ixp_crypto_driver = {
1590 .probe = ixp_crypto_probe,
1591 .remove_new = ixp_crypto_remove,
1592 .driver = {
1593 .name = "ixp4xx_crypto",
1594 .of_match_table = ixp4xx_crypto_of_match,
1595 },
1596};
1597module_platform_driver(ixp_crypto_driver);
1598
1599MODULE_LICENSE("GPL");
1600MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1601MODULE_DESCRIPTION("IXP4xx hardware crypto");
1602
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Intel IXP4xx NPE-C crypto driver
4 *
5 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
6 */
7
8#include <linux/platform_device.h>
9#include <linux/dma-mapping.h>
10#include <linux/dmapool.h>
11#include <linux/crypto.h>
12#include <linux/kernel.h>
13#include <linux/rtnetlink.h>
14#include <linux/interrupt.h>
15#include <linux/spinlock.h>
16#include <linux/gfp.h>
17#include <linux/module.h>
18#include <linux/of.h>
19
20#include <crypto/ctr.h>
21#include <crypto/internal/des.h>
22#include <crypto/aes.h>
23#include <crypto/hmac.h>
24#include <crypto/sha1.h>
25#include <crypto/algapi.h>
26#include <crypto/internal/aead.h>
27#include <crypto/internal/skcipher.h>
28#include <crypto/authenc.h>
29#include <crypto/scatterwalk.h>
30
31#include <linux/soc/ixp4xx/npe.h>
32#include <linux/soc/ixp4xx/qmgr.h>
33
34/* Intermittent includes, delete this after v5.14-rc1 */
35#include <linux/soc/ixp4xx/cpu.h>
36
37#define MAX_KEYLEN 32
38
39/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
40#define NPE_CTX_LEN 80
41#define AES_BLOCK128 16
42
43#define NPE_OP_HASH_VERIFY 0x01
44#define NPE_OP_CCM_ENABLE 0x04
45#define NPE_OP_CRYPT_ENABLE 0x08
46#define NPE_OP_HASH_ENABLE 0x10
47#define NPE_OP_NOT_IN_PLACE 0x20
48#define NPE_OP_HMAC_DISABLE 0x40
49#define NPE_OP_CRYPT_ENCRYPT 0x80
50
51#define NPE_OP_CCM_GEN_MIC 0xcc
52#define NPE_OP_HASH_GEN_ICV 0x50
53#define NPE_OP_ENC_GEN_KEY 0xc9
54
55#define MOD_ECB 0x0000
56#define MOD_CTR 0x1000
57#define MOD_CBC_ENC 0x2000
58#define MOD_CBC_DEC 0x3000
59#define MOD_CCM_ENC 0x4000
60#define MOD_CCM_DEC 0x5000
61
62#define KEYLEN_128 4
63#define KEYLEN_192 6
64#define KEYLEN_256 8
65
66#define CIPH_DECR 0x0000
67#define CIPH_ENCR 0x0400
68
69#define MOD_DES 0x0000
70#define MOD_TDEA2 0x0100
71#define MOD_3DES 0x0200
72#define MOD_AES 0x0800
73#define MOD_AES128 (0x0800 | KEYLEN_128)
74#define MOD_AES192 (0x0900 | KEYLEN_192)
75#define MOD_AES256 (0x0a00 | KEYLEN_256)
76
77#define MAX_IVLEN 16
78#define NPE_QLEN 16
79/* Space for registering when the first
80 * NPE_QLEN crypt_ctl are busy */
81#define NPE_QLEN_TOTAL 64
82
83#define CTL_FLAG_UNUSED 0x0000
84#define CTL_FLAG_USED 0x1000
85#define CTL_FLAG_PERFORM_ABLK 0x0001
86#define CTL_FLAG_GEN_ICV 0x0002
87#define CTL_FLAG_GEN_REVAES 0x0004
88#define CTL_FLAG_PERFORM_AEAD 0x0008
89#define CTL_FLAG_MASK 0x000f
90
91#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
92
93#define MD5_DIGEST_SIZE 16
94
95struct buffer_desc {
96 u32 phys_next;
97#ifdef __ARMEB__
98 u16 buf_len;
99 u16 pkt_len;
100#else
101 u16 pkt_len;
102 u16 buf_len;
103#endif
104 dma_addr_t phys_addr;
105 u32 __reserved[4];
106 struct buffer_desc *next;
107 enum dma_data_direction dir;
108};
109
110struct crypt_ctl {
111#ifdef __ARMEB__
112 u8 mode; /* NPE_OP_* operation mode */
113 u8 init_len;
114 u16 reserved;
115#else
116 u16 reserved;
117 u8 init_len;
118 u8 mode; /* NPE_OP_* operation mode */
119#endif
120 u8 iv[MAX_IVLEN]; /* IV for CBC mode or CTR IV for CTR mode */
121 u32 icv_rev_aes; /* icv or rev aes */
122 u32 src_buf;
123 u32 dst_buf;
124#ifdef __ARMEB__
125 u16 auth_offs; /* Authentication start offset */
126 u16 auth_len; /* Authentication data length */
127 u16 crypt_offs; /* Cryption start offset */
128 u16 crypt_len; /* Cryption data length */
129#else
130 u16 auth_len; /* Authentication data length */
131 u16 auth_offs; /* Authentication start offset */
132 u16 crypt_len; /* Cryption data length */
133 u16 crypt_offs; /* Cryption start offset */
134#endif
135 u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
136 u32 crypto_ctx; /* NPE Crypto Param structure address */
137
138 /* Used by Host: 4*4 bytes*/
139 unsigned int ctl_flags;
140 union {
141 struct skcipher_request *ablk_req;
142 struct aead_request *aead_req;
143 struct crypto_tfm *tfm;
144 } data;
145 struct buffer_desc *regist_buf;
146 u8 *regist_ptr;
147};
148
149struct ablk_ctx {
150 struct buffer_desc *src;
151 struct buffer_desc *dst;
152 u8 iv[MAX_IVLEN];
153 bool encrypt;
154 struct skcipher_request fallback_req; // keep at the end
155};
156
157struct aead_ctx {
158 struct buffer_desc *src;
159 struct buffer_desc *dst;
160 struct scatterlist ivlist;
161 /* used when the hmac is not on one sg entry */
162 u8 *hmac_virt;
163 int encrypt;
164};
165
166struct ix_hash_algo {
167 u32 cfgword;
168 unsigned char *icv;
169};
170
171struct ix_sa_dir {
172 unsigned char *npe_ctx;
173 dma_addr_t npe_ctx_phys;
174 int npe_ctx_idx;
175 u8 npe_mode;
176};
177
178struct ixp_ctx {
179 struct ix_sa_dir encrypt;
180 struct ix_sa_dir decrypt;
181 int authkey_len;
182 u8 authkey[MAX_KEYLEN];
183 int enckey_len;
184 u8 enckey[MAX_KEYLEN];
185 u8 salt[MAX_IVLEN];
186 u8 nonce[CTR_RFC3686_NONCE_SIZE];
187 unsigned int salted;
188 atomic_t configuring;
189 struct completion completion;
190 struct crypto_skcipher *fallback_tfm;
191};
192
193struct ixp_alg {
194 struct skcipher_alg crypto;
195 const struct ix_hash_algo *hash;
196 u32 cfg_enc;
197 u32 cfg_dec;
198
199 int registered;
200};
201
202struct ixp_aead_alg {
203 struct aead_alg crypto;
204 const struct ix_hash_algo *hash;
205 u32 cfg_enc;
206 u32 cfg_dec;
207
208 int registered;
209};
210
211static const struct ix_hash_algo hash_alg_md5 = {
212 .cfgword = 0xAA010004,
213 .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
214 "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
215};
216
217static const struct ix_hash_algo hash_alg_sha1 = {
218 .cfgword = 0x00000005,
219 .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
220 "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
221};
222
223static struct npe *npe_c;
224
225static unsigned int send_qid;
226static unsigned int recv_qid;
227static struct dma_pool *buffer_pool;
228static struct dma_pool *ctx_pool;
229
230static struct crypt_ctl *crypt_virt;
231static dma_addr_t crypt_phys;
232
233static int support_aes = 1;
234
235static struct platform_device *pdev;
236
237static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
238{
239 return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
240}
241
242static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
243{
244 return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
245}
246
247static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
248{
249 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
250}
251
252static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
253{
254 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
255}
256
257static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
258{
259 return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
260}
261
262static int setup_crypt_desc(void)
263{
264 struct device *dev = &pdev->dev;
265
266 BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
267 IS_ENABLED(CONFIG_64BIT)) &&
268 sizeof(struct crypt_ctl) != 64);
269 crypt_virt = dma_alloc_coherent(dev,
270 NPE_QLEN * sizeof(struct crypt_ctl),
271 &crypt_phys, GFP_ATOMIC);
272 if (!crypt_virt)
273 return -ENOMEM;
274 return 0;
275}
276
277static DEFINE_SPINLOCK(desc_lock);
278static struct crypt_ctl *get_crypt_desc(void)
279{
280 int i;
281 static int idx;
282 unsigned long flags;
283
284 spin_lock_irqsave(&desc_lock, flags);
285
286 if (unlikely(!crypt_virt))
287 setup_crypt_desc();
288 if (unlikely(!crypt_virt)) {
289 spin_unlock_irqrestore(&desc_lock, flags);
290 return NULL;
291 }
292 i = idx;
293 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
294 if (++idx >= NPE_QLEN)
295 idx = 0;
296 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
297 spin_unlock_irqrestore(&desc_lock, flags);
298 return crypt_virt + i;
299 } else {
300 spin_unlock_irqrestore(&desc_lock, flags);
301 return NULL;
302 }
303}
304
305static DEFINE_SPINLOCK(emerg_lock);
306static struct crypt_ctl *get_crypt_desc_emerg(void)
307{
308 int i;
309 static int idx = NPE_QLEN;
310 struct crypt_ctl *desc;
311 unsigned long flags;
312
313 desc = get_crypt_desc();
314 if (desc)
315 return desc;
316 if (unlikely(!crypt_virt))
317 return NULL;
318
319 spin_lock_irqsave(&emerg_lock, flags);
320 i = idx;
321 if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
322 if (++idx >= NPE_QLEN_TOTAL)
323 idx = NPE_QLEN;
324 crypt_virt[i].ctl_flags = CTL_FLAG_USED;
325 spin_unlock_irqrestore(&emerg_lock, flags);
326 return crypt_virt + i;
327 } else {
328 spin_unlock_irqrestore(&emerg_lock, flags);
329 return NULL;
330 }
331}
332
333static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
334 dma_addr_t phys)
335{
336 while (buf) {
337 struct buffer_desc *buf1;
338 u32 phys1;
339
340 buf1 = buf->next;
341 phys1 = buf->phys_next;
342 dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
343 dma_pool_free(buffer_pool, buf, phys);
344 buf = buf1;
345 phys = phys1;
346 }
347}
348
349static struct tasklet_struct crypto_done_tasklet;
350
351static void finish_scattered_hmac(struct crypt_ctl *crypt)
352{
353 struct aead_request *req = crypt->data.aead_req;
354 struct aead_ctx *req_ctx = aead_request_ctx(req);
355 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
356 int authsize = crypto_aead_authsize(tfm);
357 int decryptlen = req->assoclen + req->cryptlen - authsize;
358
359 if (req_ctx->encrypt) {
360 scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
361 decryptlen, authsize, 1);
362 }
363 dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
364}
365
366static void one_packet(dma_addr_t phys)
367{
368 struct device *dev = &pdev->dev;
369 struct crypt_ctl *crypt;
370 struct ixp_ctx *ctx;
371 int failed;
372
373 failed = phys & 0x1 ? -EBADMSG : 0;
374 phys &= ~0x3;
375 crypt = crypt_phys2virt(phys);
376
377 switch (crypt->ctl_flags & CTL_FLAG_MASK) {
378 case CTL_FLAG_PERFORM_AEAD: {
379 struct aead_request *req = crypt->data.aead_req;
380 struct aead_ctx *req_ctx = aead_request_ctx(req);
381
382 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
383 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
384 if (req_ctx->hmac_virt)
385 finish_scattered_hmac(crypt);
386
387 aead_request_complete(req, failed);
388 break;
389 }
390 case CTL_FLAG_PERFORM_ABLK: {
391 struct skcipher_request *req = crypt->data.ablk_req;
392 struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
393 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
394 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
395 unsigned int offset;
396
397 if (ivsize > 0) {
398 offset = req->cryptlen - ivsize;
399 if (req_ctx->encrypt) {
400 scatterwalk_map_and_copy(req->iv, req->dst,
401 offset, ivsize, 0);
402 } else {
403 memcpy(req->iv, req_ctx->iv, ivsize);
404 memzero_explicit(req_ctx->iv, ivsize);
405 }
406 }
407
408 if (req_ctx->dst)
409 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
410
411 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
412 skcipher_request_complete(req, failed);
413 break;
414 }
415 case CTL_FLAG_GEN_ICV:
416 ctx = crypto_tfm_ctx(crypt->data.tfm);
417 dma_pool_free(ctx_pool, crypt->regist_ptr,
418 crypt->regist_buf->phys_addr);
419 dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
420 if (atomic_dec_and_test(&ctx->configuring))
421 complete(&ctx->completion);
422 break;
423 case CTL_FLAG_GEN_REVAES:
424 ctx = crypto_tfm_ctx(crypt->data.tfm);
425 *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
426 if (atomic_dec_and_test(&ctx->configuring))
427 complete(&ctx->completion);
428 break;
429 default:
430 BUG();
431 }
432 crypt->ctl_flags = CTL_FLAG_UNUSED;
433}
434
435static void irqhandler(void *_unused)
436{
437 tasklet_schedule(&crypto_done_tasklet);
438}
439
440static void crypto_done_action(unsigned long arg)
441{
442 int i;
443
444 for (i = 0; i < 4; i++) {
445 dma_addr_t phys = qmgr_get_entry(recv_qid);
446 if (!phys)
447 return;
448 one_packet(phys);
449 }
450 tasklet_schedule(&crypto_done_tasklet);
451}
452
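/*
 * One-time setup: resolve the NPE and queue IDs, load the NPE firmware
 * if it is not already running, query its crypto/AES capabilities,
 * create the DMA pools and request the send/receive queues.
 *
 * When probed from the device tree, the node is expected to carry
 * single-cell phandle arguments roughly like the sketch below (the
 * labels of the referenced nodes are made up for illustration):
 *
 *	crypto {
 *		compatible = "intel,ixp4xx-crypto";
 *		intel,npe-handle = <&npe 2>;
 *		queue-rx = <&qmgr 30>;
 *		queue-txready = <&qmgr 29>;
 *	};
 */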
453static int init_ixp_crypto(struct device *dev)
454{
455 struct device_node *np = dev->of_node;
456 u32 msg[2] = { 0, 0 };
457 int ret = -ENODEV;
458 u32 npe_id;
459
460 dev_info(dev, "probing...\n");
461
462 /* Locate the NPE and queue manager to use from device tree */
463 if (IS_ENABLED(CONFIG_OF) && np) {
464 struct of_phandle_args queue_spec;
465 struct of_phandle_args npe_spec;
466
467 ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
468 1, 0, &npe_spec);
469 if (ret) {
470 dev_err(dev, "no NPE engine specified\n");
471 return -ENODEV;
472 }
473 npe_id = npe_spec.args[0];
474 of_node_put(npe_spec.np);
475
476 ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
477 &queue_spec);
478 if (ret) {
479 dev_err(dev, "no rx queue phandle\n");
480 return -ENODEV;
481 }
482 recv_qid = queue_spec.args[0];
483 of_node_put(queue_spec.np);
484
485 ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
486 &queue_spec);
487 if (ret) {
488 dev_err(dev, "no txready queue phandle\n");
489 return -ENODEV;
490 }
491 send_qid = queue_spec.args[0];
492 of_node_put(queue_spec.np);
493 } else {
494		/*
495		 * Hardcoded engine and queue IDs when probing from platform
496		 * data; this goes away once we switch to DT-only probing.
497		 */
498 npe_id = 2;
499 send_qid = 29;
500 recv_qid = 30;
501 }
502
503 npe_c = npe_request(npe_id);
504 if (!npe_c)
505 return ret;
506
507 if (!npe_running(npe_c)) {
508 ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
509 if (ret)
510 goto npe_release;
511 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
512 goto npe_error;
513 } else {
514 if (npe_send_message(npe_c, msg, "STATUS_MSG"))
515 goto npe_error;
516
517 if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
518 goto npe_error;
519 }
520
521 switch ((msg[1] >> 16) & 0xff) {
522 case 3:
523 dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
524 support_aes = 0;
525 break;
526 case 4:
527 case 5:
528 support_aes = 1;
529 break;
530 default:
531 dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
532 ret = -ENODEV;
533 goto npe_release;
534 }
535	/* buffer_pool is sometimes also used to store the hmac,
536	 * so make sure its entries are large enough for that
537	 */
538 BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
539 buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
540 32, 0);
541 ret = -ENOMEM;
542 if (!buffer_pool)
543 goto err;
544
545 ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
546 if (!ctx_pool)
547 goto err;
548
549 ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
550 "ixp_crypto:out", NULL);
551 if (ret)
552 goto err;
553 ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
554 "ixp_crypto:in", NULL);
555 if (ret) {
556 qmgr_release_queue(send_qid);
557 goto err;
558 }
559 qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
560 tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
561
562 qmgr_enable_irq(recv_qid);
563 return 0;
564
565npe_error:
566 dev_err(dev, "%s not responding\n", npe_name(npe_c));
567 ret = -EIO;
568err:
569 dma_pool_destroy(ctx_pool);
570 dma_pool_destroy(buffer_pool);
571npe_release:
572 npe_release(npe_c);
573 return ret;
574}
575
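/*
 * Undo init_ixp_crypto(): disable and release the queues, destroy the
 * DMA pools, release the NPE and free the descriptor ring if it was
 * ever allocated.
 */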
576static void release_ixp_crypto(struct device *dev)
577{
578 qmgr_disable_irq(recv_qid);
579 tasklet_kill(&crypto_done_tasklet);
580
581 qmgr_release_queue(send_qid);
582 qmgr_release_queue(recv_qid);
583
584 dma_pool_destroy(ctx_pool);
585 dma_pool_destroy(buffer_pool);
586
587 npe_release(npe_c);
588
589 if (crypt_virt)
590 dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
591 crypt_virt, crypt_phys);
592}
593
594static void reset_sa_dir(struct ix_sa_dir *dir)
595{
596 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
597 dir->npe_ctx_idx = 0;
598 dir->npe_mode = 0;
599}
600
601static int init_sa_dir(struct ix_sa_dir *dir)
602{
603 dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
604 if (!dir->npe_ctx)
605 return -ENOMEM;
606
607 reset_sa_dir(dir);
608 return 0;
609}
610
611static void free_sa_dir(struct ix_sa_dir *dir)
612{
613 memset(dir->npe_ctx, 0, NPE_CTX_LEN);
614 dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
615}
616
617static int init_tfm(struct crypto_tfm *tfm)
618{
619 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
620 int ret;
621
622 atomic_set(&ctx->configuring, 0);
623 ret = init_sa_dir(&ctx->encrypt);
624 if (ret)
625 return ret;
626 ret = init_sa_dir(&ctx->decrypt);
627 if (ret)
628 free_sa_dir(&ctx->encrypt);
629
630 return ret;
631}
632
633static int init_tfm_ablk(struct crypto_skcipher *tfm)
634{
635 struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
636 struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
637 const char *name = crypto_tfm_alg_name(ctfm);
638
639 ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
640 if (IS_ERR(ctx->fallback_tfm)) {
641 pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
642 name, PTR_ERR(ctx->fallback_tfm));
643 return PTR_ERR(ctx->fallback_tfm);
644 }
645
646 pr_info("Fallback for %s is %s\n",
647 crypto_tfm_alg_driver_name(&tfm->base),
648 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
649 );
650
651 crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
652 return init_tfm(crypto_skcipher_tfm(tfm));
653}
654
655static int init_tfm_aead(struct crypto_aead *tfm)
656{
657 crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
658 return init_tfm(crypto_aead_tfm(tfm));
659}
660
661static void exit_tfm(struct crypto_tfm *tfm)
662{
663 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
664
665 free_sa_dir(&ctx->encrypt);
666 free_sa_dir(&ctx->decrypt);
667}
668
669static void exit_tfm_ablk(struct crypto_skcipher *tfm)
670{
671 struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
672 struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
673
674 crypto_free_skcipher(ctx->fallback_tfm);
675 exit_tfm(crypto_skcipher_tfm(tfm));
676}
677
678static void exit_tfm_aead(struct crypto_aead *tfm)
679{
680 exit_tfm(crypto_aead_tfm(tfm));
681}
682
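/*
 * Queue a one-off hash job that runs the ipad/opad-padded key block
 * through the NPE (NPE_OP_HASH_GEN_ICV) and stores the resulting
 * chaining variable at 'target' inside the SA context. Completion is
 * tracked via ctx->configuring and ctx->completion.
 */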
683static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
684 int init_len, u32 ctx_addr, const u8 *key,
685 int key_len)
686{
687 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
688 struct crypt_ctl *crypt;
689 struct buffer_desc *buf;
690 int i;
691 u8 *pad;
692 dma_addr_t pad_phys, buf_phys;
693
694 BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
695 pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
696 if (!pad)
697 return -ENOMEM;
698 buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
699 if (!buf) {
700 dma_pool_free(ctx_pool, pad, pad_phys);
701 return -ENOMEM;
702 }
703 crypt = get_crypt_desc_emerg();
704 if (!crypt) {
705 dma_pool_free(ctx_pool, pad, pad_phys);
706 dma_pool_free(buffer_pool, buf, buf_phys);
707 return -EAGAIN;
708 }
709
710 memcpy(pad, key, key_len);
711 memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
712 for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
713 pad[i] ^= xpad;
714
715 crypt->data.tfm = tfm;
716 crypt->regist_ptr = pad;
717 crypt->regist_buf = buf;
718
719 crypt->auth_offs = 0;
720 crypt->auth_len = HMAC_PAD_BLOCKLEN;
721 crypt->crypto_ctx = ctx_addr;
722 crypt->src_buf = buf_phys;
723 crypt->icv_rev_aes = target;
724 crypt->mode = NPE_OP_HASH_GEN_ICV;
725 crypt->init_len = init_len;
726 crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
727
728 buf->next = NULL;
729 buf->buf_len = HMAC_PAD_BLOCKLEN;
730 buf->pkt_len = 0;
731 buf->phys_addr = pad_phys;
732
733 atomic_inc(&ctx->configuring);
734 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
735 BUG_ON(qmgr_stat_overflow(send_qid));
736 return 0;
737}
738
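/*
 * Write the hash configuration word and the algorithm's initial ICV
 * into the per-direction NPE context, then have register_chain_var()
 * generate the HMAC outer and inner chaining variables.
 */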
739static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
740 const u8 *key, int key_len, unsigned int digest_len)
741{
742 u32 itarget, otarget, npe_ctx_addr;
743 unsigned char *cinfo;
744 int init_len, ret = 0;
745 u32 cfgword;
746 struct ix_sa_dir *dir;
747 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
748 const struct ix_hash_algo *algo;
749
750 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
751 cinfo = dir->npe_ctx + dir->npe_ctx_idx;
752 algo = ix_hash(tfm);
753
754 /* write cfg word to cryptinfo */
755 cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
756#ifndef __ARMEB__
757 cfgword ^= 0xAA000000; /* change the "byte swap" flags */
758#endif
759 *(__be32 *)cinfo = cpu_to_be32(cfgword);
760 cinfo += sizeof(cfgword);
761
762 /* write ICV to cryptinfo */
763 memcpy(cinfo, algo->icv, digest_len);
764 cinfo += digest_len;
765
766 itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
767 + sizeof(algo->cfgword);
768 otarget = itarget + digest_len;
769 init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
770 npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
771
772 dir->npe_ctx_idx += init_len;
773 dir->npe_mode |= NPE_OP_HASH_ENABLE;
774
775 if (!encrypt)
776 dir->npe_mode |= NPE_OP_HASH_VERIFY;
777
778 ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
779 init_len, npe_ctx_addr, key, key_len);
780 if (ret)
781 return ret;
782 return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
783 init_len, npe_ctx_addr, key, key_len);
784}
785
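/*
 * Have the NPE derive the AES decryption ("reverse") key schedule: the
 * decrypt context is temporarily flagged CIPH_ENCR, an NPE_OP_ENC_GEN_KEY
 * job is queued with the result placed just after the config word, and
 * the completion handler clears the flag again.
 */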
786static int gen_rev_aes_key(struct crypto_tfm *tfm)
787{
788 struct crypt_ctl *crypt;
789 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
790 struct ix_sa_dir *dir = &ctx->decrypt;
791
792 crypt = get_crypt_desc_emerg();
793 if (!crypt)
794 return -EAGAIN;
795
796 *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
797
798 crypt->data.tfm = tfm;
799 crypt->crypt_offs = 0;
800 crypt->crypt_len = AES_BLOCK128;
801 crypt->src_buf = 0;
802 crypt->crypto_ctx = dir->npe_ctx_phys;
803 crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
804 crypt->mode = NPE_OP_ENC_GEN_KEY;
805 crypt->init_len = dir->npe_ctx_idx;
806 crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
807
808 atomic_inc(&ctx->configuring);
809 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
810 BUG_ON(qmgr_stat_overflow(send_qid));
811 return 0;
812}
813
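/*
 * Build the cipher half of the SA context: config word (algorithm,
 * mode, direction, key length) followed by the key material. AES
 * decryption additionally triggers reverse key generation.
 */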
814static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
815 int key_len)
816{
817 u8 *cinfo;
818 u32 cipher_cfg;
819 u32 keylen_cfg = 0;
820 struct ix_sa_dir *dir;
821 struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
822 int err;
823
824 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
825 cinfo = dir->npe_ctx;
826
827 if (encrypt) {
828 cipher_cfg = cipher_cfg_enc(tfm);
829 dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
830 } else {
831 cipher_cfg = cipher_cfg_dec(tfm);
832 }
833 if (cipher_cfg & MOD_AES) {
834 switch (key_len) {
835 case 16:
836 keylen_cfg = MOD_AES128;
837 break;
838 case 24:
839 keylen_cfg = MOD_AES192;
840 break;
841 case 32:
842 keylen_cfg = MOD_AES256;
843 break;
844 default:
845 return -EINVAL;
846 }
847 cipher_cfg |= keylen_cfg;
848 } else {
849 err = crypto_des_verify_key(tfm, key);
850 if (err)
851 return err;
852 }
853 /* write cfg word to cryptinfo */
854 *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
855 cinfo += sizeof(cipher_cfg);
856
857 /* write cipher key to cryptinfo */
858 memcpy(cinfo, key, key_len);
859 /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
860 if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
861 memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
862 key_len = DES3_EDE_KEY_SIZE;
863 }
864 dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
865 dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
866 if ((cipher_cfg & MOD_AES) && !encrypt)
867 return gen_rev_aes_key(tfm);
868
869 return 0;
870}
871
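/*
 * DMA-map each scatterlist segment and append a matching buffer
 * descriptor to the chain rooted at 'buf' (typically an on-stack
 * hook). Returns the last descriptor, or NULL if a descriptor could
 * not be allocated from buffer_pool.
 */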
872static struct buffer_desc *chainup_buffers(struct device *dev,
873 struct scatterlist *sg, unsigned int nbytes,
874 struct buffer_desc *buf, gfp_t flags,
875 enum dma_data_direction dir)
876{
877 for (; nbytes > 0; sg = sg_next(sg)) {
878 unsigned int len = min(nbytes, sg->length);
879 struct buffer_desc *next_buf;
880 dma_addr_t next_buf_phys;
881 void *ptr;
882
883 nbytes -= len;
884 ptr = sg_virt(sg);
885 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
886 if (!next_buf) {
887 buf = NULL;
888 break;
889 }
890 sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
891 buf->next = next_buf;
892 buf->phys_next = next_buf_phys;
893 buf = next_buf;
894
895 buf->phys_addr = sg_dma_address(sg);
896 buf->buf_len = len;
897 buf->dir = dir;
898 }
899 buf->next = NULL;
900 buf->phys_next = 0;
901 return buf;
902}
903
904static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
905 unsigned int key_len)
906{
907 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
908 int ret;
909
910 init_completion(&ctx->completion);
911 atomic_inc(&ctx->configuring);
912
913 reset_sa_dir(&ctx->encrypt);
914 reset_sa_dir(&ctx->decrypt);
915
916 ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
917 ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
918
919 ret = setup_cipher(&tfm->base, 0, key, key_len);
920 if (ret)
921 goto out;
922 ret = setup_cipher(&tfm->base, 1, key, key_len);
923out:
924 if (!atomic_dec_and_test(&ctx->configuring))
925 wait_for_completion(&ctx->completion);
926 if (ret)
927 return ret;
928 crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
929 crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
930
931 return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
932}
933
934static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
935 unsigned int key_len)
936{
937 return verify_skcipher_des3_key(tfm, key) ?:
938 ablk_setkey(tfm, key, key_len);
939}
940
941static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
942 unsigned int key_len)
943{
944 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
945
946	/* the nonce is stored in the last bytes of the key */
947 if (key_len < CTR_RFC3686_NONCE_SIZE)
948 return -EINVAL;
949
950 memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
951 CTR_RFC3686_NONCE_SIZE);
952
953 key_len -= CTR_RFC3686_NONCE_SIZE;
954 return ablk_setkey(tfm, key, key_len);
955}
956
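/*
 * Run the request through the software fallback skcipher allocated in
 * init_tfm_ablk(). ablk_perform() takes this path when the source or
 * destination scatterlist has more than one entry.
 */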
957static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
958{
959 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
960 struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
961 struct ablk_ctx *rctx = skcipher_request_ctx(areq);
962 int err;
963
964 skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
965 skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
966 areq->base.complete, areq->base.data);
967 skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
968 areq->cryptlen, areq->iv);
969 if (encrypt)
970 err = crypto_skcipher_encrypt(&rctx->fallback_req);
971 else
972 err = crypto_skcipher_decrypt(&rctx->fallback_req);
973 return err;
974}
975
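/*
 * Common skcipher path: build a crypt descriptor, map src/dst into
 * buffer chains and post the job to the send queue; completion is
 * handled in one_packet(). For decryption with an IV, the last
 * ciphertext block is saved up front so the completion handler can
 * return the correct output IV. Returns -EINPROGRESS on success.
 */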
976static int ablk_perform(struct skcipher_request *req, int encrypt)
977{
978 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
979 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
980 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
981 struct ix_sa_dir *dir;
982 struct crypt_ctl *crypt;
983 unsigned int nbytes = req->cryptlen;
984 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
985 struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
986 struct buffer_desc src_hook;
987 struct device *dev = &pdev->dev;
988 unsigned int offset;
989 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
990 GFP_KERNEL : GFP_ATOMIC;
991
992 if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
993 return ixp4xx_cipher_fallback(req, encrypt);
994
995 if (qmgr_stat_full(send_qid))
996 return -EAGAIN;
997 if (atomic_read(&ctx->configuring))
998 return -EAGAIN;
999
1000 dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
1001 req_ctx->encrypt = encrypt;
1002
1003 crypt = get_crypt_desc();
1004 if (!crypt)
1005 return -ENOMEM;
1006
1007 crypt->data.ablk_req = req;
1008 crypt->crypto_ctx = dir->npe_ctx_phys;
1009 crypt->mode = dir->npe_mode;
1010 crypt->init_len = dir->npe_ctx_idx;
1011
1012 crypt->crypt_offs = 0;
1013 crypt->crypt_len = nbytes;
1014
1015 BUG_ON(ivsize && !req->iv);
1016 memcpy(crypt->iv, req->iv, ivsize);
1017 if (ivsize > 0 && !encrypt) {
1018 offset = req->cryptlen - ivsize;
1019 scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
1020 }
1021 if (req->src != req->dst) {
1022 struct buffer_desc dst_hook;
1023
1024 crypt->mode |= NPE_OP_NOT_IN_PLACE;
1025 /* This was never tested by Intel
1026 * for more than one dst buffer, I think. */
1027 req_ctx->dst = NULL;
1028 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
1029 flags, DMA_FROM_DEVICE))
1030 goto free_buf_dest;
1031 src_direction = DMA_TO_DEVICE;
1032 req_ctx->dst = dst_hook.next;
1033 crypt->dst_buf = dst_hook.phys_next;
1034 } else {
1035 req_ctx->dst = NULL;
1036 }
1037 req_ctx->src = NULL;
1038 if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
1039 src_direction))
1040 goto free_buf_src;
1041
1042 req_ctx->src = src_hook.next;
1043 crypt->src_buf = src_hook.phys_next;
1044 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
1045 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1046 BUG_ON(qmgr_stat_overflow(send_qid));
1047 return -EINPROGRESS;
1048
1049free_buf_src:
1050 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1051free_buf_dest:
1052 if (req->src != req->dst)
1053 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1054
1055 crypt->ctl_flags = CTL_FLAG_UNUSED;
1056 return -ENOMEM;
1057}
1058
1059static int ablk_encrypt(struct skcipher_request *req)
1060{
1061 return ablk_perform(req, 1);
1062}
1063
1064static int ablk_decrypt(struct skcipher_request *req)
1065{
1066 return ablk_perform(req, 0);
1067}
1068
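/*
 * RFC 3686 CTR: assemble the 16-byte counter block from the stored
 * 4-byte nonce, the 8-byte per-request IV and an initial block counter
 * of 1, then run the normal path (CTR is the same for both directions).
 */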
1069static int ablk_rfc3686_crypt(struct skcipher_request *req)
1070{
1071 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1072 struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
1073 u8 iv[CTR_RFC3686_BLOCK_SIZE];
1074 u8 *info = req->iv;
1075 int ret;
1076
1077 /* set up counter block */
1078 memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1079 memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
1080
1081 /* initialize counter portion of counter block */
1082 *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1083 cpu_to_be32(1);
1084
1085 req->iv = iv;
1086 ret = ablk_perform(req, 1);
1087 req->iv = info;
1088 return ret;
1089}
1090
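/*
 * Common AEAD path: the hash covers assoclen + cryptlen while the
 * cipher covers the region given by cryptoffset/eff_cryptlen. If the
 * ICV would straddle a buffer-descriptor boundary it is bounced
 * through a buffer_pool allocation (req_ctx->hmac_virt) and fixed up
 * in finish_scattered_hmac(). Returns -EINPROGRESS on success.
 */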
1091static int aead_perform(struct aead_request *req, int encrypt,
1092 int cryptoffset, int eff_cryptlen, u8 *iv)
1093{
1094 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1095 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1096 unsigned int ivsize = crypto_aead_ivsize(tfm);
1097 unsigned int authsize = crypto_aead_authsize(tfm);
1098 struct ix_sa_dir *dir;
1099 struct crypt_ctl *crypt;
1100 unsigned int cryptlen;
1101 struct buffer_desc *buf, src_hook;
1102 struct aead_ctx *req_ctx = aead_request_ctx(req);
1103 struct device *dev = &pdev->dev;
1104 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1105 GFP_KERNEL : GFP_ATOMIC;
1106 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
1107 unsigned int lastlen;
1108
1109 if (qmgr_stat_full(send_qid))
1110 return -EAGAIN;
1111 if (atomic_read(&ctx->configuring))
1112 return -EAGAIN;
1113
1114 if (encrypt) {
1115 dir = &ctx->encrypt;
1116 cryptlen = req->cryptlen;
1117 } else {
1118 dir = &ctx->decrypt;
1119 /* req->cryptlen includes the authsize when decrypting */
1120 cryptlen = req->cryptlen - authsize;
1121 eff_cryptlen -= authsize;
1122 }
1123 crypt = get_crypt_desc();
1124 if (!crypt)
1125 return -ENOMEM;
1126
1127 crypt->data.aead_req = req;
1128 crypt->crypto_ctx = dir->npe_ctx_phys;
1129 crypt->mode = dir->npe_mode;
1130 crypt->init_len = dir->npe_ctx_idx;
1131
1132 crypt->crypt_offs = cryptoffset;
1133 crypt->crypt_len = eff_cryptlen;
1134
1135 crypt->auth_offs = 0;
1136 crypt->auth_len = req->assoclen + cryptlen;
1137 BUG_ON(ivsize && !req->iv);
1138 memcpy(crypt->iv, req->iv, ivsize);
1139
1140 buf = chainup_buffers(dev, req->src, crypt->auth_len,
1141 &src_hook, flags, src_direction);
1142 req_ctx->src = src_hook.next;
1143 crypt->src_buf = src_hook.phys_next;
1144 if (!buf)
1145 goto free_buf_src;
1146
1147 lastlen = buf->buf_len;
1148 if (lastlen >= authsize)
1149 crypt->icv_rev_aes = buf->phys_addr +
1150 buf->buf_len - authsize;
1151
1152 req_ctx->dst = NULL;
1153
1154 if (req->src != req->dst) {
1155 struct buffer_desc dst_hook;
1156
1157 crypt->mode |= NPE_OP_NOT_IN_PLACE;
1158 src_direction = DMA_TO_DEVICE;
1159
1160 buf = chainup_buffers(dev, req->dst, crypt->auth_len,
1161 &dst_hook, flags, DMA_FROM_DEVICE);
1162 req_ctx->dst = dst_hook.next;
1163 crypt->dst_buf = dst_hook.phys_next;
1164
1165 if (!buf)
1166 goto free_buf_dst;
1167
1168 if (encrypt) {
1169 lastlen = buf->buf_len;
1170 if (lastlen >= authsize)
1171 crypt->icv_rev_aes = buf->phys_addr +
1172 buf->buf_len - authsize;
1173 }
1174 }
1175
1176 if (unlikely(lastlen < authsize)) {
1177 dma_addr_t dma;
1178		/* The hmac bytes are scattered across buffers,
1179		 * so copy them into a safe contiguous buffer */
1180 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
1181 if (unlikely(!req_ctx->hmac_virt))
1182 goto free_buf_dst;
1183 crypt->icv_rev_aes = dma;
1184 if (!encrypt) {
1185 scatterwalk_map_and_copy(req_ctx->hmac_virt,
1186 req->src, cryptlen, authsize, 0);
1187 }
1188 req_ctx->encrypt = encrypt;
1189 } else {
1190 req_ctx->hmac_virt = NULL;
1191 }
1192
1193 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1194 qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1195 BUG_ON(qmgr_stat_overflow(send_qid));
1196 return -EINPROGRESS;
1197
1198free_buf_dst:
1199 free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1200free_buf_src:
1201 free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1202 crypt->ctl_flags = CTL_FLAG_UNUSED;
1203 return -ENOMEM;
1204}
1205
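/*
 * (Re)build both SA directions for an AEAD tfm from the stored cipher
 * and authentication keys, then wait for any NPE-side context setup
 * (HMAC chaining variables, reverse AES key) to complete.
 */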
1206static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1207{
1208 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1209 unsigned int digest_len = crypto_aead_maxauthsize(tfm);
1210 int ret;
1211
1212 if (!ctx->enckey_len && !ctx->authkey_len)
1213 return 0;
1214 init_completion(&ctx->completion);
1215 atomic_inc(&ctx->configuring);
1216
1217 reset_sa_dir(&ctx->encrypt);
1218 reset_sa_dir(&ctx->decrypt);
1219
1220 ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1221 if (ret)
1222 goto out;
1223 ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1224 if (ret)
1225 goto out;
1226 ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1227 ctx->authkey_len, digest_len);
1228 if (ret)
1229 goto out;
1230 ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1231 ctx->authkey_len, digest_len);
1232out:
1233 if (!atomic_dec_and_test(&ctx->configuring))
1234 wait_for_completion(&ctx->completion);
1235 return ret;
1236}
1237
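/* Only ICV lengths that are non-zero multiples of 4 bytes are accepted */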
1238static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1239{
1240 int max = crypto_aead_maxauthsize(tfm) >> 2;
1241
1242 if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1243 return -EINVAL;
1244 return aead_setup(tfm, authsize);
1245}
1246
1247static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1248 unsigned int keylen)
1249{
1250 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1251 struct crypto_authenc_keys keys;
1252
1253 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1254 goto badkey;
1255
1256 if (keys.authkeylen > sizeof(ctx->authkey))
1257 goto badkey;
1258
1259 if (keys.enckeylen > sizeof(ctx->enckey))
1260 goto badkey;
1261
1262 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1263 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1264 ctx->authkey_len = keys.authkeylen;
1265 ctx->enckey_len = keys.enckeylen;
1266
1267 memzero_explicit(&keys, sizeof(keys));
1268 return aead_setup(tfm, crypto_aead_authsize(tfm));
1269badkey:
1270 memzero_explicit(&keys, sizeof(keys));
1271 return -EINVAL;
1272}
1273
1274static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1275 unsigned int keylen)
1276{
1277 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1278 struct crypto_authenc_keys keys;
1279 int err;
1280
1281 err = crypto_authenc_extractkeys(&keys, key, keylen);
1282 if (unlikely(err))
1283 goto badkey;
1284
1285 err = -EINVAL;
1286 if (keys.authkeylen > sizeof(ctx->authkey))
1287 goto badkey;
1288
1289 err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1290 if (err)
1291 goto badkey;
1292
1293 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1294 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1295 ctx->authkey_len = keys.authkeylen;
1296 ctx->enckey_len = keys.enckeylen;
1297
1298 memzero_explicit(&keys, sizeof(keys));
1299 return aead_setup(tfm, crypto_aead_authsize(tfm));
1300badkey:
1301 memzero_explicit(&keys, sizeof(keys));
1302 return err;
1303}
1304
1305static int aead_encrypt(struct aead_request *req)
1306{
1307 return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
1308}
1309
1310static int aead_decrypt(struct aead_request *req)
1311{
1312 return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
1313}
1314
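/* skcipher templates; cfg_enc/cfg_dec are the matching NPE config words */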
1315static struct ixp_alg ixp4xx_algos[] = {
1316{
1317 .crypto = {
1318 .base.cra_name = "cbc(des)",
1319 .base.cra_blocksize = DES_BLOCK_SIZE,
1320
1321 .min_keysize = DES_KEY_SIZE,
1322 .max_keysize = DES_KEY_SIZE,
1323 .ivsize = DES_BLOCK_SIZE,
1324 },
1325 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1326 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1327
1328}, {
1329 .crypto = {
1330 .base.cra_name = "ecb(des)",
1331 .base.cra_blocksize = DES_BLOCK_SIZE,
1332 .min_keysize = DES_KEY_SIZE,
1333 .max_keysize = DES_KEY_SIZE,
1334 },
1335 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1336 .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1337}, {
1338 .crypto = {
1339 .base.cra_name = "cbc(des3_ede)",
1340 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1341
1342 .min_keysize = DES3_EDE_KEY_SIZE,
1343 .max_keysize = DES3_EDE_KEY_SIZE,
1344 .ivsize = DES3_EDE_BLOCK_SIZE,
1345 .setkey = ablk_des3_setkey,
1346 },
1347 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1348 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1349}, {
1350 .crypto = {
1351 .base.cra_name = "ecb(des3_ede)",
1352 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1353
1354 .min_keysize = DES3_EDE_KEY_SIZE,
1355 .max_keysize = DES3_EDE_KEY_SIZE,
1356 .setkey = ablk_des3_setkey,
1357 },
1358 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1359 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1360}, {
1361 .crypto = {
1362 .base.cra_name = "cbc(aes)",
1363 .base.cra_blocksize = AES_BLOCK_SIZE,
1364
1365 .min_keysize = AES_MIN_KEY_SIZE,
1366 .max_keysize = AES_MAX_KEY_SIZE,
1367 .ivsize = AES_BLOCK_SIZE,
1368 },
1369 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1370 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1371}, {
1372 .crypto = {
1373 .base.cra_name = "ecb(aes)",
1374 .base.cra_blocksize = AES_BLOCK_SIZE,
1375
1376 .min_keysize = AES_MIN_KEY_SIZE,
1377 .max_keysize = AES_MAX_KEY_SIZE,
1378 },
1379 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1380 .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1381}, {
1382 .crypto = {
1383 .base.cra_name = "ctr(aes)",
1384 .base.cra_blocksize = 1,
1385
1386 .min_keysize = AES_MIN_KEY_SIZE,
1387 .max_keysize = AES_MAX_KEY_SIZE,
1388 .ivsize = AES_BLOCK_SIZE,
1389 },
1390 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1391 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1392}, {
1393 .crypto = {
1394 .base.cra_name = "rfc3686(ctr(aes))",
1395 .base.cra_blocksize = 1,
1396
1397 .min_keysize = AES_MIN_KEY_SIZE,
1398 .max_keysize = AES_MAX_KEY_SIZE,
1399 .ivsize = AES_BLOCK_SIZE,
1400 .setkey = ablk_rfc3686_setkey,
1401 .encrypt = ablk_rfc3686_crypt,
1402 .decrypt = ablk_rfc3686_crypt,
1403 },
1404 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1405 .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1406} };
1407
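/* authenc() AEAD templates combining the NPE hash and cipher engines */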
1408static struct ixp_aead_alg ixp4xx_aeads[] = {
1409{
1410 .crypto = {
1411 .base = {
1412 .cra_name = "authenc(hmac(md5),cbc(des))",
1413 .cra_blocksize = DES_BLOCK_SIZE,
1414 },
1415 .ivsize = DES_BLOCK_SIZE,
1416 .maxauthsize = MD5_DIGEST_SIZE,
1417 },
1418 .hash = &hash_alg_md5,
1419 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1420 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1421}, {
1422 .crypto = {
1423 .base = {
1424 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
1425 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1426 },
1427 .ivsize = DES3_EDE_BLOCK_SIZE,
1428 .maxauthsize = MD5_DIGEST_SIZE,
1429 .setkey = des3_aead_setkey,
1430 },
1431 .hash = &hash_alg_md5,
1432 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1433 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1434}, {
1435 .crypto = {
1436 .base = {
1437 .cra_name = "authenc(hmac(sha1),cbc(des))",
1438 .cra_blocksize = DES_BLOCK_SIZE,
1439 },
1440 .ivsize = DES_BLOCK_SIZE,
1441 .maxauthsize = SHA1_DIGEST_SIZE,
1442 },
1443 .hash = &hash_alg_sha1,
1444 .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1445 .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1446}, {
1447 .crypto = {
1448 .base = {
1449 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1450 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1451 },
1452 .ivsize = DES3_EDE_BLOCK_SIZE,
1453 .maxauthsize = SHA1_DIGEST_SIZE,
1454 .setkey = des3_aead_setkey,
1455 },
1456 .hash = &hash_alg_sha1,
1457 .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1458 .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1459}, {
1460 .crypto = {
1461 .base = {
1462 .cra_name = "authenc(hmac(md5),cbc(aes))",
1463 .cra_blocksize = AES_BLOCK_SIZE,
1464 },
1465 .ivsize = AES_BLOCK_SIZE,
1466 .maxauthsize = MD5_DIGEST_SIZE,
1467 },
1468 .hash = &hash_alg_md5,
1469 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1470 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1471}, {
1472 .crypto = {
1473 .base = {
1474 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1475 .cra_blocksize = AES_BLOCK_SIZE,
1476 },
1477 .ivsize = AES_BLOCK_SIZE,
1478 .maxauthsize = SHA1_DIGEST_SIZE,
1479 },
1480 .hash = &hash_alg_sha1,
1481 .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1482 .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1483} };
1484
1485#define IXP_POSTFIX "-ixp4xx"
1486
1487static int ixp_crypto_probe(struct platform_device *_pdev)
1488{
1489 struct device *dev = &_pdev->dev;
1490 int num = ARRAY_SIZE(ixp4xx_algos);
1491 int i, err;
1492
1493 pdev = _pdev;
1494
1495 err = init_ixp_crypto(dev);
1496 if (err)
1497 return err;
1498
1499 for (i = 0; i < num; i++) {
1500 struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
1501
1502 if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1503 "%s"IXP_POSTFIX, cra->base.cra_name) >=
1504 CRYPTO_MAX_ALG_NAME)
1505 continue;
1506 if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1507 continue;
1508
1509 /* block ciphers */
1510 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1511 CRYPTO_ALG_ASYNC |
1512 CRYPTO_ALG_ALLOCATES_MEMORY |
1513 CRYPTO_ALG_NEED_FALLBACK;
1514 if (!cra->setkey)
1515 cra->setkey = ablk_setkey;
1516 if (!cra->encrypt)
1517 cra->encrypt = ablk_encrypt;
1518 if (!cra->decrypt)
1519 cra->decrypt = ablk_decrypt;
1520 cra->init = init_tfm_ablk;
1521 cra->exit = exit_tfm_ablk;
1522
1523 cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1524 cra->base.cra_module = THIS_MODULE;
1525 cra->base.cra_alignmask = 3;
1526 cra->base.cra_priority = 300;
1527 if (crypto_register_skcipher(cra))
1528 dev_err(&pdev->dev, "Failed to register '%s'\n",
1529 cra->base.cra_name);
1530 else
1531 ixp4xx_algos[i].registered = 1;
1532 }
1533
1534 for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1535 struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
1536
1537 if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1538 "%s"IXP_POSTFIX, cra->base.cra_name) >=
1539 CRYPTO_MAX_ALG_NAME)
1540 continue;
1541		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
1542 continue;
1543
1544 /* authenc */
1545 cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1546 CRYPTO_ALG_ASYNC |
1547 CRYPTO_ALG_ALLOCATES_MEMORY;
1548 cra->setkey = cra->setkey ?: aead_setkey;
1549 cra->setauthsize = aead_setauthsize;
1550 cra->encrypt = aead_encrypt;
1551 cra->decrypt = aead_decrypt;
1552 cra->init = init_tfm_aead;
1553 cra->exit = exit_tfm_aead;
1554
1555 cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1556 cra->base.cra_module = THIS_MODULE;
1557 cra->base.cra_alignmask = 3;
1558 cra->base.cra_priority = 300;
1559
1560 if (crypto_register_aead(cra))
1561 dev_err(&pdev->dev, "Failed to register '%s'\n",
1562 cra->base.cra_driver_name);
1563 else
1564 ixp4xx_aeads[i].registered = 1;
1565 }
1566 return 0;
1567}
1568
1569static void ixp_crypto_remove(struct platform_device *pdev)
1570{
1571 int num = ARRAY_SIZE(ixp4xx_algos);
1572 int i;
1573
1574 for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1575 if (ixp4xx_aeads[i].registered)
1576 crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
1577 }
1578
1579 for (i = 0; i < num; i++) {
1580 if (ixp4xx_algos[i].registered)
1581 crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
1582 }
1583 release_ixp_crypto(&pdev->dev);
1584}

1585static const struct of_device_id ixp4xx_crypto_of_match[] = {
1586 {
1587 .compatible = "intel,ixp4xx-crypto",
1588 },
1589 {},
1590};
1591
1592static struct platform_driver ixp_crypto_driver = {
1593 .probe = ixp_crypto_probe,
1594 .remove = ixp_crypto_remove,
1595 .driver = {
1596 .name = "ixp4xx_crypto",
1597 .of_match_table = ixp4xx_crypto_of_match,
1598 },
1599};
1600module_platform_driver(ixp_crypto_driver);
1601
1602MODULE_LICENSE("GPL");
1603MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1604MODULE_DESCRIPTION("IXP4xx hardware crypto");
1605