// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"

#define ATMEL_AES_PRIORITY	300

#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define SIZE_IN_WORDS(x)	((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS

#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
				 AES_FLAGS_ENCRYPT |		\
				 AES_FLAGS_GTAGEN)

#define AES_FLAGS_BUSY		BIT(3)
#define AES_FLAGS_DUMP_REG	BIT(4)
#define AES_FLAGS_OWN_SHA	BIT(5)

#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	256


struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_gcm;
	bool	has_xts;
	bool	has_authenc;
	u32	max_burst_size;
};

struct atmel_aes_dev;


typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);


struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;
	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	u16			block_size;
	bool			is_aead;
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

struct atmel_aes_ctr_ctx {
	struct atmel_aes_base_ctx	base;

	__be32			iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			offset;
	struct scatterlist	src[2];
	struct scatterlist	dst[2];
	u32			blocks;
};

struct atmel_aes_gcm_ctx {
	struct atmel_aes_base_ctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];

	__be32			j0[AES_BLOCK_SIZE / sizeof(u32)];
	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
	__be32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			textlen;

	const __be32		*ghash_in;
	__be32			*ghash_out;
	atmel_aes_fn_t		ghash_resume;
};

struct atmel_aes_xts_ctx {
	struct atmel_aes_base_ctx	base;

	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
	struct crypto_skcipher	*fallback_tfm;
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_ctx {
	struct atmel_aes_base_ctx	base;
	struct atmel_sha_authenc_ctx	*auth;
};
#endif

struct atmel_aes_reqctx {
	unsigned long		mode;
	u8			lastc[AES_BLOCK_SIZE];
	struct skcipher_request	fallback_req;
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_reqctx {
	struct atmel_aes_reqctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];
	size_t			textlen;
	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
	/* auth_req MUST be placed last. */
	struct ahash_request	auth_req;
};
#endif

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder;
	unsigned int		sg_len;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;
	atmel_aes_fn_t		resume;
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;
	size_t			datalen;
	u32			*data;

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;
	struct scatterlist	aligned_sg;
	struct scatterlist	*real_dst;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
{
	switch (offset) {
	case AES_CR:
		return "CR";

	case AES_MR:
		return "MR";

	case AES_ISR:
		return "ISR";

	case AES_IMR:
		return "IMR";

	case AES_IER:
		return "IER";

	case AES_IDR:
		return "IDR";

	case AES_KEYWR(0):
	case AES_KEYWR(1):
	case AES_KEYWR(2):
	case AES_KEYWR(3):
	case AES_KEYWR(4):
	case AES_KEYWR(5):
	case AES_KEYWR(6):
	case AES_KEYWR(7):
		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
		break;

	case AES_IDATAR(0):
	case AES_IDATAR(1):
	case AES_IDATAR(2):
	case AES_IDATAR(3):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
		break;

	case AES_ODATAR(0):
	case AES_ODATAR(1):
	case AES_ODATAR(2):
	case AES_ODATAR(3):
		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
		break;

	case AES_IVR(0):
	case AES_IVR(1):
	case AES_IVR(2):
	case AES_IVR(3):
		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
		break;

	case AES_AADLENR:
		return "AADLENR";

	case AES_CLENR:
		return "CLENR";

	case AES_GHASHR(0):
	case AES_GHASHR(1):
	case AES_GHASHR(2):
	case AES_GHASHR(3):
		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
		break;

	case AES_TAGR(0):
	case AES_TAGR(1):
	case AES_TAGR(2):
	case AES_TAGR(3):
		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
		break;

	case AES_CTRR:
		return "CTRR";

	case AES_GCMHR(0):
	case AES_GCMHR(1):
	case AES_GCMHR(2):
	case AES_GCMHR(3):
		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
		break;

	case AES_EMR:
		return "EMR";

	case AES_TWR(0):
	case AES_TWR(1):
	case AES_TWR(2):
	case AES_TWR(3):
		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
		break;

	case AES_ALPHAR(0):
	case AES_ALPHAR(1):
	case AES_ALPHAR(2):
	case AES_ALPHAR(3):
		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
		break;

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}
#endif /* VERBOSE_DEBUG */

/* Shared functions */

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
				   u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					void *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const void *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

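/*
 * Completion chaining: if DATRDY is already set, run the next step of the
 * request synchronously; otherwise record it in dd->resume, enable the
 * Data Ready interrupt and return -EINPROGRESS so the interrupt handler
 * can resume the request later.
 */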
static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}

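/*
 * Number of padding bytes needed to reach the next block_size boundary;
 * block_size must be a power of two. For example, with a 16-byte block
 * size, a 20-byte buffer needs 12 bytes of padding and a 32-byte buffer
 * needs none.
 */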
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}

static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd;

	spin_lock_bh(&atmel_aes.lock);
	/* One AES IP per SoC. */
	aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
					  struct atmel_aes_dev, list);
	spin_unlock_bh(&atmel_aes.lock);
	return aes_dd;
}

static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
	return 0;
}

static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
{
	return (dd->flags & AES_FLAGS_ENCRYPT);
}

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif

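/*
 * On completion, the skcipher API expects req->iv to hold the IV to chain
 * a follow-up request. For CBC-like modes this is the last ciphertext
 * block: read it from req->dst after encryption, or take the copy saved
 * in rctx->lastc before decryption (req->src may alias req->dst).
 */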
static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & AES_FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	else
		memcpy(req->iv, rctx->lastc, ivsize);
}

static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}

static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	int i;

	/*
	 * The CTR transfer works in fragments of data of at most 1 MByte
	 * because of the 16-bit CTR counter embedded in the IP. When
	 * reaching this point, ctx->blocks contains the number of blocks of
	 * the last fragment processed, so there is no need to explicitly
	 * cast it to u16.
	 */
	for (i = 0; i < ctx->blocks; i++)
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);

	memcpy(req->iv, ctx->iv, ivsize);
}

static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->ctx->is_aead)
		atmel_aes_authenc_complete(dd, err);
#endif

	clk_disable(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (!err && !dd->ctx->is_aead &&
	    (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
		if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
			atmel_aes_set_iv_as_last_ciphertext_block(dd);
		else
			atmel_aes_ctr_update_req_iv(dd);
	}

	if (dd->is_async)
		crypto_request_complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}

static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
				     const __be32 *iv, const u32 *key, int keylen)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));

	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_block(dd, AES_IVR(0), iv);
}

static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
					const __be32 *iv)
{
	atmel_aes_write_ctrl_key(dd, use_dma, iv,
				 dd->ctx->key, dd->ctx->keylen);
}

/* CPU transfer */

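/*
 * PIO loop: read the block just processed from ODATAR, then feed the next
 * input block to IDATAR. Whenever DATRDY is not yet set, arm the Data
 * Ready interrupt and resume here later. Once all blocks are done, copy
 * the result from the internal bounce buffer back into the real
 * destination scatterlist.
 */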
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}

static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

	if (unlikely(len == 0))
		return -EINVAL;

	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

	dd->total = len;
	dd->real_dst = dst;
	dd->cpu_transfer_complete = resume;
	dd->datalen = len + padlen;
	dd->data = (u32 *)dd->buf;
	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}


/* DMA transfer */

static void atmel_aes_dma_callback(void *data);

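/*
 * The DMA path requires every scatterlist entry to start on a 32-bit
 * boundary and, except for the last one, to span a whole number of
 * blocks. If the last entry is longer than needed, it is trimmed to 'len'
 * and the trimmed amount is kept in dma->remainder so that
 * atmel_aes_restore_sg() can undo the change once the transfer is over.
 */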
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

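/*
 * Map src/dst for DMA. If either list fails the alignment check, fall
 * back to the pre-allocated bounce buffer (dd->buf): the source data is
 * copied into it and a single aligned scatterlist entry is used instead.
 * When src == dst, one bidirectional mapping is shared by both sides.
 */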
static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}

static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}

static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}

static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	dmaengine_terminate_sync(dd->dst.chan);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_unmap(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}

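/*
 * Single scheduling point for the (one per SoC) AES IP: enqueue the new
 * request and, if the hardware is idle, dequeue the next one and run its
 * ctx->start() handler. Requests started asynchronously get the
 * crypto_enqueue_request() status (-EINPROGRESS or -EBUSY); a request
 * started right away returns its start handler's result instead.
 */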
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	start_async = (areq != new_areq);
	dd->is_async = start_async;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}


/* AES async block ciphers */

static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}

static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst,
					   req->cryptlen,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
				   atmel_aes_transfer_complete);
}

static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct scatterlist *src, *dst;
	size_t datalen;
	u32 ctr;
	u16 start, end;
	bool use_dma, fragmented = false;

	/* Check for transfer completion. */
	ctx->offset += dd->total;
	if (ctx->offset >= req->cryptlen)
		return atmel_aes_transfer_complete(dd);

	/* Compute data length. */
	datalen = req->cryptlen - ctx->offset;
	ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(ctx->iv[3]);

	/* Check 16bit counter overflow. */
	start = ctr & 0xffff;
	end = start + ctx->blocks - 1;

	if (ctx->blocks >> 16 || end < start) {
		ctr |= 0xffff;
		datalen = AES_BLOCK_SIZE * (0x10000 - start);
		fragmented = true;
	}
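	/*
	 * Example: with start = 0xfffe and ctx->blocks = 4, end wraps to
	 * 0x0001, so only 0x10000 - 0xfffe = 2 blocks are processed in this
	 * fragment; the remaining blocks go through the next call, after the
	 * IV has been incremented by software below.
	 */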

	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

	/* Jump to offset. */
	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

	/* Configure hardware. */
	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		ctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
	}

	if (use_dma)
		return atmel_aes_dma_start(dd, src, dst, datalen,
					   atmel_aes_ctr_transfer);

	return atmel_aes_cpu_start(dd, src, dst, datalen,
				   atmel_aes_ctr_transfer);
}

static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
	ctx->offset = 0;
	dd->total = 0;
	return atmel_aes_ctr_transfer(dd);
}

static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
{
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
		     crypto_skcipher_decrypt(&rctx->fallback_req);
}

static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_aes_reqctx *rctx;
	u32 opmode = mode & AES_FLAGS_OPMODE_MASK;

	if (opmode == AES_FLAGS_XTS) {
		if (req->cryptlen < XTS_BLOCK_SIZE)
			return -EINVAL;

		if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
			return atmel_aes_xts_fallback(req,
						      mode & AES_FLAGS_ENCRYPT);
	}

	/*
	 * ECB, CBC and CTR modes require the plaintext and ciphertext
	 * to have a positive integer length.
	 */
	if (!req->cryptlen && opmode != AES_FLAGS_XTS)
		return 0;

	if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
	    !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
		return -EINVAL;

	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = false;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	if (opmode != AES_FLAGS_ECB &&
	    !(mode & AES_FLAGS_ENCRYPT)) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_start;

	return 0;
}

static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_ctr_start;

	return 0;
}

static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "atmel-ecb-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_ecb_encrypt,
	.decrypt		= atmel_aes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "atmel-cbc-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cbc_encrypt,
	.decrypt		= atmel_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "atmel-ctr-aes",
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctr_ctx),

	.init			= atmel_aes_ctr_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_ctr_encrypt,
	.decrypt		= atmel_aes_ctr_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
};
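
/*
 * Example (hypothetical client code, not part of this driver): once the
 * algorithms above are registered, a kernel user reaches them through the
 * generic crypto API, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 * The crypto core selects "atmel-cbc-aes" whenever its priority
 * (ATMEL_AES_PRIORITY) beats the other registered cbc(aes)
 * implementations.
 */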


/* gcm aead functions */

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const __be32 *ghash_in, __be32 *ghash_out,
			       atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);

static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);

static inline struct atmel_aes_gcm_ctx *
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
}

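/*
 * Drive the hardware GHASH engine over 'datalen' bytes of block-aligned,
 * already padded data. ghash_in optionally seeds the intermediate hash
 * registers; the result is stored in ghash_out before 'resume' is called.
 */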
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const __be32 *ghash_in, __be32 *ghash_out,
			       atmel_aes_fn_t resume)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	dd->data = (u32 *)data;
	dd->datalen = datalen;
	ctx->ghash_in = ghash_in;
	ctx->ghash_out = ghash_out;
	ctx->ghash_resume = resume;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
}

static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	/* Set the data length. */
	atmel_aes_write(dd, AES_AADLENR, dd->total);
	atmel_aes_write(dd, AES_CLENR, 0);

	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
	if (ctx->ghash_in)
		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);

	return atmel_aes_gcm_ghash_finalize(dd);
}

static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	u32 isr;

	/* Write data into the Input Data Registers. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_ghash_finalize;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* Read the computed hash from GHASHRx. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);

	return ctx->ghash_resume(dd);
}


static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
	size_t ivsize = crypto_aead_ivsize(tfm);
	size_t datalen, padlen;
	const void *iv = req->iv;
	u8 *data = dd->buf;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

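	/*
	 * Build J0 as specified by NIST SP 800-38D: for the usual 96-bit IV,
	 * J0 = IV || 0^31 || 1. Any other IV length takes the slow path
	 * below and computes J0 = GHASH(IV padded to a full block, followed
	 * by a block carrying the 64-bit IV bit-length).
	 */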
	if (likely(ivsize == GCM_AES_IV_SIZE)) {
		memcpy(ctx->j0, iv, ivsize);
		ctx->j0[3] = cpu_to_be32(1);
		return atmel_aes_gcm_process(dd);
	}

	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
	datalen = ivsize + padlen + AES_BLOCK_SIZE;
	if (datalen > dd->buflen)
		return atmel_aes_complete(dd, -EINVAL);

	memcpy(data, iv, ivsize);
	memset(data + ivsize, 0, padlen + sizeof(u64));
	((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
				   NULL, ctx->j0, atmel_aes_gcm_process);
}

static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 authsize;

	/* Compute text length. */
	authsize = crypto_aead_authsize(tfm);
	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * According to the tcrypt test suite, the GCM Automatic Tag
	 * Generation fails when both the message and its associated data
	 * are empty.
	 */
	if (likely(req->assoclen != 0 || ctx->textlen != 0))
		dd->flags |= AES_FLAGS_GTAGEN;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
}

static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be32 j0_lsw, *j0 = ctx->j0;
	size_t padlen;

	/* Write incr32(J0) into IV. */
	j0_lsw = j0[3];
	be32_add_cpu(&j0[3], 1);
	atmel_aes_write_block(dd, AES_IVR(0), j0);
	j0[3] = j0_lsw;

	/* Set aad and text lengths. */
	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
	atmel_aes_write(dd, AES_CLENR, ctx->textlen);

	/* Check whether AAD are present. */
	if (unlikely(req->assoclen == 0)) {
		dd->datalen = 0;
		return atmel_aes_gcm_data(dd);
	}

	/* Copy assoc data and add padding. */
	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
	if (unlikely(req->assoclen + padlen > dd->buflen))
		return atmel_aes_complete(dd, -EINVAL);
	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);

	/* Write assoc data into the Input Data register. */
	dd->data = (u32 *)dd->buf;
	dd->datalen = req->assoclen + padlen;
	return atmel_aes_gcm_data(dd);
}

static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
	struct scatterlist *src, *dst;
	u32 isr, mr;

	/* Write AAD first. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_data;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* GMAC only. */
	if (unlikely(ctx->textlen == 0))
		return atmel_aes_gcm_tag_init(dd);

	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

	if (use_dma) {
		/* Update the Mode Register for DMA transfers. */
		mr = atmel_aes_read(dd, AES_MR);
		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
		mr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			mr |= AES_MR_DUALBUFF;
		atmel_aes_write(dd, AES_MR, mr);

		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
					   atmel_aes_gcm_tag_init);
	}

	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
				   atmel_aes_gcm_tag_init);
}

static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be64 *data = dd->buf;

	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
			dd->resume = atmel_aes_gcm_tag_init;
			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
			return -EINPROGRESS;
		}

		return atmel_aes_gcm_finalize(dd);
	}

	/* Read the GCM Intermediate Hash Word Registers. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

	data[0] = cpu_to_be64(req->assoclen * 8);
	data[1] = cpu_to_be64(ctx->textlen * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}

static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	unsigned long flags;

	/*
	 * Change mode to CTR to complete the tag generation.
	 * Use J0 as Initialization Vector.
	 */
	flags = dd->flags;
	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
	dd->flags |= AES_FLAGS_CTR;
	atmel_aes_write_ctrl(dd, false, ctx->j0);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}

static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 offset, authsize, itag[4], *otag = ctx->tag;
	int err;

	/* Read the computed tag. */
	if (likely(dd->flags & AES_FLAGS_GTAGEN))
		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
	else
		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

	offset = req->assoclen + ctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
		err = 0;
	} else {
		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
	}

	return atmel_aes_complete(dd, err);
}

static int atmel_aes_gcm_crypt(struct aead_request *req,
			       unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	rctx = aead_request_ctx(req);
	rctx->mode = AES_FLAGS_GCM | mode;

	return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, 0);
}

static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_gcm_start;

	return 0;
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "atmel-gcm-aes",
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
	},
};


/* xts functions */

static inline struct atmel_aes_xts_ctx *
atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_xts_ctx, base);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);

static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	unsigned long flags;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	/* Compute the tweak value from req->iv with ecb(aes). */
	flags = dd->flags;
	dd->flags &= ~AES_FLAGS_MODE_MASK;
	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
	atmel_aes_write_ctrl_key(dd, false, NULL,
				 ctx->key2, ctx->base.keylen);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
	u8 *tweak_bytes = (u8 *)tweak;
	int i;

	/* Read the computed ciphered tweak value. */
	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
	/*
	 * Hardware quirk:
	 * the order of the ciphered tweak bytes needs to be reversed before
	 * writing them into the TWRx registers.
	 */
	for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
		swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);

	/* Process the data. */
	atmel_aes_write_ctrl(dd, use_dma, NULL);
	atmel_aes_write_block(dd, AES_TWR(0), tweak);
	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst,
					   req->cryptlen,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
				   atmel_aes_transfer_complete);
}

static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
	if (err)
		return err;

	memcpy(ctx->base.key, key, keylen/2);
	memcpy(ctx->key2, key + keylen/2, keylen/2);
	ctx->base.keylen = keylen/2;

	return 0;
}

static int atmel_aes_xts_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_xts_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS);
}

static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;
	const char *tfm_name = crypto_tfm_alg_name(&tfm->base);

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback_tfm));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_xts_start;

	return 0;
}

static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback_tfm);
}

static struct skcipher_alg aes_xts_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "atmel-xts-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= atmel_aes_xts_setkey,
	.encrypt		= atmel_aes_xts_encrypt,
	.decrypt		= atmel_aes_xts_decrypt,
	.init			= atmel_aes_xts_init_tfm,
	.exit			= atmel_aes_xts_exit_tfm,
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */

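/*
 * An authenc(hmac(shaX),cbc(aes)) request walks the following chain,
 * bouncing between the AES and SHA drivers:
 *   atmel_aes_authenc_start()    - acquire hardware, schedule SHA work
 *   atmel_aes_authenc_init()     - SHA device owned, program assoc data
 *   atmel_aes_authenc_transfer() - run AES over the text via DMA
 *   atmel_aes_authenc_digest()   - release SHA, collect the HMAC digest
 *   atmel_aes_authenc_final()    - write or verify the authentication tag
 */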
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async);
static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async);
static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async);

static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
		atmel_sha_authenc_abort(&rctx->auth_req);
	dd->flags &= ~AES_FLAGS_OWN_SHA;
}

static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	int err;

	atmel_aes_set_mode(dd, &rctx->base);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
					  atmel_aes_authenc_init, dd);
}

static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* If we reach this point, we own the SHA device. */
	dd->flags |= AES_FLAGS_OWN_SHA;

	/* Configure the SHA device. */
	return atmel_sha_authenc_init(&rctx->auth_req,
				      req->src, req->assoclen,
				      rctx->textlen,
				      atmel_aes_authenc_transfer, dd);
}

static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	bool enc = atmel_aes_is_encrypt(dd);
	struct scatterlist *src, *dst;
	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	u32 emr;

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
	dst = src;

	if (req->src != req->dst)
		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);

	/* Configure the AES device. */
	memcpy(iv, req->iv, sizeof(iv));

	/*
	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
	 * 'true' even if the data transfer is actually performed by the CPU (so
	 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
	 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
	 * must be set to *_MR_SMOD_IDATAR0.
	 */
	atmel_aes_write_ctrl(dd, true, iv);
	emr = AES_EMR_PLIPEN;
	if (!enc)
		emr |= AES_EMR_PLIPD;
	atmel_aes_write(dd, AES_EMR, emr);

	/* Transfer data. */
	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
				   atmel_aes_authenc_digest);
}

static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	/* atmel_sha_authenc_final() releases the SHA device. */
	dd->flags &= ~AES_FLAGS_OWN_SHA;
	return atmel_sha_authenc_final(&rctx->auth_req,
				       rctx->digest, sizeof(rctx->digest),
				       atmel_aes_authenc_final, dd);
}

static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
	u32 offs, authsize;

	if (is_async)
		dd->is_async = true;
	if (err)
		goto complete;

	offs = req->assoclen + rctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
	} else {
		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
		if (crypto_memneq(idigest, odigest, authsize))
			err = -EBADMSG;
	}

complete:
	return atmel_aes_complete(dd, err);
}

static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->base.key))
		goto badkey;

	/* Save auth key. */
	err = atmel_sha_authenc_setkey(ctx->auth,
				       keys.authkey, keys.authkeylen,
				       crypto_aead_get_flags(tfm));
	if (err) {
		memzero_explicit(&keys, sizeof(keys));
		return err;
	}

	/* Save enc key. */
	ctx->base.keylen = keys.enckeylen;
	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
				      unsigned long auth_mode)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
	if (IS_ERR(ctx->auth))
		return PTR_ERR(ctx->auth);

	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
				      auth_reqsize));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_authenc_start;

	return 0;
}

static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
}

static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
}

static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
}

static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
}

static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
}

static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);

	atmel_sha_authenc_free(ctx->auth);
}

static int atmel_aes_authenc_crypt(struct aead_request *req,
				   unsigned long mode)
{
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
	u32 authsize = crypto_aead_authsize(tfm);
	bool enc = (mode & AES_FLAGS_ENCRYPT);

	/* Compute text length. */
	if (!enc && req->cryptlen < authsize)
		return -EINVAL;
	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);

2006 /*
2007 * Currently, empty messages are not supported yet:
2008 * the SHA auto-padding can be used only on non-empty messages.
2009 * Hence a special case needs to be implemented for empty message.
2010 */
2011 if (!rctx->textlen && !req->assoclen)
2012 return -EINVAL;
2013
2014 rctx->base.mode = mode;
2015 ctx->block_size = AES_BLOCK_SIZE;
2016 ctx->is_aead = true;
2017
2018 return atmel_aes_handle_queue(ctx->dd, &req->base);
2019}
2020
2021static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
2022{
2023 return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
2024}
2025
2026static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
2027{
2028 return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
2029}
2030
static struct aead_alg aes_authenc_algs[] = {
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA1_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA224_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA256_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA384_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA512_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
};
#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */

/* Probe functions */

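/*
 * Allocate the driver's bounce buffer (ATMEL_AES_BUFFER_ORDER pages),
 * used both for CPU-driven transfers and for realigning scatterlists
 * that DMA cannot handle directly; buflen is rounded down to a
 * multiple of the AES block size.
 */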
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	/* Free the whole ATMEL_AES_BUFFER_ORDER allocation, not one page. */
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

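/*
 * Request the two DMA channels by name. "tx" and "rx" map to the
 * dmas/dma-names properties of the device node; a minimal sketch of
 * such a node (illustrative only, addresses and DMA specifiers are
 * SoC-dependent placeholders):
 *
 *	aes: crypto@f8038000 {
 *		compatible = "atmel,at91sam9g46-aes";
 *		reg = <0xf8038000 0x100>;
 *		dmas = <&dma0 ...>, <&dma0 ...>;
 *		dma-names = "tx", "rx";
 *	};
 */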
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->src.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->src.chan)) {
		ret = PTR_ERR(dd->src.chan);
		goto err_dma_in;
	}

	dd->dst.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dst.chan)) {
		ret = PTR_ERR(dd->dst.chan);
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}

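/*
 * Bottom halves: queue_task feeds the next queued request to the
 * hardware once it goes idle, while done_task resumes the suspended
 * state machine after a completion interrupt.
 */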
static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

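/*
 * Only sources that are both pending (ISR) and enabled (IMR) are
 * handled: they are masked via AES_IDR and the done tasklet is
 * scheduled; the state machine re-enables whatever it still needs.
 * Anything else is left to the other owners of this shared line.
 */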
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc)
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
			crypto_unregister_aead(&aes_authenc_algs[i]);
#endif

	if (dd->caps.has_xts)
		crypto_unregister_skcipher(&aes_xts_alg);

	if (dd->caps.has_gcm)
		crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
	alg->cra_flags |= CRYPTO_ALG_ASYNC;
	alg->cra_alignmask = 0xf;
	alg->cra_priority = ATMEL_AES_PRIORITY;
	alg->cra_module = THIS_MODULE;
}

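/*
 * Register from the baseline skciphers up to the optional AEADs; on
 * failure the labels below unwind in reverse order, with 'i' indexing
 * the first algorithm of the current array that failed to register.
 */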
static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		atmel_aes_crypto_alg_init(&aes_algs[i].base);

		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_gcm) {
		atmel_aes_crypto_alg_init(&aes_gcm_alg.base);

		err = crypto_register_aead(&aes_gcm_alg);
		if (err)
			goto err_aes_gcm_alg;
	}

	if (dd->caps.has_xts) {
		atmel_aes_crypto_alg_init(&aes_xts_alg.base);

		err = crypto_register_skcipher(&aes_xts_alg);
		if (err)
			goto err_aes_xts_alg;
	}

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc) {
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
			atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);

			err = crypto_register_aead(&aes_authenc_algs[i]);
			if (err)
				goto err_aes_authenc_alg;
		}
	}
#endif

	return 0;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
	for (j = 0; j < i; j++)
		crypto_unregister_aead(&aes_authenc_algs[j]);
	crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
	crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&aes_algs[j]);

	return err;
}

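/*
 * Derive the capability set from the major IP revision read from the
 * hardware version register; each newer revision listed here is a
 * superset of the previous one.
 */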
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_gcm = 0;
	dd->caps.has_xts = 0;
	dd->caps.has_authenc = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x700:
	case 0x600:
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_gcm = 1;
		dd->caps.has_xts = 1;
		dd->caps.has_authenc = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			 "unsupported AES hw version, using minimum capabilities\n");
		break;
	}
}

static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

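/*
 * Probe order matters: the tasklets and the request queue must exist
 * before the (shared) IRQ is requested, and the clock must be
 * prepared before the hardware version, and hence the capabilities,
 * can be read.
 */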
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (!aes_dd)
		return -ENOMEM;

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		err = PTR_ERR(aes_dd->io_base);
		goto err_tasklet_kill;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		err = aes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initialize the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = clk_prepare(aes_dd->iclk);
	if (err)
		goto err_tasklet_kill;

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto err_iclk_unprepare;

	atmel_aes_get_cap(aes_dd);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
		err = -EPROBE_DEFER;
		goto err_iclk_unprepare;
	}
#endif

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_iclk_unprepare;

	err = atmel_aes_dma_init(aes_dd);
	if (err)
		goto err_buff_cleanup;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
		 dma_chan_name(aes_dd->src.chan),
		 dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_buff_cleanup:
	atmel_aes_buff_cleanup(aes_dd);
err_iclk_unprepare:
	clk_unprepare(aes_dd->iclk);
err_tasklet_kill:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	return err;
}

static void atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	clk_unprepare(aes_dd->iclk);
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove_new	= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = atmel_aes_dt_ids,
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
1/*
2 * Cryptographic API.
3 *
4 * Support for ATMEL AES HW acceleration.
5 *
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
7 * Author: Nicolas Royer <nicolas@eukrea.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 *
13 * Some ideas are from omap-aes.c driver.
14 */
15
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/err.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/hw_random.h>
24#include <linux/platform_device.h>
25
26#include <linux/device.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
30#include <linux/irq.h>
31#include <linux/scatterlist.h>
32#include <linux/dma-mapping.h>
33#include <linux/of_device.h>
34#include <linux/delay.h>
35#include <linux/crypto.h>
36#include <crypto/scatterwalk.h>
37#include <crypto/algapi.h>
38#include <crypto/aes.h>
39#include <crypto/gcm.h>
40#include <crypto/xts.h>
41#include <crypto/internal/aead.h>
42#include <linux/platform_data/crypto-atmel.h>
43#include <dt-bindings/dma/at91.h>
44#include "atmel-aes-regs.h"
45#include "atmel-authenc.h"
46
47#define ATMEL_AES_PRIORITY 300
48
49#define ATMEL_AES_BUFFER_ORDER 2
50#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
51
52#define CFB8_BLOCK_SIZE 1
53#define CFB16_BLOCK_SIZE 2
54#define CFB32_BLOCK_SIZE 4
55#define CFB64_BLOCK_SIZE 8
56
57#define SIZE_IN_WORDS(x) ((x) >> 2)
58
59/* AES flags */
60/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
61#define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC
62#define AES_FLAGS_GTAGEN AES_MR_GTAGEN
63#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
64#define AES_FLAGS_ECB AES_MR_OPMOD_ECB
65#define AES_FLAGS_CBC AES_MR_OPMOD_CBC
66#define AES_FLAGS_OFB AES_MR_OPMOD_OFB
67#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
68#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
69#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
70#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
71#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
72#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
73#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
74#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
75
76#define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \
77 AES_FLAGS_ENCRYPT | \
78 AES_FLAGS_GTAGEN)
79
80#define AES_FLAGS_BUSY BIT(3)
81#define AES_FLAGS_DUMP_REG BIT(4)
82#define AES_FLAGS_OWN_SHA BIT(5)
83
84#define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY
85
86#define ATMEL_AES_QUEUE_LENGTH 50
87
88#define ATMEL_AES_DMA_THRESHOLD 256
89
90
91struct atmel_aes_caps {
92 bool has_dualbuff;
93 bool has_cfb64;
94 bool has_ctr32;
95 bool has_gcm;
96 bool has_xts;
97 bool has_authenc;
98 u32 max_burst_size;
99};
100
101struct atmel_aes_dev;
102
103
104typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
105
106
107struct atmel_aes_base_ctx {
108 struct atmel_aes_dev *dd;
109 atmel_aes_fn_t start;
110 int keylen;
111 u32 key[AES_KEYSIZE_256 / sizeof(u32)];
112 u16 block_size;
113 bool is_aead;
114};
115
116struct atmel_aes_ctx {
117 struct atmel_aes_base_ctx base;
118};
119
120struct atmel_aes_ctr_ctx {
121 struct atmel_aes_base_ctx base;
122
123 u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
124 size_t offset;
125 struct scatterlist src[2];
126 struct scatterlist dst[2];
127};
128
129struct atmel_aes_gcm_ctx {
130 struct atmel_aes_base_ctx base;
131
132 struct scatterlist src[2];
133 struct scatterlist dst[2];
134
135 u32 j0[AES_BLOCK_SIZE / sizeof(u32)];
136 u32 tag[AES_BLOCK_SIZE / sizeof(u32)];
137 u32 ghash[AES_BLOCK_SIZE / sizeof(u32)];
138 size_t textlen;
139
140 const u32 *ghash_in;
141 u32 *ghash_out;
142 atmel_aes_fn_t ghash_resume;
143};
144
145struct atmel_aes_xts_ctx {
146 struct atmel_aes_base_ctx base;
147
148 u32 key2[AES_KEYSIZE_256 / sizeof(u32)];
149};
150
151#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
152struct atmel_aes_authenc_ctx {
153 struct atmel_aes_base_ctx base;
154 struct atmel_sha_authenc_ctx *auth;
155};
156#endif
157
158struct atmel_aes_reqctx {
159 unsigned long mode;
160 u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
161};
162
163#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
164struct atmel_aes_authenc_reqctx {
165 struct atmel_aes_reqctx base;
166
167 struct scatterlist src[2];
168 struct scatterlist dst[2];
169 size_t textlen;
170 u32 digest[SHA512_DIGEST_SIZE / sizeof(u32)];
171
172 /* auth_req MUST be place last. */
173 struct ahash_request auth_req;
174};
175#endif
176
177struct atmel_aes_dma {
178 struct dma_chan *chan;
179 struct scatterlist *sg;
180 int nents;
181 unsigned int remainder;
182 unsigned int sg_len;
183};
184
185struct atmel_aes_dev {
186 struct list_head list;
187 unsigned long phys_base;
188 void __iomem *io_base;
189
190 struct crypto_async_request *areq;
191 struct atmel_aes_base_ctx *ctx;
192
193 bool is_async;
194 atmel_aes_fn_t resume;
195 atmel_aes_fn_t cpu_transfer_complete;
196
197 struct device *dev;
198 struct clk *iclk;
199 int irq;
200
201 unsigned long flags;
202
203 spinlock_t lock;
204 struct crypto_queue queue;
205
206 struct tasklet_struct done_task;
207 struct tasklet_struct queue_task;
208
209 size_t total;
210 size_t datalen;
211 u32 *data;
212
213 struct atmel_aes_dma src;
214 struct atmel_aes_dma dst;
215
216 size_t buflen;
217 void *buf;
218 struct scatterlist aligned_sg;
219 struct scatterlist *real_dst;
220
221 struct atmel_aes_caps caps;
222
223 u32 hw_version;
224};
225
226struct atmel_aes_drv {
227 struct list_head dev_list;
228 spinlock_t lock;
229};
230
231static struct atmel_aes_drv atmel_aes = {
232 .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
233 .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
234};
235
236#ifdef VERBOSE_DEBUG
237static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
238{
239 switch (offset) {
240 case AES_CR:
241 return "CR";
242
243 case AES_MR:
244 return "MR";
245
246 case AES_ISR:
247 return "ISR";
248
249 case AES_IMR:
250 return "IMR";
251
252 case AES_IER:
253 return "IER";
254
255 case AES_IDR:
256 return "IDR";
257
258 case AES_KEYWR(0):
259 case AES_KEYWR(1):
260 case AES_KEYWR(2):
261 case AES_KEYWR(3):
262 case AES_KEYWR(4):
263 case AES_KEYWR(5):
264 case AES_KEYWR(6):
265 case AES_KEYWR(7):
266 snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
267 break;
268
269 case AES_IDATAR(0):
270 case AES_IDATAR(1):
271 case AES_IDATAR(2):
272 case AES_IDATAR(3):
273 snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
274 break;
275
276 case AES_ODATAR(0):
277 case AES_ODATAR(1):
278 case AES_ODATAR(2):
279 case AES_ODATAR(3):
280 snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
281 break;
282
283 case AES_IVR(0):
284 case AES_IVR(1):
285 case AES_IVR(2):
286 case AES_IVR(3):
287 snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
288 break;
289
290 case AES_AADLENR:
291 return "AADLENR";
292
293 case AES_CLENR:
294 return "CLENR";
295
296 case AES_GHASHR(0):
297 case AES_GHASHR(1):
298 case AES_GHASHR(2):
299 case AES_GHASHR(3):
300 snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
301 break;
302
303 case AES_TAGR(0):
304 case AES_TAGR(1):
305 case AES_TAGR(2):
306 case AES_TAGR(3):
307 snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
308 break;
309
310 case AES_CTRR:
311 return "CTRR";
312
313 case AES_GCMHR(0):
314 case AES_GCMHR(1):
315 case AES_GCMHR(2):
316 case AES_GCMHR(3):
317 snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
318 break;
319
320 case AES_EMR:
321 return "EMR";
322
323 case AES_TWR(0):
324 case AES_TWR(1):
325 case AES_TWR(2):
326 case AES_TWR(3):
327 snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
328 break;
329
330 case AES_ALPHAR(0):
331 case AES_ALPHAR(1):
332 case AES_ALPHAR(2):
333 case AES_ALPHAR(3):
334 snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
335 break;
336
337 default:
338 snprintf(tmp, sz, "0x%02x", offset);
339 break;
340 }
341
342 return tmp;
343}
344#endif /* VERBOSE_DEBUG */
345
346/* Shared functions */
347
348static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
349{
350 u32 value = readl_relaxed(dd->io_base + offset);
351
352#ifdef VERBOSE_DEBUG
353 if (dd->flags & AES_FLAGS_DUMP_REG) {
354 char tmp[16];
355
356 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
357 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
358 }
359#endif /* VERBOSE_DEBUG */
360
361 return value;
362}
363
364static inline void atmel_aes_write(struct atmel_aes_dev *dd,
365 u32 offset, u32 value)
366{
367#ifdef VERBOSE_DEBUG
368 if (dd->flags & AES_FLAGS_DUMP_REG) {
369 char tmp[16];
370
371 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
372 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
373 }
374#endif /* VERBOSE_DEBUG */
375
376 writel_relaxed(value, dd->io_base + offset);
377}
378
379static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
380 u32 *value, int count)
381{
382 for (; count--; value++, offset += 4)
383 *value = atmel_aes_read(dd, offset);
384}
385
386static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
387 const u32 *value, int count)
388{
389 for (; count--; value++, offset += 4)
390 atmel_aes_write(dd, offset, *value);
391}
392
393static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
394 u32 *value)
395{
396 atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
397}
398
399static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
400 const u32 *value)
401{
402 atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
403}
404
405static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
406 atmel_aes_fn_t resume)
407{
408 u32 isr = atmel_aes_read(dd, AES_ISR);
409
410 if (unlikely(isr & AES_INT_DATARDY))
411 return resume(dd);
412
413 dd->resume = resume;
414 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
415 return -EINPROGRESS;
416}
417
418static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
419{
420 len &= block_size - 1;
421 return len ? block_size - len : 0;
422}
423
424static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
425{
426 struct atmel_aes_dev *aes_dd = NULL;
427 struct atmel_aes_dev *tmp;
428
429 spin_lock_bh(&atmel_aes.lock);
430 if (!ctx->dd) {
431 list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
432 aes_dd = tmp;
433 break;
434 }
435 ctx->dd = aes_dd;
436 } else {
437 aes_dd = ctx->dd;
438 }
439
440 spin_unlock_bh(&atmel_aes.lock);
441
442 return aes_dd;
443}
444
445static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
446{
447 int err;
448
449 err = clk_enable(dd->iclk);
450 if (err)
451 return err;
452
453 atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
454 atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
455
456 return 0;
457}
458
459static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
460{
461 return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
462}
463
464static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
465{
466 int err;
467
468 err = atmel_aes_hw_init(dd);
469 if (err)
470 return err;
471
472 dd->hw_version = atmel_aes_get_version(dd);
473
474 dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
475
476 clk_disable(dd->iclk);
477 return 0;
478}
479
480static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
481 const struct atmel_aes_reqctx *rctx)
482{
483 /* Clear all but persistent flags and set request flags. */
484 dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
485}
486
487static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
488{
489 return (dd->flags & AES_FLAGS_ENCRYPT);
490}
491
492#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
493static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
494#endif
495
496static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
497{
498#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
499 if (dd->ctx->is_aead)
500 atmel_aes_authenc_complete(dd, err);
501#endif
502
503 clk_disable(dd->iclk);
504 dd->flags &= ~AES_FLAGS_BUSY;
505
506 if (!dd->ctx->is_aead) {
507 struct ablkcipher_request *req =
508 ablkcipher_request_cast(dd->areq);
509 struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
510 struct crypto_ablkcipher *ablkcipher =
511 crypto_ablkcipher_reqtfm(req);
512 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
513
514 if (rctx->mode & AES_FLAGS_ENCRYPT) {
515 scatterwalk_map_and_copy(req->info, req->dst,
516 req->nbytes - ivsize, ivsize, 0);
517 } else {
518 if (req->src == req->dst) {
519 memcpy(req->info, rctx->lastc, ivsize);
520 } else {
521 scatterwalk_map_and_copy(req->info, req->src,
522 req->nbytes - ivsize, ivsize, 0);
523 }
524 }
525 }
526
527 if (dd->is_async)
528 dd->areq->complete(dd->areq, err);
529
530 tasklet_schedule(&dd->queue_task);
531
532 return err;
533}
534
535static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
536 const u32 *iv, const u32 *key, int keylen)
537{
538 u32 valmr = 0;
539
540 /* MR register must be set before IV registers */
541 if (keylen == AES_KEYSIZE_128)
542 valmr |= AES_MR_KEYSIZE_128;
543 else if (keylen == AES_KEYSIZE_192)
544 valmr |= AES_MR_KEYSIZE_192;
545 else
546 valmr |= AES_MR_KEYSIZE_256;
547
548 valmr |= dd->flags & AES_FLAGS_MODE_MASK;
549
550 if (use_dma) {
551 valmr |= AES_MR_SMOD_IDATAR0;
552 if (dd->caps.has_dualbuff)
553 valmr |= AES_MR_DUALBUFF;
554 } else {
555 valmr |= AES_MR_SMOD_AUTO;
556 }
557
558 atmel_aes_write(dd, AES_MR, valmr);
559
560 atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
561
562 if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
563 atmel_aes_write_block(dd, AES_IVR(0), iv);
564}
565
566static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
567 const u32 *iv)
568
569{
570 atmel_aes_write_ctrl_key(dd, use_dma, iv,
571 dd->ctx->key, dd->ctx->keylen);
572}
573
574/* CPU transfer */
575
576static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
577{
578 int err = 0;
579 u32 isr;
580
581 for (;;) {
582 atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
583 dd->data += 4;
584 dd->datalen -= AES_BLOCK_SIZE;
585
586 if (dd->datalen < AES_BLOCK_SIZE)
587 break;
588
589 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
590
591 isr = atmel_aes_read(dd, AES_ISR);
592 if (!(isr & AES_INT_DATARDY)) {
593 dd->resume = atmel_aes_cpu_transfer;
594 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
595 return -EINPROGRESS;
596 }
597 }
598
599 if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
600 dd->buf, dd->total))
601 err = -EINVAL;
602
603 if (err)
604 return atmel_aes_complete(dd, err);
605
606 return dd->cpu_transfer_complete(dd);
607}
608
609static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
610 struct scatterlist *src,
611 struct scatterlist *dst,
612 size_t len,
613 atmel_aes_fn_t resume)
614{
615 size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
616
617 if (unlikely(len == 0))
618 return -EINVAL;
619
620 sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
621
622 dd->total = len;
623 dd->real_dst = dst;
624 dd->cpu_transfer_complete = resume;
625 dd->datalen = len + padlen;
626 dd->data = (u32 *)dd->buf;
627 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
628 return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
629}
630
631
632/* DMA transfer */
633
634static void atmel_aes_dma_callback(void *data);
635
636static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
637 struct scatterlist *sg,
638 size_t len,
639 struct atmel_aes_dma *dma)
640{
641 int nents;
642
643 if (!IS_ALIGNED(len, dd->ctx->block_size))
644 return false;
645
646 for (nents = 0; sg; sg = sg_next(sg), ++nents) {
647 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
648 return false;
649
650 if (len <= sg->length) {
651 if (!IS_ALIGNED(len, dd->ctx->block_size))
652 return false;
653
654 dma->nents = nents+1;
655 dma->remainder = sg->length - len;
656 sg->length = len;
657 return true;
658 }
659
660 if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
661 return false;
662
663 len -= sg->length;
664 }
665
666 return false;
667}
668
669static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
670{
671 struct scatterlist *sg = dma->sg;
672 int nents = dma->nents;
673
674 if (!dma->remainder)
675 return;
676
677 while (--nents > 0 && sg)
678 sg = sg_next(sg);
679
680 if (!sg)
681 return;
682
683 sg->length += dma->remainder;
684}
685
686static int atmel_aes_map(struct atmel_aes_dev *dd,
687 struct scatterlist *src,
688 struct scatterlist *dst,
689 size_t len)
690{
691 bool src_aligned, dst_aligned;
692 size_t padlen;
693
694 dd->total = len;
695 dd->src.sg = src;
696 dd->dst.sg = dst;
697 dd->real_dst = dst;
698
699 src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
700 if (src == dst)
701 dst_aligned = src_aligned;
702 else
703 dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
704 if (!src_aligned || !dst_aligned) {
705 padlen = atmel_aes_padlen(len, dd->ctx->block_size);
706
707 if (dd->buflen < len + padlen)
708 return -ENOMEM;
709
710 if (!src_aligned) {
711 sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
712 dd->src.sg = &dd->aligned_sg;
713 dd->src.nents = 1;
714 dd->src.remainder = 0;
715 }
716
717 if (!dst_aligned) {
718 dd->dst.sg = &dd->aligned_sg;
719 dd->dst.nents = 1;
720 dd->dst.remainder = 0;
721 }
722
723 sg_init_table(&dd->aligned_sg, 1);
724 sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
725 }
726
727 if (dd->src.sg == dd->dst.sg) {
728 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
729 DMA_BIDIRECTIONAL);
730 dd->dst.sg_len = dd->src.sg_len;
731 if (!dd->src.sg_len)
732 return -EFAULT;
733 } else {
734 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
735 DMA_TO_DEVICE);
736 if (!dd->src.sg_len)
737 return -EFAULT;
738
739 dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
740 DMA_FROM_DEVICE);
741 if (!dd->dst.sg_len) {
742 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
743 DMA_TO_DEVICE);
744 return -EFAULT;
745 }
746 }
747
748 return 0;
749}
750
751static void atmel_aes_unmap(struct atmel_aes_dev *dd)
752{
753 if (dd->src.sg == dd->dst.sg) {
754 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
755 DMA_BIDIRECTIONAL);
756
757 if (dd->src.sg != &dd->aligned_sg)
758 atmel_aes_restore_sg(&dd->src);
759 } else {
760 dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
761 DMA_FROM_DEVICE);
762
763 if (dd->dst.sg != &dd->aligned_sg)
764 atmel_aes_restore_sg(&dd->dst);
765
766 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
767 DMA_TO_DEVICE);
768
769 if (dd->src.sg != &dd->aligned_sg)
770 atmel_aes_restore_sg(&dd->src);
771 }
772
773 if (dd->dst.sg == &dd->aligned_sg)
774 sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
775 dd->buf, dd->total);
776}
777
778static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
779 enum dma_slave_buswidth addr_width,
780 enum dma_transfer_direction dir,
781 u32 maxburst)
782{
783 struct dma_async_tx_descriptor *desc;
784 struct dma_slave_config config;
785 dma_async_tx_callback callback;
786 struct atmel_aes_dma *dma;
787 int err;
788
789 memset(&config, 0, sizeof(config));
790 config.direction = dir;
791 config.src_addr_width = addr_width;
792 config.dst_addr_width = addr_width;
793 config.src_maxburst = maxburst;
794 config.dst_maxburst = maxburst;
795
796 switch (dir) {
797 case DMA_MEM_TO_DEV:
798 dma = &dd->src;
799 callback = NULL;
800 config.dst_addr = dd->phys_base + AES_IDATAR(0);
801 break;
802
803 case DMA_DEV_TO_MEM:
804 dma = &dd->dst;
805 callback = atmel_aes_dma_callback;
806 config.src_addr = dd->phys_base + AES_ODATAR(0);
807 break;
808
809 default:
810 return -EINVAL;
811 }
812
813 err = dmaengine_slave_config(dma->chan, &config);
814 if (err)
815 return err;
816
817 desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
818 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
819 if (!desc)
820 return -ENOMEM;
821
822 desc->callback = callback;
823 desc->callback_param = dd;
824 dmaengine_submit(desc);
825 dma_async_issue_pending(dma->chan);
826
827 return 0;
828}
829
830static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
831 enum dma_transfer_direction dir)
832{
833 struct atmel_aes_dma *dma;
834
835 switch (dir) {
836 case DMA_MEM_TO_DEV:
837 dma = &dd->src;
838 break;
839
840 case DMA_DEV_TO_MEM:
841 dma = &dd->dst;
842 break;
843
844 default:
845 return;
846 }
847
848 dmaengine_terminate_all(dma->chan);
849}
850
851static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
852 struct scatterlist *src,
853 struct scatterlist *dst,
854 size_t len,
855 atmel_aes_fn_t resume)
856{
857 enum dma_slave_buswidth addr_width;
858 u32 maxburst;
859 int err;
860
861 switch (dd->ctx->block_size) {
862 case CFB8_BLOCK_SIZE:
863 addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
864 maxburst = 1;
865 break;
866
867 case CFB16_BLOCK_SIZE:
868 addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
869 maxburst = 1;
870 break;
871
872 case CFB32_BLOCK_SIZE:
873 case CFB64_BLOCK_SIZE:
874 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
875 maxburst = 1;
876 break;
877
878 case AES_BLOCK_SIZE:
879 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
880 maxburst = dd->caps.max_burst_size;
881 break;
882
883 default:
884 err = -EINVAL;
885 goto exit;
886 }
887
888 err = atmel_aes_map(dd, src, dst, len);
889 if (err)
890 goto exit;
891
892 dd->resume = resume;
893
894 /* Set output DMA transfer first */
895 err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
896 maxburst);
897 if (err)
898 goto unmap;
899
900 /* Then set input DMA transfer */
901 err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
902 maxburst);
903 if (err)
904 goto output_transfer_stop;
905
906 return -EINPROGRESS;
907
908output_transfer_stop:
909 atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
910unmap:
911 atmel_aes_unmap(dd);
912exit:
913 return atmel_aes_complete(dd, err);
914}
915
916static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
917{
918 atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
919 atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
920 atmel_aes_unmap(dd);
921}
922
923static void atmel_aes_dma_callback(void *data)
924{
925 struct atmel_aes_dev *dd = data;
926
927 atmel_aes_dma_stop(dd);
928 dd->is_async = true;
929 (void)dd->resume(dd);
930}
931
932static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
933 struct crypto_async_request *new_areq)
934{
935 struct crypto_async_request *areq, *backlog;
936 struct atmel_aes_base_ctx *ctx;
937 unsigned long flags;
938 bool start_async;
939 int err, ret = 0;
940
941 spin_lock_irqsave(&dd->lock, flags);
942 if (new_areq)
943 ret = crypto_enqueue_request(&dd->queue, new_areq);
944 if (dd->flags & AES_FLAGS_BUSY) {
945 spin_unlock_irqrestore(&dd->lock, flags);
946 return ret;
947 }
948 backlog = crypto_get_backlog(&dd->queue);
949 areq = crypto_dequeue_request(&dd->queue);
950 if (areq)
951 dd->flags |= AES_FLAGS_BUSY;
952 spin_unlock_irqrestore(&dd->lock, flags);
953
954 if (!areq)
955 return ret;
956
957 if (backlog)
958 backlog->complete(backlog, -EINPROGRESS);
959
960 ctx = crypto_tfm_ctx(areq->tfm);
961
962 dd->areq = areq;
963 dd->ctx = ctx;
964 start_async = (areq != new_areq);
965 dd->is_async = start_async;
966
967 /* WARNING: ctx->start() MAY change dd->is_async. */
968 err = ctx->start(dd);
969 return (start_async) ? ret : err;
970}
971
972
973/* AES async block ciphers */
974
975static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
976{
977 return atmel_aes_complete(dd, 0);
978}
979
980static int atmel_aes_start(struct atmel_aes_dev *dd)
981{
982 struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
983 struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
984 bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
985 dd->ctx->block_size != AES_BLOCK_SIZE);
986 int err;
987
988 atmel_aes_set_mode(dd, rctx);
989
990 err = atmel_aes_hw_init(dd);
991 if (err)
992 return atmel_aes_complete(dd, err);
993
994 atmel_aes_write_ctrl(dd, use_dma, req->info);
995 if (use_dma)
996 return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
997 atmel_aes_transfer_complete);
998
999 return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
1000 atmel_aes_transfer_complete);
1001}
1002
1003static inline struct atmel_aes_ctr_ctx *
1004atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
1005{
1006 return container_of(ctx, struct atmel_aes_ctr_ctx, base);
1007}
1008
1009static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
1010{
1011 struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1012 struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
1013 struct scatterlist *src, *dst;
1014 u32 ctr, blocks;
1015 size_t datalen;
1016 bool use_dma, fragmented = false;
1017
1018 /* Check for transfer completion. */
1019 ctx->offset += dd->total;
1020 if (ctx->offset >= req->nbytes)
1021 return atmel_aes_transfer_complete(dd);
1022
1023 /* Compute data length. */
1024 datalen = req->nbytes - ctx->offset;
1025 blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
1026 ctr = be32_to_cpu(ctx->iv[3]);
1027 if (dd->caps.has_ctr32) {
1028 /* Check 32bit counter overflow. */
1029 u32 start = ctr;
1030 u32 end = start + blocks - 1;
1031
1032 if (end < start) {
1033 ctr |= 0xffffffff;
1034 datalen = AES_BLOCK_SIZE * -start;
1035 fragmented = true;
1036 }
1037 } else {
1038 /* Check 16bit counter overflow. */
1039 u16 start = ctr & 0xffff;
1040 u16 end = start + (u16)blocks - 1;
1041
1042 if (blocks >> 16 || end < start) {
1043 ctr |= 0xffff;
1044 datalen = AES_BLOCK_SIZE * (0x10000-start);
1045 fragmented = true;
1046 }
1047 }
1048 use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
1049
1050 /* Jump to offset. */
1051 src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
1052 dst = ((req->src == req->dst) ? src :
1053 scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
1054
1055 /* Configure hardware. */
1056 atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
1057 if (unlikely(fragmented)) {
1058 /*
1059 * Increment the counter manually to cope with the hardware
1060 * counter overflow.
1061 */
1062 ctx->iv[3] = cpu_to_be32(ctr);
1063 crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
1064 }
1065
1066 if (use_dma)
1067 return atmel_aes_dma_start(dd, src, dst, datalen,
1068 atmel_aes_ctr_transfer);
1069
1070 return atmel_aes_cpu_start(dd, src, dst, datalen,
1071 atmel_aes_ctr_transfer);
1072}
1073
1074static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
1075{
1076 struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
1077 struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
1078 struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
1079 int err;
1080
1081 atmel_aes_set_mode(dd, rctx);
1082
1083 err = atmel_aes_hw_init(dd);
1084 if (err)
1085 return atmel_aes_complete(dd, err);
1086
1087 memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
1088 ctx->offset = 0;
1089 dd->total = 0;
1090 return atmel_aes_ctr_transfer(dd);
1091}
1092
1093static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
1094{
1095 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1096 struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1097 struct atmel_aes_reqctx *rctx;
1098 struct atmel_aes_dev *dd;
1099
1100 switch (mode & AES_FLAGS_OPMODE_MASK) {
1101 case AES_FLAGS_CFB8:
1102 ctx->block_size = CFB8_BLOCK_SIZE;
1103 break;
1104
1105 case AES_FLAGS_CFB16:
1106 ctx->block_size = CFB16_BLOCK_SIZE;
1107 break;
1108
1109 case AES_FLAGS_CFB32:
1110 ctx->block_size = CFB32_BLOCK_SIZE;
1111 break;
1112
1113 case AES_FLAGS_CFB64:
1114 ctx->block_size = CFB64_BLOCK_SIZE;
1115 break;
1116
1117 default:
1118 ctx->block_size = AES_BLOCK_SIZE;
1119 break;
1120 }
1121 ctx->is_aead = false;
1122
1123 dd = atmel_aes_find_dev(ctx);
1124 if (!dd)
1125 return -ENODEV;
1126
1127 rctx = ablkcipher_request_ctx(req);
1128 rctx->mode = mode;
1129
1130 if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
1131 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1132
1133 scatterwalk_map_and_copy(rctx->lastc, req->src,
1134 (req->nbytes - ivsize), ivsize, 0);
1135 }
1136
1137 return atmel_aes_handle_queue(dd, &req->base);
1138}
1139
1140static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1141 unsigned int keylen)
1142{
1143 struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);
1144
1145 if (keylen != AES_KEYSIZE_128 &&
1146 keylen != AES_KEYSIZE_192 &&
1147 keylen != AES_KEYSIZE_256) {
1148 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1149 return -EINVAL;
1150 }
1151
1152 memcpy(ctx->key, key, keylen);
1153 ctx->keylen = keylen;
1154
1155 return 0;
1156}
1157
1158static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
1159{
1160 return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
1161}
1162
1163static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
1164{
1165 return atmel_aes_crypt(req, AES_FLAGS_ECB);
1166}
1167
1168static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
1169{
1170 return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
1171}
1172
1173static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
1174{
1175 return atmel_aes_crypt(req, AES_FLAGS_CBC);
1176}
1177
1178static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
1179{
1180 return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
1181}
1182
1183static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
1184{
1185 return atmel_aes_crypt(req, AES_FLAGS_OFB);
1186}
1187
1188static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
1189{
1190 return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
1191}
1192
1193static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
1194{
1195 return atmel_aes_crypt(req, AES_FLAGS_CFB128);
1196}
1197
1198static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
1199{
1200 return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
1201}
1202
1203static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
1204{
1205 return atmel_aes_crypt(req, AES_FLAGS_CFB64);
1206}
1207
1208static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
1209{
1210 return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
1211}
1212
1213static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
1214{
1215 return atmel_aes_crypt(req, AES_FLAGS_CFB32);
1216}
1217
1218static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
1219{
1220 return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
1221}
1222
1223static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
1224{
1225 return atmel_aes_crypt(req, AES_FLAGS_CFB16);
1226}
1227
1228static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
1229{
1230 return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
1231}
1232
1233static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
1234{
1235 return atmel_aes_crypt(req, AES_FLAGS_CFB8);
1236}
1237
1238static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
1239{
1240 return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
1241}
1242
1243static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
1244{
1245 return atmel_aes_crypt(req, AES_FLAGS_CTR);
1246}
1247
1248static int atmel_aes_cra_init(struct crypto_tfm *tfm)
1249{
1250 struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1251
1252 tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1253 ctx->base.start = atmel_aes_start;
1254
1255 return 0;
1256}
1257
1258static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
1259{
1260 struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);
1261
1262 tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
1263 ctx->base.start = atmel_aes_ctr_start;
1264
1265 return 0;
1266}
1267
1268static struct crypto_alg aes_algs[] = {
1269{
1270 .cra_name = "ecb(aes)",
1271 .cra_driver_name = "atmel-ecb-aes",
1272 .cra_priority = ATMEL_AES_PRIORITY,
1273 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1274 .cra_blocksize = AES_BLOCK_SIZE,
1275 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1276 .cra_alignmask = 0xf,
1277 .cra_type = &crypto_ablkcipher_type,
1278 .cra_module = THIS_MODULE,
1279 .cra_init = atmel_aes_cra_init,
1280 .cra_u.ablkcipher = {
1281 .min_keysize = AES_MIN_KEY_SIZE,
1282 .max_keysize = AES_MAX_KEY_SIZE,
1283 .setkey = atmel_aes_setkey,
1284 .encrypt = atmel_aes_ecb_encrypt,
1285 .decrypt = atmel_aes_ecb_decrypt,
1286 }
1287},
1288{
1289 .cra_name = "cbc(aes)",
1290 .cra_driver_name = "atmel-cbc-aes",
1291 .cra_priority = ATMEL_AES_PRIORITY,
1292 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1293 .cra_blocksize = AES_BLOCK_SIZE,
1294 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1295 .cra_alignmask = 0xf,
1296 .cra_type = &crypto_ablkcipher_type,
1297 .cra_module = THIS_MODULE,
1298 .cra_init = atmel_aes_cra_init,
1299 .cra_u.ablkcipher = {
1300 .min_keysize = AES_MIN_KEY_SIZE,
1301 .max_keysize = AES_MAX_KEY_SIZE,
1302 .ivsize = AES_BLOCK_SIZE,
1303 .setkey = atmel_aes_setkey,
1304 .encrypt = atmel_aes_cbc_encrypt,
1305 .decrypt = atmel_aes_cbc_decrypt,
1306 }
1307},
1308{
1309 .cra_name = "ofb(aes)",
1310 .cra_driver_name = "atmel-ofb-aes",
1311 .cra_priority = ATMEL_AES_PRIORITY,
1312 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1313 .cra_blocksize = AES_BLOCK_SIZE,
1314 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1315 .cra_alignmask = 0xf,
1316 .cra_type = &crypto_ablkcipher_type,
1317 .cra_module = THIS_MODULE,
1318 .cra_init = atmel_aes_cra_init,
1319 .cra_u.ablkcipher = {
1320 .min_keysize = AES_MIN_KEY_SIZE,
1321 .max_keysize = AES_MAX_KEY_SIZE,
1322 .ivsize = AES_BLOCK_SIZE,
1323 .setkey = atmel_aes_setkey,
1324 .encrypt = atmel_aes_ofb_encrypt,
1325 .decrypt = atmel_aes_ofb_decrypt,
1326 }
1327},
1328{
1329 .cra_name = "cfb(aes)",
1330 .cra_driver_name = "atmel-cfb-aes",
1331 .cra_priority = ATMEL_AES_PRIORITY,
1332 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1333 .cra_blocksize = AES_BLOCK_SIZE,
1334 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1335 .cra_alignmask = 0xf,
1336 .cra_type = &crypto_ablkcipher_type,
1337 .cra_module = THIS_MODULE,
1338 .cra_init = atmel_aes_cra_init,
1339 .cra_u.ablkcipher = {
1340 .min_keysize = AES_MIN_KEY_SIZE,
1341 .max_keysize = AES_MAX_KEY_SIZE,
1342 .ivsize = AES_BLOCK_SIZE,
1343 .setkey = atmel_aes_setkey,
1344 .encrypt = atmel_aes_cfb_encrypt,
1345 .decrypt = atmel_aes_cfb_decrypt,
1346 }
1347},
1348{
1349 .cra_name = "cfb32(aes)",
1350 .cra_driver_name = "atmel-cfb32-aes",
1351 .cra_priority = ATMEL_AES_PRIORITY,
1352 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1353 .cra_blocksize = CFB32_BLOCK_SIZE,
1354 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1355 .cra_alignmask = 0x3,
1356 .cra_type = &crypto_ablkcipher_type,
1357 .cra_module = THIS_MODULE,
1358 .cra_init = atmel_aes_cra_init,
1359 .cra_u.ablkcipher = {
1360 .min_keysize = AES_MIN_KEY_SIZE,
1361 .max_keysize = AES_MAX_KEY_SIZE,
1362 .ivsize = AES_BLOCK_SIZE,
1363 .setkey = atmel_aes_setkey,
1364 .encrypt = atmel_aes_cfb32_encrypt,
1365 .decrypt = atmel_aes_cfb32_decrypt,
1366 }
1367},
1368{
1369 .cra_name = "cfb16(aes)",
1370 .cra_driver_name = "atmel-cfb16-aes",
1371 .cra_priority = ATMEL_AES_PRIORITY,
1372 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1373 .cra_blocksize = CFB16_BLOCK_SIZE,
1374 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1375 .cra_alignmask = 0x1,
1376 .cra_type = &crypto_ablkcipher_type,
1377 .cra_module = THIS_MODULE,
1378 .cra_init = atmel_aes_cra_init,
1379 .cra_u.ablkcipher = {
1380 .min_keysize = AES_MIN_KEY_SIZE,
1381 .max_keysize = AES_MAX_KEY_SIZE,
1382 .ivsize = AES_BLOCK_SIZE,
1383 .setkey = atmel_aes_setkey,
1384 .encrypt = atmel_aes_cfb16_encrypt,
1385 .decrypt = atmel_aes_cfb16_decrypt,
1386 }
1387},
1388{
1389 .cra_name = "cfb8(aes)",
1390 .cra_driver_name = "atmel-cfb8-aes",
1391 .cra_priority = ATMEL_AES_PRIORITY,
1392 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1393 .cra_blocksize = CFB8_BLOCK_SIZE,
1394 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1395 .cra_alignmask = 0x0,
1396 .cra_type = &crypto_ablkcipher_type,
1397 .cra_module = THIS_MODULE,
1398 .cra_init = atmel_aes_cra_init,
1399 .cra_u.ablkcipher = {
1400 .min_keysize = AES_MIN_KEY_SIZE,
1401 .max_keysize = AES_MAX_KEY_SIZE,
1402 .ivsize = AES_BLOCK_SIZE,
1403 .setkey = atmel_aes_setkey,
1404 .encrypt = atmel_aes_cfb8_encrypt,
1405 .decrypt = atmel_aes_cfb8_decrypt,
1406 }
1407},
1408{
1409 .cra_name = "ctr(aes)",
1410 .cra_driver_name = "atmel-ctr-aes",
1411 .cra_priority = ATMEL_AES_PRIORITY,
1412 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1413 .cra_blocksize = 1,
1414 .cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx),
1415 .cra_alignmask = 0xf,
1416 .cra_type = &crypto_ablkcipher_type,
1417 .cra_module = THIS_MODULE,
1418 .cra_init = atmel_aes_ctr_cra_init,
1419 .cra_u.ablkcipher = {
1420 .min_keysize = AES_MIN_KEY_SIZE,
1421 .max_keysize = AES_MAX_KEY_SIZE,
1422 .ivsize = AES_BLOCK_SIZE,
1423 .setkey = atmel_aes_setkey,
1424 .encrypt = atmel_aes_ctr_encrypt,
1425 .decrypt = atmel_aes_ctr_decrypt,
1426 }
1427},
1428};
1429
1430static struct crypto_alg aes_cfb64_alg = {
1431 .cra_name = "cfb64(aes)",
1432 .cra_driver_name = "atmel-cfb64-aes",
1433 .cra_priority = ATMEL_AES_PRIORITY,
1434 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1435 .cra_blocksize = CFB64_BLOCK_SIZE,
1436 .cra_ctxsize = sizeof(struct atmel_aes_ctx),
1437 .cra_alignmask = 0x7,
1438 .cra_type = &crypto_ablkcipher_type,
1439 .cra_module = THIS_MODULE,
1440 .cra_init = atmel_aes_cra_init,
1441 .cra_u.ablkcipher = {
1442 .min_keysize = AES_MIN_KEY_SIZE,
1443 .max_keysize = AES_MAX_KEY_SIZE,
1444 .ivsize = AES_BLOCK_SIZE,
1445 .setkey = atmel_aes_setkey,
1446 .encrypt = atmel_aes_cfb64_encrypt,
1447 .decrypt = atmel_aes_cfb64_decrypt,
1448 }
1449};
1450
1451
1452/* gcm aead functions */
1453
1454static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1455 const u32 *data, size_t datalen,
1456 const u32 *ghash_in, u32 *ghash_out,
1457 atmel_aes_fn_t resume);
1458static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
1459static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
1460
1461static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
1462static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
1463static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
1464static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
1465static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
1466static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
1467static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
1468
1469static inline struct atmel_aes_gcm_ctx *
1470atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
1471{
1472 return container_of(ctx, struct atmel_aes_gcm_ctx, base);
1473}
1474
1475static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
1476 const u32 *data, size_t datalen,
1477 const u32 *ghash_in, u32 *ghash_out,
1478 atmel_aes_fn_t resume)
1479{
1480 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1481
1482 dd->data = (u32 *)data;
1483 dd->datalen = datalen;
1484 ctx->ghash_in = ghash_in;
1485 ctx->ghash_out = ghash_out;
1486 ctx->ghash_resume = resume;
1487
1488 atmel_aes_write_ctrl(dd, false, NULL);
1489 return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
1490}
1491
1492static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
1493{
1494 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1495
1496 /* Set the data length. */
1497 atmel_aes_write(dd, AES_AADLENR, dd->total);
1498 atmel_aes_write(dd, AES_CLENR, 0);
1499
1500 /* If needed, overwrite the GCM Intermediate Hash Word Registers */
1501 if (ctx->ghash_in)
1502 atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
1503
1504 return atmel_aes_gcm_ghash_finalize(dd);
1505}
1506
1507static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
1508{
1509 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1510 u32 isr;
1511
1512 /* Write data into the Input Data Registers. */
1513 while (dd->datalen > 0) {
1514 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
1515 dd->data += 4;
1516 dd->datalen -= AES_BLOCK_SIZE;
1517
1518 isr = atmel_aes_read(dd, AES_ISR);
1519 if (!(isr & AES_INT_DATARDY)) {
1520 dd->resume = atmel_aes_gcm_ghash_finalize;
1521 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
1522 return -EINPROGRESS;
1523 }
1524 }
1525
1526 /* Read the computed hash from GHASHRx. */
1527 atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
1528
1529 return ctx->ghash_resume(dd);
1530}
1531
1532
1533static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1534{
1535 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1536 struct aead_request *req = aead_request_cast(dd->areq);
1537 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1538 struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1539 size_t ivsize = crypto_aead_ivsize(tfm);
1540 size_t datalen, padlen;
1541 const void *iv = req->iv;
1542 u8 *data = dd->buf;
1543 int err;
1544
1545 atmel_aes_set_mode(dd, rctx);
1546
1547 err = atmel_aes_hw_init(dd);
1548 if (err)
1549 return atmel_aes_complete(dd, err);
1550
1551 if (likely(ivsize == GCM_AES_IV_SIZE)) {
1552 memcpy(ctx->j0, iv, ivsize);
1553 ctx->j0[3] = cpu_to_be32(1);
1554 return atmel_aes_gcm_process(dd);
1555 }
1556
1557 padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1558 datalen = ivsize + padlen + AES_BLOCK_SIZE;
1559 if (datalen > dd->buflen)
1560 return atmel_aes_complete(dd, -EINVAL);
1561
1562 memcpy(data, iv, ivsize);
1563 memset(data + ivsize, 0, padlen + sizeof(u64));
1564 ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1565
1566 return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1567 NULL, ctx->j0, atmel_aes_gcm_process);
1568}
1569
1570static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1571{
1572 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1573 struct aead_request *req = aead_request_cast(dd->areq);
1574 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1575 bool enc = atmel_aes_is_encrypt(dd);
1576 u32 authsize;
1577
1578 /* Compute text length. */
1579 authsize = crypto_aead_authsize(tfm);
1580 ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1581
1582 /*
1583 * According to tcrypt test suite, the GCM Automatic Tag Generation
1584 * fails when both the message and its associated data are empty.
1585 */
1586 if (likely(req->assoclen != 0 || ctx->textlen != 0))
1587 dd->flags |= AES_FLAGS_GTAGEN;
1588
1589 atmel_aes_write_ctrl(dd, false, NULL);
1590 return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1591}
1592
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be32 j0_lsw, *j0 = ctx->j0;
	size_t padlen;

	/* Write incr32(J0) into IV. */
	j0_lsw = j0[3];
	j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
	atmel_aes_write_block(dd, AES_IVR(0), (u32 *)j0);
	j0[3] = j0_lsw;

	/* Set the aad and text lengths. */
	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
	atmel_aes_write(dd, AES_CLENR, ctx->textlen);

	/* Check whether any AAD is present. */
	if (unlikely(req->assoclen == 0)) {
		dd->datalen = 0;
		return atmel_aes_gcm_data(dd);
	}

	/* Copy the assoc data and add padding. */
	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
	if (unlikely(req->assoclen + padlen > dd->buflen))
		return atmel_aes_complete(dd, -EINVAL);
	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);

	/* Write the assoc data into the Input Data register. */
	dd->data = (u32 *)dd->buf;
	dd->datalen = req->assoclen + padlen;
	return atmel_aes_gcm_data(dd);
}

static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
	struct scatterlist *src, *dst;
	u32 isr, mr;

	/* Write AAD first. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_data;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* GMAC only. */
	if (unlikely(ctx->textlen == 0))
		return atmel_aes_gcm_tag_init(dd);

	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

	if (use_dma) {
		/* Update the Mode Register for DMA transfers. */
		mr = atmel_aes_read(dd, AES_MR);
		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
		mr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			mr |= AES_MR_DUALBUFF;
		atmel_aes_write(dd, AES_MR, mr);

		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
					   atmel_aes_gcm_tag_init);
	}

	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
				   atmel_aes_gcm_tag_init);
}

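/*
 * Tag generation: when the hardware Automatic Tag Generation is usable
 * (GTAGEN set), simply wait for TAGRDY and read TAGR. Otherwise fall
 * back to the manual construction: GHASH the 64-bit lengths block
 * (len(A) || len(C), in bits) into the running hash, then encrypt that
 * hash with AES-CTR keyed on J0 (see atmel_aes_gcm_tag() below).
 */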
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be64 *data = dd->buf;

	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
			dd->resume = atmel_aes_gcm_tag_init;
			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
			return -EINPROGRESS;
		}

		return atmel_aes_gcm_finalize(dd);
	}

	/* Read the GCM Intermediate Hash Word Registers. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

	data[0] = cpu_to_be64(req->assoclen * 8);
	data[1] = cpu_to_be64(ctx->textlen * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}

static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	unsigned long flags;

	/*
	 * Change mode to CTR to complete the tag generation.
	 * Use J0 as Initialization Vector.
	 */
	flags = dd->flags;
	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
	dd->flags |= AES_FLAGS_CTR;
	atmel_aes_write_ctrl(dd, false, ctx->j0);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}

static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 offset, authsize, itag[4], *otag = ctx->tag;
	int err;

	/* Read the computed tag. */
	if (likely(dd->flags & AES_FLAGS_GTAGEN))
		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
	else
		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

	offset = req->assoclen + ctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
		err = 0;
	} else {
		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
	}

	return atmel_aes_complete(dd, err);
}

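/*
 * Entry point shared by encrypt and decrypt. Note that on decryption the
 * expected tag is read back from the request and compared with
 * crypto_memneq() in atmel_aes_gcm_finalize(), so the check runs in
 * constant time and cannot be used as a timing oracle.
 */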
static int atmel_aes_gcm_crypt(struct aead_request *req,
			       unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	struct atmel_aes_dev *dd;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx = aead_request_ctx(req);
	rctx->mode = AES_FLAGS_GCM | mode;

	return atmel_aes_handle_queue(dd, &req->base);
}

static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, 0);
}

static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.start = atmel_aes_gcm_start;

	return 0;
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "atmel-gcm-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

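/*
 * A minimal usage sketch (not part of this driver): once registered, the
 * transform above is reached through the regular kernel AEAD API, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, 0, crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *
 * Error handling is omitted and dst must leave room for the tag; with
 * cra_priority 300 this driver is preferred over the generic gcm(aes)
 * implementation whenever the hardware is present.
 */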

/* xts functions */

static inline struct atmel_aes_xts_ctx *
atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_xts_ctx, base);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);

static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	/* Compute the tweak value from req->info with ecb(aes). */
	flags = dd->flags;
	dd->flags &= ~AES_FLAGS_MODE_MASK;
	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
	atmel_aes_write_ctrl_key(dd, false, NULL,
				 ctx->key2, ctx->base.keylen);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}

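/*
 * XTS data path: the tweak just computed with ecb(aes) on key2 is loaded
 * into TWR, and ALPHAR is set to 1 so that the hardware itself multiplies
 * the tweak by the primitive element alpha of GF(2^128) for each
 * consecutive block, as required by IEEE P1619.
 */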
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
	u8 *tweak_bytes = (u8 *)tweak;
	int i;

	/* Read the computed ciphered tweak value. */
	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
	/*
	 * Hardware quirk:
	 * the order of the ciphered tweak bytes needs to be reversed before
	 * writing them into the TWRx registers.
	 */
	for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
		u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];

		tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
		tweak_bytes[i] = tmp;
	}

	/* Process the data. */
	atmel_aes_write_ctrl(dd, use_dma, NULL);
	atmel_aes_write_block(dd, AES_TWR(0), tweak);
	atmel_aes_write_block(dd, AES_ALPHAR(0), (const u32 *)one);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
				   atmel_aes_transfer_complete);
}

static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int err;

	err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
	if (err)
		return err;

	memcpy(ctx->base.key, key, keylen/2);
	memcpy(ctx->key2, key + keylen/2, keylen/2);
	ctx->base.keylen = keylen/2;

	return 0;
}

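/*
 * An XTS key is the concatenation of two AES keys of equal size: the
 * first half (key1) encrypts the data, the second half (key2) encrypts
 * the IV into the initial tweak. xts_check_key() validates the overall
 * length and, in FIPS mode, rejects keys whose two halves are identical.
 */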
static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS);
}

static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
	ctx->base.start = atmel_aes_xts_start;

	return 0;
}

static struct crypto_alg aes_xts_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "atmel-xts-aes",
	.cra_priority		= ATMEL_AES_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_xts_ctx),
	.cra_alignmask		= 0xf,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_xts_cra_init,
	.cra_u.ablkcipher = {
		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_xts_setkey,
		.encrypt	= atmel_aes_xts_encrypt,
		.decrypt	= atmel_aes_xts_decrypt,
	}
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */

static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async);
static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async);
static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async);

static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
		atmel_sha_authenc_abort(&rctx->auth_req);
	dd->flags &= ~AES_FLAGS_OWN_SHA;
}

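/*
 * authenc(hmac(shaX),cbc(aes)) is implemented by chaining the AES and SHA
 * hardware: the request first borrows the SHA device, then the AES output
 * is piped into it (AES_EMR_PLIPEN, the protocol-layer pipe described in
 * the Atmel/Microchip datasheets) so that encryption and hashing run in a
 * single pass; the resulting digest is finally appended to, or checked
 * against, the end of the ciphertext.
 */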
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	int err;

	atmel_aes_set_mode(dd, &rctx->base);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
					  atmel_aes_authenc_init, dd);
}

static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* If we reach this point, we own the SHA device. */
	dd->flags |= AES_FLAGS_OWN_SHA;

	/* Configure the SHA device. */
	return atmel_sha_authenc_init(&rctx->auth_req,
				      req->src, req->assoclen,
				      rctx->textlen,
				      atmel_aes_authenc_transfer, dd);
}

static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	bool enc = atmel_aes_is_encrypt(dd);
	struct scatterlist *src, *dst;
	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	u32 emr;

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
	dst = src;

	if (req->src != req->dst)
		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);

	/* Configure the AES device. */
	memcpy(iv, req->iv, sizeof(iv));

	/*
	 * The 2nd parameter of atmel_aes_write_ctrl() is always set to
	 * 'true', even when the data transfer is actually performed by the
	 * CPU rather than by the DMA controller, because the AES_MR_SMOD
	 * bitfield must be forced to AES_MR_SMOD_IDATAR0: both AES_MR_SMOD
	 * and SHA_MR_SMOD have to be set to *_MR_SMOD_IDATAR0 for the
	 * AES<->SHA pipe to work.
	 */
	atmel_aes_write_ctrl(dd, true, iv);
	emr = AES_EMR_PLIPEN;
	if (!enc)
		emr |= AES_EMR_PLIPD;
	atmel_aes_write(dd, AES_EMR, emr);

	/* Transfer data. */
	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
				   atmel_aes_authenc_digest);
}

static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	/* atmel_sha_authenc_final() releases the SHA device. */
	dd->flags &= ~AES_FLAGS_OWN_SHA;
	return atmel_sha_authenc_final(&rctx->auth_req,
				       rctx->digest, sizeof(rctx->digest),
				       atmel_aes_authenc_final, dd);
}

static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
	u32 offs, authsize;

	if (is_async)
		dd->is_async = true;
	if (err)
		goto complete;

	offs = req->assoclen + rctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
	} else {
		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
		if (crypto_memneq(idigest, odigest, authsize))
			err = -EBADMSG;
	}

complete:
	return atmel_aes_complete(dd, err);
}

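/*
 * The authenc key blob is not a raw key: it is an rtattr-encoded
 * structure carrying the HMAC key length, followed by the HMAC key and
 * then the AES key. crypto_authenc_extractkeys() splits it into the two
 * halves consumed below.
 */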
static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->base.key))
		goto badkey;

	/* Save auth key. */
	flags = crypto_aead_get_flags(tfm);
	err = atmel_sha_authenc_setkey(ctx->auth,
				       keys.authkey, keys.authkeylen,
				       &flags);
	crypto_aead_set_flags(tfm, flags & CRYPTO_TFM_RES_MASK);
	if (err) {
		memzero_explicit(&keys, sizeof(keys));
		return err;
	}

	/* Save enc key. */
	ctx->base.keylen = keys.enckeylen;
	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
				      unsigned long auth_mode)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();

	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
	if (IS_ERR(ctx->auth))
		return PTR_ERR(ctx->auth);

	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
				      auth_reqsize));
	ctx->base.start = atmel_aes_authenc_start;

	return 0;
}

static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
}

static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
}

static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
}

static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
}

static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
}

static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);

	atmel_sha_authenc_free(ctx->auth);
}

static int atmel_aes_authenc_crypt(struct aead_request *req,
				   unsigned long mode)
{
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
	u32 authsize = crypto_aead_authsize(tfm);
	bool enc = (mode & AES_FLAGS_ENCRYPT);
	struct atmel_aes_dev *dd;

	/* Compute the text length. */
	if (!enc && req->cryptlen < authsize)
		return -EINVAL;
	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * Empty messages are not supported yet: the SHA auto-padding can be
	 * used only on non-empty messages, so a dedicated special case would
	 * be needed to handle them.
	 */
	if (!rctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->base.mode = mode;
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	return atmel_aes_handle_queue(dd, &req->base);
}

static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
{
	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
{
	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
}

static struct aead_alg aes_authenc_algs[] = {
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA1_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA224_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA256_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA384_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA512_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
		.cra_priority		= ATMEL_AES_PRIORITY,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
},
};
#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */

/* Probe functions */

static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
			      struct crypto_platform_data *pdata)
{
	struct at_dma_slave *slave;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Try to grab 2 DMA channels */
	slave = &pdata->dma_slave->rxdata;
	dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "tx");
	if (!dd->src.chan)
		goto err_dma_in;

	slave = &pdata->dma_slave->txdata;
	dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
							slave, dd->dev, "rx");
	if (!dd->dst.chan)
		goto err_dma_out;

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_warn(dd->dev, "no DMA channel available\n");
	return -ENODEV;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}
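
/*
 * Completion model: the interrupt handler and DMA callbacks do not run the
 * state machine directly. They record the pending step in dd->resume, and
 * the done tasklet replays it in softirq context, so every continuation in
 * this file executes with the same calling convention whether it was
 * reached synchronously or asynchronously (dd->is_async).
 */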

static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc)
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
			crypto_unregister_aead(&aes_authenc_algs[i]);
#endif

	if (dd->caps.has_xts)
		crypto_unregister_alg(&aes_xts_alg);

	if (dd->caps.has_gcm)
		crypto_unregister_aead(&aes_gcm_alg);

	if (dd->caps.has_cfb64)
		crypto_unregister_alg(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		err = crypto_register_alg(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	if (dd->caps.has_gcm) {
		err = crypto_register_aead(&aes_gcm_alg);
		if (err)
			goto err_aes_gcm_alg;
	}

	if (dd->caps.has_xts) {
		err = crypto_register_alg(&aes_xts_alg);
		if (err)
			goto err_aes_xts_alg;
	}

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc) {
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
			err = crypto_register_aead(&aes_authenc_algs[i]);
			if (err)
				goto err_aes_authenc_alg;
		}
	}
#endif

	return 0;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
	for (j = 0; j < i; j++)
		crypto_unregister_aead(&aes_authenc_algs[j]);
	crypto_unregister_alg(&aes_xts_alg);
#endif
err_aes_xts_alg:
	crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
	crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.has_ctr32 = 0;
	dd->caps.has_gcm = 0;
	dd->caps.has_xts = 0;
	dd->caps.has_authenc = 0;
	dd->caps.max_burst_size = 1;

	/* keep only the major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_ctr32 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.has_xts = 1;
		dd->caps.has_authenc = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_ctr32 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct crypto_platform_data *pdata;

	if (!np) {
		dev_err(&pdev->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->dma_slave = devm_kzalloc(&pdev->dev,
					sizeof(*(pdata->dma_slave)),
					GFP_KERNEL);
	if (!pdata->dma_slave) {
		devm_kfree(&pdev->dev, pdata);
		return ERR_PTR(-ENOMEM);
	}

	return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
	return ERR_PTR(-EINVAL);
}
#endif

static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct crypto_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		pdata = atmel_aes_of_init(pdev);
		if (IS_ERR(pdata)) {
			err = PTR_ERR(pdata);
			goto aes_dd_err;
		}
	}

	if (!pdata->dma_slave) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (!aes_dd) {
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	/* Get the base address. */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ. */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto res_err;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto res_err;
	}

	/* Initialize the peripheral clock. */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto res_err;
	}

	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		dev_err(dev, "can't ioremap\n");
		err = PTR_ERR(aes_dd->io_base);
		goto res_err;
	}

	err = clk_prepare(aes_dd->iclk);
	if (err)
		goto res_err;

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto iclk_unprepare;

	atmel_aes_get_cap(aes_dd);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
		err = -EPROBE_DEFER;
		goto iclk_unprepare;
	}
#endif

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_aes_buff;

	err = atmel_aes_dma_init(aes_dd, pdata);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
		 dma_chan_name(aes_dd->src.chan),
		 dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
iclk_unprepare:
	clk_unprepare(aes_dd->iclk);
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
	if (err != -EPROBE_DEFER)
		dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	clk_unprepare(aes_dd->iclk);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name		= "atmel_aes",
		.of_match_table	= of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");