// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all this information to perform:
 * - Command decoding and control of the engine's data path.
 * - Coordination of hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

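/*
 * Bind the context to the first EIP97 device on the list (there is
 * typically only one) and reuse the cached device on later requests.
 */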
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

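/* Number of bytes needed to pad @len up to the next AES block boundary */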
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

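/*
 * Check whether the first @len bytes of @sg can be fed to the DMA engine
 * directly: @len must be block aligned, every entry must start word
 * aligned, and all but the final entry used must have a block-aligned
 * length. On success, record how many entries are needed and trim the
 * final entry down to @len, saving the cut-off byte count so that
 * mtk_aes_restore_sg() can undo the trim later.
 */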
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

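/*
 * Give back the bytes that mtk_aes_check_aligned() trimmed off the
 * last scatterlist entry.
 */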
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

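/*
 * Undo the DMA mappings set up by mtk_aes_map() and, if a bounce buffer
 * was used for the destination, copy the result back to the caller's
 * scatterlist.
 */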
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

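/*
 * DMA-map the command/transform info and the source and destination
 * scatterlists, then hand the request to mtk_aes_xmit(). The transform
 * record lives right after the command tokens in struct mtk_aes_info,
 * so tfm_dma is derived from ct_dma.
 */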
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

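/*
 * Prepare a request for DMA. If either the source or the destination
 * does not satisfy the engine's alignment rules, fall back to the
 * pre-allocated bounce buffer: copy the (block-padded) input there and
 * run the transfer on a single aligned scatterlist entry instead.
 */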
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

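/*
 * Enqueue a request on the record's queue and, if the record is idle,
 * dequeue the next request and kick off its processing. Called both
 * from the algorithm entry points (with a new request) and from the
 * queue tasklet (with NULL) once a transfer has completed.
 */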
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

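/*
 * Process a CTR request in one or more chunks. The hardware keeps only
 * a 32-bit big-endian block counter in IV word 3, so a request that
 * would wrap that counter is cut at the wrap point and resumed here
 * with a manually incremented IV once the first chunk has completed.
 */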
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check the AES key length and save the key for the transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

	return 0;
}

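/*
 * Common skcipher entry point: one record/ring handles encryption and
 * the other decryption, so both directions can be processed in
 * parallel.
 */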
static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}

static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctr_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
	.init			= mtk_aes_init_tfm,
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just
 * need to check the returned status, which is stored in the result
 * descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

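/*
 * GCM counterpart of mtk_aes_dma(). Associated data, text and tag
 * travel through the engine together, so no block padding is applied;
 * misaligned buffers are still bounced through aes->buf.
 */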
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

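/*
 * Common AEAD entry point. For decryption, req->cryptlen includes the
 * authentication tag, so the text length is cryptlen minus the
 * configured tag size.
 */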
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-compute the hash
 * key H for the GHASH operation: the result of encrypting an all-zero
 * block needs to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	u8 hash[AES_BLOCK_SIZE] __aligned(4) = {};
	struct crypto_aes_ctx aes_ctx;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	aes_encrypt(&aes_ctx, hash, hash);
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
	mtk_aes_write_state_be(ctx->key + ctx->keylen, (const u32 *)hash,
			       AES_BLOCK_SIZE);

	return 0;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

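/*
 * Bottom halves: the queue tasklet pulls the next request off the
 * record's queue once the engine goes idle, while the done tasklet
 * unmaps a finished transfer and runs its resume handler.
 */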
static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

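/*
 * Per-ring interrupt handler: acknowledge the result descriptor ring
 * status, reset the processed-descriptor count and threshold, then
 * defer completion handling to the done tasklet.
 */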
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to
 * process outbound and inbound data in parallel. This improves
 * performance in most use cases, such as IPsec VPN, especially under
 * heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i >= 0; i--) {
		if (!aes[i])
			continue;
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}