1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Cryptographic API.
4 *
5 * Support for SAHARA cryptographic accelerator.
6 *
7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8 * Copyright (c) 2013 Vista Silicon S.L.
9 * Author: Javier Martin <javier.martin@vista-silicon.com>
10 *
11 * Based on omap-aes.c and tegra-aes.c
12 */
13
14#include <crypto/aes.h>
15#include <crypto/internal/hash.h>
16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/engine.h>
19#include <crypto/sha1.h>
20#include <crypto/sha2.h>
21
22#include <linux/clk.h>
23#include <linux/dma-mapping.h>
24#include <linux/interrupt.h>
25#include <linux/io.h>
26#include <linux/irq.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/of.h>
30#include <linux/platform_device.h>
31#include <linux/spinlock.h>
32
33#define SHA_BUFFER_LEN PAGE_SIZE
34#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
35
36#define SAHARA_NAME "sahara"
37#define SAHARA_VERSION_3 3
38#define SAHARA_VERSION_4 4
39#define SAHARA_TIMEOUT_MS 1000
40#define SAHARA_MAX_HW_DESC 2
41#define SAHARA_MAX_HW_LINK 20
42
43#define FLAGS_MODE_MASK 0x000f
44#define FLAGS_ENCRYPT BIT(0)
45#define FLAGS_CBC BIT(1)
46
47#define SAHARA_HDR_BASE 0x00800000
48#define SAHARA_HDR_SKHA_ALG_AES 0
49#define SAHARA_HDR_SKHA_MODE_ECB 0
50#define SAHARA_HDR_SKHA_OP_ENC BIT(2)
51#define SAHARA_HDR_SKHA_MODE_CBC BIT(3)
52#define SAHARA_HDR_FORM_DATA (5 << 16)
53#define SAHARA_HDR_FORM_KEY BIT(19)
54#define SAHARA_HDR_LLO BIT(24)
55#define SAHARA_HDR_CHA_SKHA BIT(28)
56#define SAHARA_HDR_CHA_MDHA BIT(29)
57#define SAHARA_HDR_PARITY_BIT BIT(31)
58
59#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
60#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
61#define SAHARA_HDR_MDHA_HASH 0xA0850000
62#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
63#define SAHARA_HDR_MDHA_ALG_SHA1 0
64#define SAHARA_HDR_MDHA_ALG_MD5 1
65#define SAHARA_HDR_MDHA_ALG_SHA256 2
66#define SAHARA_HDR_MDHA_ALG_SHA224 3
67#define SAHARA_HDR_MDHA_PDATA BIT(2)
68#define SAHARA_HDR_MDHA_HMAC BIT(3)
69#define SAHARA_HDR_MDHA_INIT BIT(5)
70#define SAHARA_HDR_MDHA_IPAD BIT(6)
71#define SAHARA_HDR_MDHA_OPAD BIT(7)
72#define SAHARA_HDR_MDHA_SWAP BIT(8)
73#define SAHARA_HDR_MDHA_MAC_FULL BIT(9)
74#define SAHARA_HDR_MDHA_SSL BIT(10)
75
76#define SAHARA_REG_VERSION 0x00
77#define SAHARA_REG_DAR 0x04
78#define SAHARA_REG_CONTROL 0x08
79#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
80#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
81#define SAHARA_CONTROL_RNG_AUTORSD BIT(7)
82#define SAHARA_CONTROL_ENABLE_INT BIT(4)
83#define SAHARA_REG_CMD 0x0C
84#define SAHARA_CMD_RESET BIT(0)
85#define SAHARA_CMD_CLEAR_INT BIT(8)
86#define SAHARA_CMD_CLEAR_ERR BIT(9)
87#define SAHARA_CMD_SINGLE_STEP BIT(10)
88#define SAHARA_CMD_MODE_BATCH BIT(16)
89#define SAHARA_CMD_MODE_DEBUG BIT(18)
90#define SAHARA_REG_STATUS 0x10
91#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
92#define SAHARA_STATE_IDLE 0
93#define SAHARA_STATE_BUSY 1
94#define SAHARA_STATE_ERR 2
95#define SAHARA_STATE_FAULT 3
96#define SAHARA_STATE_COMPLETE 4
97#define SAHARA_STATE_COMP_FLAG BIT(2)
98#define SAHARA_STATUS_DAR_FULL BIT(3)
99#define SAHARA_STATUS_ERROR BIT(4)
100#define SAHARA_STATUS_SECURE BIT(5)
101#define SAHARA_STATUS_FAIL BIT(6)
102#define SAHARA_STATUS_INIT BIT(7)
103#define SAHARA_STATUS_RNG_RESEED BIT(8)
104#define SAHARA_STATUS_ACTIVE_RNG BIT(9)
105#define SAHARA_STATUS_ACTIVE_MDHA BIT(10)
106#define SAHARA_STATUS_ACTIVE_SKHA BIT(11)
107#define SAHARA_STATUS_MODE_BATCH BIT(16)
108#define SAHARA_STATUS_MODE_DEDICATED BIT(17)
109#define SAHARA_STATUS_MODE_DEBUG BIT(18)
110#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
111#define SAHARA_REG_ERRSTATUS 0x14
112#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
113#define SAHARA_ERRSOURCE_CHA 14
114#define SAHARA_ERRSOURCE_DMA 15
115#define SAHARA_ERRSTATUS_DMA_DIR BIT(8)
116#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
117#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
118#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
119#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
120#define SAHARA_REG_FADDR 0x18
121#define SAHARA_REG_CDAR 0x1C
122#define SAHARA_REG_IDAR 0x20
123
/*
 * Hardware descriptor as consumed by the SAHARA engine: two data
 * pointers (p1/p2) with their byte lengths, a header word selecting the
 * operation, and the physical address of the next descriptor in the
 * chain (0 terminates).
 */
struct sahara_hw_desc {
	u32 hdr;
	u32 len1;
	u32 p1;
	u32 len2;
	u32 p2;
	u32 next;
};

/* Scatter/gather link entry: one DMA chunk in a chained buffer list. */
struct sahara_hw_link {
	u32 len;
	u32 p;
	u32 next;
};
138
/* Per-transform state shared by all requests on one tfm. */
struct sahara_ctx {
	/* AES-specific context */
	int keylen;			/* key length in bytes */
	u8 key[AES_KEYSIZE_128];	/* raw key; HW path handles 128-bit only */
	struct crypto_skcipher *fallback; /* SW fallback for 192/256-bit keys */
};

/* Per-request AES state. */
struct sahara_aes_reqctx {
	unsigned long mode;		/* FLAGS_ENCRYPT / FLAGS_CBC for this request */
	u8 iv_out[AES_BLOCK_SIZE];	/* last ciphertext block, saved for CBC decrypt */
	struct skcipher_request fallback_req; // keep at the end
};
151
/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 */
struct sahara_sha_reqctx {
	u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8 context[SHA256_DIGEST_SIZE + 4];
	unsigned int mode;
	unsigned int digest_size;
	unsigned int context_size;
	unsigned int buf_cnt;
	unsigned int sg_in_idx;
	struct scatterlist *in_sg;
	struct scatterlist in_sg_chain[2];
	size_t total;
	unsigned int last;
	unsigned int first;
};
184
/*
 * struct sahara_dev - driver state for one SAHARA instance
 *
 * Holds the MMIO mapping, clocks, the DMA-coherent buffers shared with
 * the accelerator (descriptors, links, key/IV/context) and the state of
 * the request currently in flight.  Only one request runs at a time,
 * serialized by the crypto engine.
 */
struct sahara_dev {
	struct device *device;
	unsigned int version;		/* SAHARA_VERSION_3 or _4 */
	void __iomem *regs_base;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct completion dma_completion; /* re-armed before each operation */

	struct sahara_ctx *ctx;		/* tfm context of the request in flight */
	unsigned long flags;		/* FLAGS_* of the request in flight */

	struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8 *key_base;			/* DMA buffer holding the AES key */
	dma_addr_t key_phys_base;

	u8 *iv_base;			/* DMA buffer holding the CBC IV */
	dma_addr_t iv_phys_base;

	u8 *context_base;		/* DMA buffer for the MDHA hash context */
	dma_addr_t context_phys_base;

	struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t total;			/* bytes to transfer for current request */
	struct scatterlist *in_sg;
	int nb_in_sg;
	struct scatterlist *out_sg;
	int nb_out_sg;

	struct crypto_engine *engine;
};
219
/* Single global instance; presumably set at probe time — not visible here. */
static struct sahara_dev *dev_ptr;
221
222static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
223{
224 writel(data, dev->regs_base + reg);
225}
226
227static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
228{
229 return readl(dev->regs_base + reg);
230}
231
232static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
233{
234 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
235 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
236 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
237
238 if (dev->flags & FLAGS_CBC) {
239 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
240 hdr ^= SAHARA_HDR_PARITY_BIT;
241 }
242
243 if (dev->flags & FLAGS_ENCRYPT) {
244 hdr |= SAHARA_HDR_SKHA_OP_ENC;
245 hdr ^= SAHARA_HDR_PARITY_BIT;
246 }
247
248 return hdr;
249}
250
251static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
252{
253 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
254 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
255}
256
/* Error-source field (low nibble of the error status register). */
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

/* DMA transfer size field of the error status register. */
static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

/* DMA error-source field of the error status register. */
static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

/* CHA error-source field of the error status register. */
static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

/* Which CHA reported the error. */
static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
310
/* Print a human-readable breakdown of the error status register. */
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	/*
	 * NOTE(review): ffs() is 1-based (0 when no bit is set) while
	 * sahara_cha_errsrc[] has valid indices 0..11, so a set bit 11 in
	 * the CHA source field would index one past the array.  Verify
	 * against the hardware field encoding.
	 */
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
338
/* Names for the two low state bits (completion flag masked off). */
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

/*
 * Dump the status register in human-readable form.
 * Becomes a no-op unless the file is built with DEBUG defined.
 */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
392
/* Debug dump of both hardware descriptors; no-op without DEBUG. */
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
413
/* Debug dump of all hardware link entries; no-op without DEBUG. */
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
431
/*
 * Build the two-descriptor AES chain (key/IV load + data transfer),
 * DMA-map the source and destination scatterlists, and start the
 * operation by writing the first descriptor address to the DAR.
 *
 * Returns 0 on success, a negative errno otherwise.  On success the
 * scatterlists remain mapped; the caller (sahara_aes_process()) unmaps
 * them after completion.
 */
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;
	u32 len;

	memcpy(dev->key_base, ctx->key, ctx->keylen);

	/* Descriptor #1: key in p2; for CBC also the IV in p1. */
	if (dev->flags & FLAGS_CBC) {
		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
	} else {
		dev->hw_desc[idx]->len1 = 0;
		dev->hw_desc[idx]->p1 = 0;
	}
	dev->hw_desc[idx]->len2 = ctx->keylen;
	dev->hw_desc[idx]->p2 = dev->key_phys_base;
	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

	idx++;


	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	/* Input and output chains share the hw_link[] pool. */
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (!ret) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (!ret) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	len = dev->total;
	for (i = 0; i < dev->nb_in_sg; i++) {
		/* Clamp each entry to the bytes still outstanding. */
		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	len = dev->total;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = min(len, sg->length);
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* Writing the DAR kicks off processing of the chain. */
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}
539
540static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
541{
542 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
543 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
544 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
545
546 /* Update IV buffer to contain the last ciphertext block */
547 if (rctx->mode & FLAGS_ENCRYPT) {
548 sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
549 ivsize, req->cryptlen - ivsize);
550 } else {
551 memcpy(req->iv, rctx->iv_out, ivsize);
552 }
553}
554
/*
 * Execute one AES request on the hardware (crypto-engine worker path).
 *
 * Loads the request parameters into the device, builds and fires the
 * descriptor chain, then blocks until the DMA completion is signalled
 * or SAHARA_TIMEOUT_MS expires.  Returns 0, -EINVAL on descriptor
 * setup failure, or -ETIMEDOUT.
 */
static int sahara_aes_process(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long time_left;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->cryptlen, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->cryptlen;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->iv) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		memcpy(dev->iv_base, req->iv, ivsize);

		/*
		 * Save the last ciphertext block before the HW runs, since
		 * an in-place request may overwrite it; it becomes the
		 * output IV (see sahara_aes_cbc_update_iv()).
		 */
		if (!(dev->flags & FLAGS_ENCRYPT)) {
			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
					   rctx->iv_out, ivsize,
					   req->cryptlen - ivsize);
		}
	}

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	time_left = wait_for_completion_timeout(&dev->dma_completion,
						msecs_to_jiffies(SAHARA_TIMEOUT_MS));

	/* Descriptor setup left the scatterlists mapped; undo that here. */
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	if (!time_left) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	if ((dev->flags & FLAGS_CBC) && req->iv)
		sahara_aes_cbc_update_iv(req);

	return 0;
}
618
619static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
620 unsigned int keylen)
621{
622 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
623
624 ctx->keylen = keylen;
625
626 /* SAHARA only supports 128bit keys */
627 if (keylen == AES_KEYSIZE_128) {
628 memcpy(ctx->key, key, keylen);
629 return 0;
630 }
631
632 if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
633 return -EINVAL;
634
635 /*
636 * The requested key size is not supported by HW, do a fallback.
637 */
638 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
639 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
640 CRYPTO_TFM_REQ_MASK);
641 return crypto_skcipher_setkey(ctx->fallback, key, keylen);
642}
643
644static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
645{
646 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
647 struct sahara_ctx *ctx = crypto_skcipher_ctx(
648 crypto_skcipher_reqtfm(req));
649
650 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
651 skcipher_request_set_callback(&rctx->fallback_req,
652 req->base.flags,
653 req->base.complete,
654 req->base.data);
655 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
656 req->dst, req->cryptlen, req->iv);
657
658 if (mode & FLAGS_ENCRYPT)
659 return crypto_skcipher_encrypt(&rctx->fallback_req);
660
661 return crypto_skcipher_decrypt(&rctx->fallback_req);
662}
663
/*
 * Common entry point for all AES variants: validate the request and
 * queue it on the crypto engine, or divert to the software fallback
 * for key sizes the hardware cannot handle.
 */
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));
	struct sahara_dev *dev = dev_ptr;

	/* Empty requests complete immediately. */
	if (!req->cryptlen)
		return 0;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
		return sahara_aes_fallback(req, mode);

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	/* The hardware path only handles whole AES blocks. */
	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
		return -EINVAL;

	rctx->mode = mode;

	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
}
687
/* Thin entry points mapping each algorithm variant onto sahara_aes_crypt(). */
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_CBC);
}
707
708static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
709{
710 const char *name = crypto_tfm_alg_name(&tfm->base);
711 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
712
713 ctx->fallback = crypto_alloc_skcipher(name, 0,
714 CRYPTO_ALG_NEED_FALLBACK);
715 if (IS_ERR(ctx->fallback)) {
716 pr_err("Error allocating fallback algo %s\n", name);
717 return PTR_ERR(ctx->fallback);
718 }
719
720 crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
721 crypto_skcipher_reqsize(ctx->fallback));
722
723 return 0;
724}
725
/* Transform destructor: free the fallback from sahara_aes_init_tfm(). */
static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
732
733static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
734 struct sahara_sha_reqctx *rctx)
735{
736 u32 hdr = 0;
737
738 hdr = rctx->mode;
739
740 if (rctx->first) {
741 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
742 hdr |= SAHARA_HDR_MDHA_INIT;
743 } else {
744 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
745 }
746
747 if (rctx->last)
748 hdr |= SAHARA_HDR_MDHA_PDATA;
749
750 if (hweight_long(hdr) % 2 == 0)
751 hdr |= SAHARA_HDR_PARITY_BIT;
752
753 return hdr;
754}
755
756static int sahara_sha_hw_links_create(struct sahara_dev *dev,
757 struct sahara_sha_reqctx *rctx,
758 int start)
759{
760 struct scatterlist *sg;
761 unsigned int len;
762 unsigned int i;
763 int ret;
764
765 dev->in_sg = rctx->in_sg;
766
767 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
768 if (dev->nb_in_sg < 0) {
769 dev_err(dev->device, "Invalid numbers of src SG.\n");
770 return dev->nb_in_sg;
771 }
772 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
773 dev_err(dev->device, "not enough hw links (%d)\n",
774 dev->nb_in_sg + dev->nb_out_sg);
775 return -EINVAL;
776 }
777
778 sg = dev->in_sg;
779 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
780 if (!ret)
781 return -EFAULT;
782
783 len = rctx->total;
784 for (i = start; i < dev->nb_in_sg + start; i++) {
785 dev->hw_link[i]->len = min(len, sg->length);
786 dev->hw_link[i]->p = sg->dma_address;
787 if (i == (dev->nb_in_sg + start - 1)) {
788 dev->hw_link[i]->next = 0;
789 } else {
790 len -= min(len, sg->length);
791 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
792 sg = sg_next(sg);
793 }
794 }
795
796 return i;
797}
798
/*
 * Build the data-processing descriptor at @index: p1 points at the
 * input link chain (if any data), p2 at a single link used by the HW
 * to store the updated hash context.
 *
 * Returns 0 on success or a negative errno from the link setup.
 */
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		/*
		 * NOTE(review): when index == 0 this leaves sg_in_idx at 0,
		 * which sahara_sha_process() treats as "nothing mapped" -
		 * confirm the dma_unmap_sg there is not skipped for the
		 * first chunk of a request.
		 */
		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	/* The link after the data links receives the updated context. */
	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
842
843/*
844 * Load descriptor aka #6
845 *
846 * To load a previously saved context back to the MDHA unit
847 *
848 * p1: Saved Context
849 * p2: NULL
850 *
851 */
852static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
853 struct sahara_sha_reqctx *rctx,
854 struct ahash_request *req,
855 int index)
856{
857 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
858
859 dev->hw_desc[index]->len1 = rctx->context_size;
860 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
861 dev->hw_desc[index]->len2 = 0;
862 dev->hw_desc[index]->p2 = 0;
863
864 dev->hw_link[index]->len = rctx->context_size;
865 dev->hw_link[index]->p = dev->context_phys_base;
866 dev->hw_link[index]->next = 0;
867
868 return 0;
869}
870
/*
 * Stage the request data for hardware processing.
 *
 * Bytes left over from the previous call (rctx->buf) are prepended via
 * rctx->rembuf, and the amount submitted is trimmed to a block_size
 * multiple unless this is the last chunk; the remainder is stashed in
 * rctx->buf for next time.
 *
 * Returns 0 when everything was buffered (nothing to submit) and
 * -EINPROGRESS when rctx->in_sg / rctx->total describe data that must
 * be sent to the hardware.
 */
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	rctx->total = len - hash_later;
	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		sg_chain(rctx->in_sg_chain, 2, req->src);
		rctx->in_sg = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		rctx->in_sg = rctx->in_sg_chain;
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
928
/*
 * Execute one hash request on the hardware (crypto-engine worker path).
 *
 * First chunk: a single data descriptor.  Later chunks: a context
 * restore descriptor chained to a hash descriptor.  The updated context
 * is copied back for the next chunk, and the digest is extracted from
 * it on the final chunk.
 */
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long time_left;

	/* 0 means everything was buffered; nothing to submit yet. */
	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		if (ret)
			return ret;

		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		/* Restore the context saved by the previous chunk. */
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		if (ret)
			return ret;

		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	/* Writing the DAR starts processing of the descriptor chain. */
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	time_left = wait_for_completion_timeout(&dev->dma_completion,
						msecs_to_jiffies(SAHARA_TIMEOUT_MS));

	/*
	 * NOTE(review): sg_in_idx is 0 both when no data was mapped and
	 * when the data descriptor was built at index 0 (first chunk);
	 * verify the unmap is not wrongly skipped in the latter case.
	 */
	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	if (!time_left) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	/* Preserve the HW context for the next chunk of this request. */
	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result && rctx->last)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}
985
986static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
987{
988 struct crypto_async_request *async_req = areq;
989 int err;
990
991 if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
992 struct ahash_request *req = ahash_request_cast(async_req);
993
994 err = sahara_sha_process(req);
995 local_bh_disable();
996 crypto_finalize_hash_request(engine, req, err);
997 local_bh_enable();
998 } else {
999 struct skcipher_request *req = skcipher_request_cast(async_req);
1000
1001 err = sahara_aes_process(skcipher_request_cast(async_req));
1002 local_bh_disable();
1003 crypto_finalize_skcipher_request(engine, req, err);
1004 local_bh_enable();
1005 }
1006
1007 return 0;
1008}
1009
/*
 * Queue a hash request on the crypto engine.  @last marks the final
 * chunk so the hardware applies padding.  Empty non-final updates
 * complete immediately without touching the engine.
 */
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	return crypto_transfer_hash_request_to_engine(dev->engine, req);
}
1022
1023static int sahara_sha_init(struct ahash_request *req)
1024{
1025 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1026 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1027
1028 memset(rctx, 0, sizeof(*rctx));
1029
1030 switch (crypto_ahash_digestsize(tfm)) {
1031 case SHA1_DIGEST_SIZE:
1032 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1033 rctx->digest_size = SHA1_DIGEST_SIZE;
1034 break;
1035 case SHA256_DIGEST_SIZE:
1036 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1037 rctx->digest_size = SHA256_DIGEST_SIZE;
1038 break;
1039 default:
1040 return -EINVAL;
1041 }
1042
1043 rctx->context_size = rctx->digest_size + 4;
1044 rctx->first = 1;
1045
1046 return 0;
1047}
1048
/* ahash .update: feed more data; may only buffer if below block size. */
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

/* ahash .final: no new data; flush buffered bytes and emit the digest. */
static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

/* ahash .finup: hash the remaining data and finalize in one call. */
static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}
1064
/* ahash .digest: one-shot init + finup. */
static int sahara_sha_digest(struct ahash_request *req)
{
	int ret;

	/* Propagate -EINVAL for unsupported digest sizes. */
	ret = sahara_sha_init(req);
	if (ret)
		return ret;

	return sahara_sha_finup(req);
}
1071
1072static int sahara_sha_export(struct ahash_request *req, void *out)
1073{
1074 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1075
1076 memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1077
1078 return 0;
1079}
1080
1081static int sahara_sha_import(struct ahash_request *req, const void *in)
1082{
1083 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1084
1085 memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1086
1087 return 0;
1088}
1089
/* Reserve room for the per-request SHA state in every ahash request. */
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx));

	return 0;
}
1097
/* AES skcipher algorithms registered with the crypto engine. */
static struct skcipher_engine_alg aes_algs[] = {
{
	.base = {
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "sahara-ecb-aes",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct sahara_ctx),
		.base.cra_alignmask = 0x0,
		.base.cra_module = THIS_MODULE,

		.init = sahara_aes_init_tfm,
		.exit = sahara_aes_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sahara_aes_setkey,
		.encrypt = sahara_aes_ecb_encrypt,
		.decrypt = sahara_aes_ecb_decrypt,
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
}, {
	.base = {
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "sahara-cbc-aes",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct sahara_ctx),
		.base.cra_alignmask = 0x0,
		.base.cra_module = THIS_MODULE,

		.init = sahara_aes_init_tfm,
		.exit = sahara_aes_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = sahara_aes_setkey,
		.encrypt = sahara_aes_cbc_encrypt,
		.decrypt = sahara_aes_cbc_decrypt,
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
}
};
1146
/* SHA-1, supported by every SAHARA revision (v3 and later). */
static struct ahash_engine_alg sha_v3_algs[] = {
{
	.base = {
		.init		= sahara_sha_init,
		.update		= sahara_sha_update,
		.final		= sahara_sha_final,
		.finup		= sahara_sha_finup,
		.digest		= sahara_sha_digest,
		.export		= sahara_sha_export,
		.import		= sahara_sha_import,
		.halg.digestsize	= SHA1_DIGEST_SIZE,
		.halg.statesize		= sizeof(struct sahara_sha_reqctx),
		.halg.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sahara-sha1",
			.cra_priority		= 300,
			.cra_flags		= CRYPTO_ALG_ASYNC |
							CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sahara_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sahara_sha_cra_init,
		}
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
},
};
1177
/* SHA-256, only available on SAHARA v4 (registered conditionally). */
static struct ahash_engine_alg sha_v4_algs[] = {
{
	.base = {
		.init		= sahara_sha_init,
		.update		= sahara_sha_update,
		.final		= sahara_sha_final,
		.finup		= sahara_sha_finup,
		.digest		= sahara_sha_digest,
		.export		= sahara_sha_export,
		.import		= sahara_sha_import,
		.halg.digestsize	= SHA256_DIGEST_SIZE,
		.halg.statesize		= sizeof(struct sahara_sha_reqctx),
		.halg.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sahara-sha256",
			.cra_priority		= 300,
			.cra_flags		= CRYPTO_ALG_ASYNC |
							CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sahara_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sahara_sha_cra_init,
		}
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
},
};
1208
/*
 * Interrupt handler: acknowledge the IRQ, decode the device state and wake
 * up the thread waiting on dma_completion.
 */
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	/* Ack the interrupt and clear any latched error condition. */
	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	/* Descriptor chain still running: not our completion yet. */
	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
		return IRQ_NONE;

	/* Anything other than COMPLETE is an error worth decoding. */
	if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
		sahara_decode_error(dev, err);

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
1230
1231
1232static int sahara_register_algs(struct sahara_dev *dev)
1233{
1234 int err;
1235
1236 err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1237 if (err)
1238 return err;
1239
1240 err = crypto_engine_register_ahashes(sha_v3_algs,
1241 ARRAY_SIZE(sha_v3_algs));
1242 if (err)
1243 goto err_aes_algs;
1244
1245 if (dev->version > SAHARA_VERSION_3) {
1246 err = crypto_engine_register_ahashes(sha_v4_algs,
1247 ARRAY_SIZE(sha_v4_algs));
1248 if (err)
1249 goto err_sha_v3_algs;
1250 }
1251
1252 return 0;
1253
1254err_sha_v3_algs:
1255 crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1256
1257err_aes_algs:
1258 crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1259
1260 return err;
1261}
1262
1263static void sahara_unregister_algs(struct sahara_dev *dev)
1264{
1265 crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1266 crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1267
1268 if (dev->version > SAHARA_VERSION_3)
1269 crypto_engine_unregister_ahashes(sha_v4_algs,
1270 ARRAY_SIZE(sha_v4_algs));
1271}
1272
/* Devicetree match table; exported so the module can be autoloaded. */
static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1279
/*
 * Probe: map registers, request the IRQ, enable clocks, allocate all DMA
 * coherent buffers (descriptors, key/IV, hash context, link tables), start
 * the crypto engine, verify the hardware version and register algorithms.
 * Most resources are devm-/dmam-managed, so the explicit error path only
 * has to tear down the crypto engine.
 */
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "failed to request irq\n");

	/* clocks */
	dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg))
		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
				     "Could not get ipg clock\n");

	dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb))
		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
				     "Could not get ahb clock\n");

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0])
		return -ENOMEM;
	/* Both descriptors live in one coherent allocation. */
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
			       sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base)
		return -ENOMEM;
	/* The IV is stored right after the key in the same buffer. */
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base)
		return -ENOMEM;

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0])
		return -ENOMEM;
	/* Pre-compute the virtual/physical address of every link entry. */
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
				       sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	dev_ptr = dev;

	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!dev->engine)
		return -ENOMEM;

	err = crypto_engine_start(dev->engine);
	if (err) {
		crypto_engine_exit(dev->engine);
		return dev_err_probe(&pdev->dev, err,
				     "Could not start crypto engine\n");
	}

	init_completion(&dev->dma_completion);

	/*
	 * NOTE(review): the checks below rely on err still being 0 here
	 * (set by the successful crypto_engine_start() above).  If neither
	 * compatible matches, the version check is silently skipped —
	 * harmless with the current match table, but fragile.
	 */
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err_probe(&pdev->dev, err,
			      "SAHARA version %d not supported\n", version);
		goto err_algs;
	}

	dev->version = version;

	/* Reset the device and enable interrupts in batch mode. */
	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	crypto_engine_exit(dev->engine);

	return err;
}
1413
1414static void sahara_remove(struct platform_device *pdev)
1415{
1416 struct sahara_dev *dev = platform_get_drvdata(pdev);
1417
1418 crypto_engine_exit(dev->engine);
1419 sahara_unregister_algs(dev);
1420}
1421
/* Platform driver glue; matching is devicetree-only via sahara_dt_ids. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Cryptographic API.
4 *
5 * Support for SAHARA cryptographic accelerator.
6 *
7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8 * Copyright (c) 2013 Vista Silicon S.L.
9 * Author: Javier Martin <javier.martin@vista-silicon.com>
10 *
11 * Based on omap-aes.c and tegra-aes.c
12 */
13
14#include <crypto/aes.h>
15#include <crypto/internal/hash.h>
16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/sha1.h>
19#include <crypto/sha2.h>
20
21#include <linux/clk.h>
22#include <linux/dma-mapping.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/kernel.h>
27#include <linux/kthread.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30#include <linux/of.h>
31#include <linux/of_device.h>
32#include <linux/platform_device.h>
33
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

/* Software flags kept in dev->flags / ctx->flags. */
#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

/* Hardware descriptor header bits (SKHA = cipher channel). */
#define SAHARA_HDR_BASE		0x00800000
#define SAHARA_HDR_SKHA_ALG_AES	0
#define SAHARA_HDR_SKHA_OP_ENC	(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA	(5 << 16)
#define SAHARA_HDR_FORM_KEY	(8 << 16)
#define SAHARA_HDR_LLO		(1 << 24)
#define SAHARA_HDR_CHA_SKHA	(1 << 28)
#define SAHARA_HDR_CHA_MDHA	(2 << 28)
#define SAHARA_HDR_PARITY_BIT	(1 << 31)

/* MDHA (hash channel) header values and mode bits. */
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1
80
/* Register map and per-register field accessors. */
#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define	SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
128
/*
 * Hardware descriptor as consumed by the SAHARA DMA engine.  p1/p2 hold
 * physical (DMA) addresses; next chains descriptors (0 terminates).
 */
struct sahara_hw_desc {
	u32	hdr;	/* operation header (SAHARA_HDR_*) */
	u32	len1;	/* length of buffer/chain at p1 */
	u32	p1;	/* physical address of first buffer or link chain */
	u32	len2;	/* length of buffer/chain at p2 */
	u32	p2;	/* physical address of second buffer or link chain */
	u32	next;	/* physical address of next descriptor, 0 = end */
};
137
/* One entry of a hardware scatter/gather link chain. */
struct sahara_hw_link {
	u32	len;	/* length of the buffer at p */
	u32	p;	/* physical address of the data buffer */
	u32	next;	/* physical address of the next link, 0 = end */
};
143
/* Per-transform (tfm) context. */
struct sahara_ctx {
	unsigned long flags;	/* FLAGS_NEW_KEY: key must be (re)loaded to HW */

	/* AES-specific context */
	int keylen;			/* current key length in bytes */
	u8 key[AES_KEYSIZE_128];	/* HW only handles 128-bit keys */
	struct crypto_skcipher *fallback; /* SW cipher for 192/256-bit keys */
};

/* Per-request AES context. */
struct sahara_aes_reqctx {
	unsigned long mode;	/* FLAGS_ENCRYPT / FLAGS_CBC for this request */
	struct skcipher_request fallback_req;	// keep at the end
};
157
158/*
159 * struct sahara_sha_reqctx - private data per request
160 * @buf: holds data for requests smaller than block_size
161 * @rembuf: used to prepare one block_size-aligned request
162 * @context: hw-specific context for request. Digest is extracted from this
163 * @mode: specifies what type of hw-descriptor needs to be built
164 * @digest_size: length of digest for this request
165 * @context_size: length of hw-context for this request.
166 * Always digest_size + 4
167 * @buf_cnt: number of bytes saved in buf
168 * @sg_in_idx: number of hw links
169 * @in_sg: scatterlist for input data
170 * @in_sg_chain: scatterlists for chained input data
171 * @total: total number of bytes for transfer
172 * @last: is this the last block
173 * @first: is this the first block
174 * @active: inside a transfer
175 */
176struct sahara_sha_reqctx {
177 u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
178 u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
179 u8 context[SHA256_DIGEST_SIZE + 4];
180 unsigned int mode;
181 unsigned int digest_size;
182 unsigned int context_size;
183 unsigned int buf_cnt;
184 unsigned int sg_in_idx;
185 struct scatterlist *in_sg;
186 struct scatterlist in_sg_chain[2];
187 size_t total;
188 unsigned int last;
189 unsigned int first;
190 unsigned int active;
191};
192
/* Per-device state; there is a single SAHARA instance (see dev_ptr). */
struct sahara_dev {
	struct device		*device;
	unsigned int		version;	/* SAHARA_VERSION_3 or _4 */
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;	/* protects @queue */
	struct task_struct	*kthread;	/* dequeues and processes requests */
	struct completion	dma_completion;	/* signalled from the IRQ handler */

	struct sahara_ctx	*ctx;		/* ctx of the request in flight */
	struct crypto_queue	queue;
	unsigned long		flags;		/* FLAGS_* of the request in flight */

	/* DMA-coherent hardware descriptors and their physical addresses. */
	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	/* key + IV share one coherent buffer (IV right after the key). */
	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	/* hash context buffer: largest digest + message length field. */
	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	/* scatterlists of the request currently being processed. */
	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

/* Singleton device pointer, set once in probe. */
static struct sahara_dev *dev_ptr;
232
/* MMIO register write helper. */
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
237
/* MMIO register read helper. */
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
242
/*
 * Build the descriptor header that loads the AES key (and, for CBC, the IV)
 * into the SKHA channel.
 *
 * The base value already includes SAHARA_HDR_PARITY_BIT; each extra mode
 * bit OR-ed in below toggles the parity bit so the header keeps the same
 * overall bit parity.
 */
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}
261
/* Header for the data descriptor (input/output link chains) on SKHA. */
static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
267
/* Error source strings, indexed by SAHARA_ERRSTATUS_GET_SOURCE(). */
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};
286
/* DMA transfer size strings, indexed by SAHARA_ERRSTATUS_GET_DMASZ(). */
static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};
293
/* DMA error source strings, indexed by SAHARA_ERRSTATUS_GET_DMASRC(). */
static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};
304
/* CHA error source strings (one-hot field, see sahara_decode_error()). */
static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

/* Failing channel, indexed by SAHARA_ERRSTATUS_GET_CHAERR(). */
static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
321
/* Print a human-readable breakdown of the error status register. */
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	/*
	 * NOTE(review): ffs() returns a 1-based bit position (0 if no bit is
	 * set), so the lookup below skips sahara_cha_errsrc[0] and can index
	 * entry 12 (out of bounds) if the highest CHASRC bit is set — verify
	 * against the SAHARA reference manual whether the field is meant to
	 * be decoded this way.
	 */
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "		* DMA read.\n");
		else
			dev_err(dev->device, "		* DMA write.\n");

		dev_err(dev->device, "		* %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "		* %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
349
/* State names, indexed by SAHARA_STATUS_GET_STATE() minus the COMP flag. */
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

/* Dump the status register in human-readable form (DEBUG builds only). */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "	- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "		* %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "	- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "	- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "	- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "	- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "	- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "	- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "	- SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
403
404static void sahara_dump_descriptors(struct sahara_dev *dev)
405{
406 int i;
407
408 if (!__is_defined(DEBUG))
409 return;
410
411 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
412 dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
413 i, &dev->hw_phys_desc[i]);
414 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
415 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
416 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
417 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
418 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
419 dev_dbg(dev->device, "\tnext = 0x%08x\n",
420 dev->hw_desc[i]->next);
421 }
422 dev_dbg(dev->device, "\n");
423}
424
425static void sahara_dump_links(struct sahara_dev *dev)
426{
427 int i;
428
429 if (!__is_defined(DEBUG))
430 return;
431
432 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
433 dev_dbg(dev->device, "Link (%d) (%pad):\n",
434 i, &dev->hw_phys_link[i]);
435 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
436 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
437 dev_dbg(dev->device, "\tnext = 0x%08x\n",
438 dev->hw_link[i]->next);
439 }
440 dev_dbg(dev->device, "\n");
441}
442
443static int sahara_hw_descriptor_create(struct sahara_dev *dev)
444{
445 struct sahara_ctx *ctx = dev->ctx;
446 struct scatterlist *sg;
447 int ret;
448 int i, j;
449 int idx = 0;
450
451 /* Copy new key if necessary */
452 if (ctx->flags & FLAGS_NEW_KEY) {
453 memcpy(dev->key_base, ctx->key, ctx->keylen);
454 ctx->flags &= ~FLAGS_NEW_KEY;
455
456 if (dev->flags & FLAGS_CBC) {
457 dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
458 dev->hw_desc[idx]->p1 = dev->iv_phys_base;
459 } else {
460 dev->hw_desc[idx]->len1 = 0;
461 dev->hw_desc[idx]->p1 = 0;
462 }
463 dev->hw_desc[idx]->len2 = ctx->keylen;
464 dev->hw_desc[idx]->p2 = dev->key_phys_base;
465 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
466
467 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
468
469 idx++;
470 }
471
472 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
473 if (dev->nb_in_sg < 0) {
474 dev_err(dev->device, "Invalid numbers of src SG.\n");
475 return dev->nb_in_sg;
476 }
477 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
478 if (dev->nb_out_sg < 0) {
479 dev_err(dev->device, "Invalid numbers of dst SG.\n");
480 return dev->nb_out_sg;
481 }
482 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
483 dev_err(dev->device, "not enough hw links (%d)\n",
484 dev->nb_in_sg + dev->nb_out_sg);
485 return -EINVAL;
486 }
487
488 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
489 DMA_TO_DEVICE);
490 if (ret != dev->nb_in_sg) {
491 dev_err(dev->device, "couldn't map in sg\n");
492 goto unmap_in;
493 }
494 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
495 DMA_FROM_DEVICE);
496 if (ret != dev->nb_out_sg) {
497 dev_err(dev->device, "couldn't map out sg\n");
498 goto unmap_out;
499 }
500
501 /* Create input links */
502 dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
503 sg = dev->in_sg;
504 for (i = 0; i < dev->nb_in_sg; i++) {
505 dev->hw_link[i]->len = sg->length;
506 dev->hw_link[i]->p = sg->dma_address;
507 if (i == (dev->nb_in_sg - 1)) {
508 dev->hw_link[i]->next = 0;
509 } else {
510 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
511 sg = sg_next(sg);
512 }
513 }
514
515 /* Create output links */
516 dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
517 sg = dev->out_sg;
518 for (j = i; j < dev->nb_out_sg + i; j++) {
519 dev->hw_link[j]->len = sg->length;
520 dev->hw_link[j]->p = sg->dma_address;
521 if (j == (dev->nb_out_sg + i - 1)) {
522 dev->hw_link[j]->next = 0;
523 } else {
524 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
525 sg = sg_next(sg);
526 }
527 }
528
529 /* Fill remaining fields of hw_desc[1] */
530 dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
531 dev->hw_desc[idx]->len1 = dev->total;
532 dev->hw_desc[idx]->len2 = dev->total;
533 dev->hw_desc[idx]->next = 0;
534
535 sahara_dump_descriptors(dev);
536 sahara_dump_links(dev);
537
538 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
539
540 return 0;
541
542unmap_out:
543 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
544 DMA_FROM_DEVICE);
545unmap_in:
546 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
547 DMA_TO_DEVICE);
548
549 return -EINVAL;
550}
551
552static int sahara_aes_process(struct skcipher_request *req)
553{
554 struct sahara_dev *dev = dev_ptr;
555 struct sahara_ctx *ctx;
556 struct sahara_aes_reqctx *rctx;
557 int ret;
558 unsigned long timeout;
559
560 /* Request is ready to be dispatched by the device */
561 dev_dbg(dev->device,
562 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
563 req->cryptlen, req->src, req->dst);
564
565 /* assign new request to device */
566 dev->total = req->cryptlen;
567 dev->in_sg = req->src;
568 dev->out_sg = req->dst;
569
570 rctx = skcipher_request_ctx(req);
571 ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
572 rctx->mode &= FLAGS_MODE_MASK;
573 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
574
575 if ((dev->flags & FLAGS_CBC) && req->iv)
576 memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
577
578 /* assign new context to device */
579 dev->ctx = ctx;
580
581 reinit_completion(&dev->dma_completion);
582
583 ret = sahara_hw_descriptor_create(dev);
584 if (ret)
585 return -EINVAL;
586
587 timeout = wait_for_completion_timeout(&dev->dma_completion,
588 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
589 if (!timeout) {
590 dev_err(dev->device, "AES timeout\n");
591 return -ETIMEDOUT;
592 }
593
594 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
595 DMA_FROM_DEVICE);
596 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
597 DMA_TO_DEVICE);
598
599 return 0;
600}
601
602static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
603 unsigned int keylen)
604{
605 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
606
607 ctx->keylen = keylen;
608
609 /* SAHARA only supports 128bit keys */
610 if (keylen == AES_KEYSIZE_128) {
611 memcpy(ctx->key, key, keylen);
612 ctx->flags |= FLAGS_NEW_KEY;
613 return 0;
614 }
615
616 if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
617 return -EINVAL;
618
619 /*
620 * The requested key size is not supported by HW, do a fallback.
621 */
622 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
623 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
624 CRYPTO_TFM_REQ_MASK);
625 return crypto_skcipher_setkey(ctx->fallback, key, keylen);
626}
627
628static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
629{
630 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
631 struct sahara_dev *dev = dev_ptr;
632 int err = 0;
633
634 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
635 req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
636
637 if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
638 dev_err(dev->device,
639 "request size is not exact amount of AES blocks\n");
640 return -EINVAL;
641 }
642
643 rctx->mode = mode;
644
645 mutex_lock(&dev->queue_mutex);
646 err = crypto_enqueue_request(&dev->queue, &req->base);
647 mutex_unlock(&dev->queue_mutex);
648
649 wake_up_process(dev->kthread);
650
651 return err;
652}
653
654static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
655{
656 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
657 struct sahara_ctx *ctx = crypto_skcipher_ctx(
658 crypto_skcipher_reqtfm(req));
659
660 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
661 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
662 skcipher_request_set_callback(&rctx->fallback_req,
663 req->base.flags,
664 req->base.complete,
665 req->base.data);
666 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
667 req->dst, req->cryptlen, req->iv);
668 return crypto_skcipher_encrypt(&rctx->fallback_req);
669 }
670
671 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
672}
673
674static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
675{
676 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
677 struct sahara_ctx *ctx = crypto_skcipher_ctx(
678 crypto_skcipher_reqtfm(req));
679
680 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
681 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
682 skcipher_request_set_callback(&rctx->fallback_req,
683 req->base.flags,
684 req->base.complete,
685 req->base.data);
686 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
687 req->dst, req->cryptlen, req->iv);
688 return crypto_skcipher_decrypt(&rctx->fallback_req);
689 }
690
691 return sahara_aes_crypt(req, 0);
692}
693
694static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
695{
696 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
697 struct sahara_ctx *ctx = crypto_skcipher_ctx(
698 crypto_skcipher_reqtfm(req));
699
700 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
701 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
702 skcipher_request_set_callback(&rctx->fallback_req,
703 req->base.flags,
704 req->base.complete,
705 req->base.data);
706 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
707 req->dst, req->cryptlen, req->iv);
708 return crypto_skcipher_encrypt(&rctx->fallback_req);
709 }
710
711 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
712}
713
714static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
715{
716 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
717 struct sahara_ctx *ctx = crypto_skcipher_ctx(
718 crypto_skcipher_reqtfm(req));
719
720 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
721 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
722 skcipher_request_set_callback(&rctx->fallback_req,
723 req->base.flags,
724 req->base.complete,
725 req->base.data);
726 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
727 req->dst, req->cryptlen, req->iv);
728 return crypto_skcipher_decrypt(&rctx->fallback_req);
729 }
730
731 return sahara_aes_crypt(req, FLAGS_CBC);
732}
733
734static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
735{
736 const char *name = crypto_tfm_alg_name(&tfm->base);
737 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
738
739 ctx->fallback = crypto_alloc_skcipher(name, 0,
740 CRYPTO_ALG_NEED_FALLBACK);
741 if (IS_ERR(ctx->fallback)) {
742 pr_err("Error allocating fallback algo %s\n", name);
743 return PTR_ERR(ctx->fallback);
744 }
745
746 crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
747 crypto_skcipher_reqsize(ctx->fallback));
748
749 return 0;
750}
751
752static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
753{
754 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
755
756 crypto_free_skcipher(ctx->fallback);
757}
758
759static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
760 struct sahara_sha_reqctx *rctx)
761{
762 u32 hdr = 0;
763
764 hdr = rctx->mode;
765
766 if (rctx->first) {
767 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
768 hdr |= SAHARA_HDR_MDHA_INIT;
769 } else {
770 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
771 }
772
773 if (rctx->last)
774 hdr |= SAHARA_HDR_MDHA_PDATA;
775
776 if (hweight_long(hdr) % 2 == 0)
777 hdr |= SAHARA_HDR_PARITY_BIT;
778
779 return hdr;
780}
781
782static int sahara_sha_hw_links_create(struct sahara_dev *dev,
783 struct sahara_sha_reqctx *rctx,
784 int start)
785{
786 struct scatterlist *sg;
787 unsigned int i;
788 int ret;
789
790 dev->in_sg = rctx->in_sg;
791
792 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
793 if (dev->nb_in_sg < 0) {
794 dev_err(dev->device, "Invalid numbers of src SG.\n");
795 return dev->nb_in_sg;
796 }
797 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
798 dev_err(dev->device, "not enough hw links (%d)\n",
799 dev->nb_in_sg + dev->nb_out_sg);
800 return -EINVAL;
801 }
802
803 sg = dev->in_sg;
804 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
805 if (!ret)
806 return -EFAULT;
807
808 for (i = start; i < dev->nb_in_sg + start; i++) {
809 dev->hw_link[i]->len = sg->length;
810 dev->hw_link[i]->p = sg->dma_address;
811 if (i == (dev->nb_in_sg + start - 1)) {
812 dev->hw_link[i]->next = 0;
813 } else {
814 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
815 sg = sg_next(sg);
816 }
817 }
818
819 return i;
820}
821
/*
 * Build the data-processing descriptor (and its links) at @index.
 *
 * For the first chunk this emits the "initial" descriptor (#8); for
 * later chunks a plain hash descriptor (#10) that must follow a
 * context-load descriptor (#6).  The link after the input links always
 * stores the MDHA context (digest + length word) back to
 * dev->context_base so the next step can reload it.
 *
 * Returns 0 on success or a negative errno from link creation.
 */
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		/* Non-zero sg_in_idx tells the caller to unmap dev->in_sg. */
		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	/* Output link follows the input links (i is one past the last). */
	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
865
866/*
867 * Load descriptor aka #6
868 *
869 * To load a previously saved context back to the MDHA unit
870 *
871 * p1: Saved Context
872 * p2: NULL
873 *
874 */
875static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
876 struct sahara_sha_reqctx *rctx,
877 struct ahash_request *req,
878 int index)
879{
880 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
881
882 dev->hw_desc[index]->len1 = rctx->context_size;
883 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
884 dev->hw_desc[index]->len2 = 0;
885 dev->hw_desc[index]->p2 = 0;
886
887 dev->hw_link[index]->len = rctx->context_size;
888 dev->hw_link[index]->p = dev->context_phys_base;
889 dev->hw_link[index]->next = 0;
890
891 return 0;
892}
893
/*
 * Trim @sg so the chain covers exactly @nbytes: the entry containing
 * the final byte is shortened and marked as the end of the list.
 *
 * NOTE(review): this mutates the caller's scatterlist in place
 * (sg->length and the end marker) — req->src does not survive intact.
 *
 * Returns the number of bytes not covered by the chain (0 when @nbytes
 * fits), or @nbytes unchanged for a NULL/zero-length first entry.
 */
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			/* Final entry: clamp and terminate the list here. */
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
911
/*
 * Stage the request's data for hardware processing.
 *
 * Sub-block residue is carried between calls in rctx->buf.  If there is
 * too little data for a full block (and this is not the final chunk),
 * the bytes are buffered and 0 is returned — the request is already
 * complete.  Otherwise rctx->in_sg and rctx->total are set up (possibly
 * chaining the carried bytes in front of req->src) and -EINPROGRESS is
 * returned to tell the caller to run the hardware.
 *
 * NOTE(review): this rewrites req->src and resizes the source
 * scatterlist in place via sahara_walk_and_recalc() — the caller's view
 * of the request is modified.
 */
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		/* Chain: [carried bytes][current request data]. */
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
985
986static int sahara_sha_process(struct ahash_request *req)
987{
988 struct sahara_dev *dev = dev_ptr;
989 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
990 int ret;
991 unsigned long timeout;
992
993 ret = sahara_sha_prepare_request(req);
994 if (!ret)
995 return ret;
996
997 if (rctx->first) {
998 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
999 dev->hw_desc[0]->next = 0;
1000 rctx->first = 0;
1001 } else {
1002 memcpy(dev->context_base, rctx->context, rctx->context_size);
1003
1004 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1005 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1006 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1007 dev->hw_desc[1]->next = 0;
1008 }
1009
1010 sahara_dump_descriptors(dev);
1011 sahara_dump_links(dev);
1012
1013 reinit_completion(&dev->dma_completion);
1014
1015 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1016
1017 timeout = wait_for_completion_timeout(&dev->dma_completion,
1018 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1019 if (!timeout) {
1020 dev_err(dev->device, "SHA timeout\n");
1021 return -ETIMEDOUT;
1022 }
1023
1024 if (rctx->sg_in_idx)
1025 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1026 DMA_TO_DEVICE);
1027
1028 memcpy(rctx->context, dev->context_base, rctx->context_size);
1029
1030 if (req->result)
1031 memcpy(req->result, rctx->context, rctx->digest_size);
1032
1033 return 0;
1034}
1035
/*
 * Kthread main loop: drain the shared request queue, dispatching each
 * entry to the AES or SHA processing path, and sleep while it is empty.
 */
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		/*
		 * Go "sleepy" before checking the queue so a
		 * wake_up_process() that lands between the dequeue and
		 * schedule() is not lost.
		 */
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		/* Tell backlogged submitters their request is now queued. */
		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct skcipher_request *req =
					skcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			/* More work may be queued; don't sleep yet. */
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
1078
1079static int sahara_sha_enqueue(struct ahash_request *req, int last)
1080{
1081 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1082 struct sahara_dev *dev = dev_ptr;
1083 int ret;
1084
1085 if (!req->nbytes && !last)
1086 return 0;
1087
1088 rctx->last = last;
1089
1090 if (!rctx->active) {
1091 rctx->active = 1;
1092 rctx->first = 1;
1093 }
1094
1095 mutex_lock(&dev->queue_mutex);
1096 ret = crypto_enqueue_request(&dev->queue, &req->base);
1097 mutex_unlock(&dev->queue_mutex);
1098
1099 wake_up_process(dev->kthread);
1100
1101 return ret;
1102}
1103
1104static int sahara_sha_init(struct ahash_request *req)
1105{
1106 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1107 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1108
1109 memset(rctx, 0, sizeof(*rctx));
1110
1111 switch (crypto_ahash_digestsize(tfm)) {
1112 case SHA1_DIGEST_SIZE:
1113 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1114 rctx->digest_size = SHA1_DIGEST_SIZE;
1115 break;
1116 case SHA256_DIGEST_SIZE:
1117 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1118 rctx->digest_size = SHA256_DIGEST_SIZE;
1119 break;
1120 default:
1121 return -EINVAL;
1122 }
1123
1124 rctx->context_size = rctx->digest_size + 4;
1125 rctx->active = 0;
1126
1127 return 0;
1128}
1129
/* Hash update: queue a non-final chunk. */
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}
1134
1135static int sahara_sha_final(struct ahash_request *req)
1136{
1137 req->nbytes = 0;
1138 return sahara_sha_enqueue(req, 1);
1139}
1140
/* Hash finup: queue the remaining data as the final chunk. */
static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}
1145
/* One-shot hash: init followed by finup. */
static int sahara_sha_digest(struct ahash_request *req)
{
	int ret;

	/* Propagate init failure (e.g. unsupported digest size) instead
	 * of silently discarding it as the old code did.
	 */
	ret = sahara_sha_init(req);
	if (ret)
		return ret;

	return sahara_sha_finup(req);
}
1152
1153static int sahara_sha_export(struct ahash_request *req, void *out)
1154{
1155 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1156
1157 memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1158
1159 return 0;
1160}
1161
1162static int sahara_sha_import(struct ahash_request *req, const void *in)
1163{
1164 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1165
1166 memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1167
1168 return 0;
1169}
1170
1171static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1172{
1173 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1174 sizeof(struct sahara_sha_reqctx) +
1175 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1176
1177 return 0;
1178}
1179
1180static struct skcipher_alg aes_algs[] = {
1181{
1182 .base.cra_name = "ecb(aes)",
1183 .base.cra_driver_name = "sahara-ecb-aes",
1184 .base.cra_priority = 300,
1185 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1186 .base.cra_blocksize = AES_BLOCK_SIZE,
1187 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1188 .base.cra_alignmask = 0x0,
1189 .base.cra_module = THIS_MODULE,
1190
1191 .init = sahara_aes_init_tfm,
1192 .exit = sahara_aes_exit_tfm,
1193 .min_keysize = AES_MIN_KEY_SIZE ,
1194 .max_keysize = AES_MAX_KEY_SIZE,
1195 .setkey = sahara_aes_setkey,
1196 .encrypt = sahara_aes_ecb_encrypt,
1197 .decrypt = sahara_aes_ecb_decrypt,
1198}, {
1199 .base.cra_name = "cbc(aes)",
1200 .base.cra_driver_name = "sahara-cbc-aes",
1201 .base.cra_priority = 300,
1202 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1203 .base.cra_blocksize = AES_BLOCK_SIZE,
1204 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1205 .base.cra_alignmask = 0x0,
1206 .base.cra_module = THIS_MODULE,
1207
1208 .init = sahara_aes_init_tfm,
1209 .exit = sahara_aes_exit_tfm,
1210 .min_keysize = AES_MIN_KEY_SIZE ,
1211 .max_keysize = AES_MAX_KEY_SIZE,
1212 .ivsize = AES_BLOCK_SIZE,
1213 .setkey = sahara_aes_setkey,
1214 .encrypt = sahara_aes_cbc_encrypt,
1215 .decrypt = sahara_aes_cbc_decrypt,
1216}
1217};
1218
/* SHA-1: supported on all SAHARA versions (v3 and later). */
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	/*
	 * NOTE(review): statesize covers only struct sahara_sha_reqctx,
	 * while cra_init reserves SHA_BUFFER_LEN + SHA256_BLOCK_SIZE
	 * beyond it — confirm export/import really capture all state.
	 */
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
1244
/* SHA-256: only registered on SAHARA version 4+ hardware (see probe). */
static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	/* See the statesize note on sha_v3_algs. */
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
1270
1271static irqreturn_t sahara_irq_handler(int irq, void *data)
1272{
1273 struct sahara_dev *dev = (struct sahara_dev *)data;
1274 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1275 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1276
1277 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1278 SAHARA_REG_CMD);
1279
1280 sahara_decode_status(dev, stat);
1281
1282 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1283 return IRQ_NONE;
1284 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1285 dev->error = 0;
1286 } else {
1287 sahara_decode_error(dev, err);
1288 dev->error = -EINVAL;
1289 }
1290
1291 complete(&dev->dma_completion);
1292
1293 return IRQ_HANDLED;
1294}
1295
1296
1297static int sahara_register_algs(struct sahara_dev *dev)
1298{
1299 int err;
1300 unsigned int i, j, k, l;
1301
1302 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1303 err = crypto_register_skcipher(&aes_algs[i]);
1304 if (err)
1305 goto err_aes_algs;
1306 }
1307
1308 for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1309 err = crypto_register_ahash(&sha_v3_algs[k]);
1310 if (err)
1311 goto err_sha_v3_algs;
1312 }
1313
1314 if (dev->version > SAHARA_VERSION_3)
1315 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1316 err = crypto_register_ahash(&sha_v4_algs[l]);
1317 if (err)
1318 goto err_sha_v4_algs;
1319 }
1320
1321 return 0;
1322
1323err_sha_v4_algs:
1324 for (j = 0; j < l; j++)
1325 crypto_unregister_ahash(&sha_v4_algs[j]);
1326
1327err_sha_v3_algs:
1328 for (j = 0; j < k; j++)
1329 crypto_unregister_ahash(&sha_v3_algs[j]);
1330
1331err_aes_algs:
1332 for (j = 0; j < i; j++)
1333 crypto_unregister_skcipher(&aes_algs[j]);
1334
1335 return err;
1336}
1337
1338static void sahara_unregister_algs(struct sahara_dev *dev)
1339{
1340 unsigned int i;
1341
1342 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1343 crypto_unregister_skcipher(&aes_algs[i]);
1344
1345 for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1346 crypto_unregister_ahash(&sha_v3_algs[i]);
1347
1348 if (dev->version > SAHARA_VERSION_3)
1349 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1350 crypto_unregister_ahash(&sha_v4_algs[i]);
1351}
1352
/* Matched devices; the exact hardware version is validated in probe. */
static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1359
1360static int sahara_probe(struct platform_device *pdev)
1361{
1362 struct sahara_dev *dev;
1363 u32 version;
1364 int irq;
1365 int err;
1366 int i;
1367
1368 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1369 if (!dev)
1370 return -ENOMEM;
1371
1372 dev->device = &pdev->dev;
1373 platform_set_drvdata(pdev, dev);
1374
1375 /* Get the base address */
1376 dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1377 if (IS_ERR(dev->regs_base))
1378 return PTR_ERR(dev->regs_base);
1379
1380 /* Get the IRQ */
1381 irq = platform_get_irq(pdev, 0);
1382 if (irq < 0)
1383 return irq;
1384
1385 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1386 0, dev_name(&pdev->dev), dev);
1387 if (err) {
1388 dev_err(&pdev->dev, "failed to request irq\n");
1389 return err;
1390 }
1391
1392 /* clocks */
1393 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1394 if (IS_ERR(dev->clk_ipg)) {
1395 dev_err(&pdev->dev, "Could not get ipg clock\n");
1396 return PTR_ERR(dev->clk_ipg);
1397 }
1398
1399 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1400 if (IS_ERR(dev->clk_ahb)) {
1401 dev_err(&pdev->dev, "Could not get ahb clock\n");
1402 return PTR_ERR(dev->clk_ahb);
1403 }
1404
1405 /* Allocate HW descriptors */
1406 dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1407 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1408 &dev->hw_phys_desc[0], GFP_KERNEL);
1409 if (!dev->hw_desc[0]) {
1410 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1411 return -ENOMEM;
1412 }
1413 dev->hw_desc[1] = dev->hw_desc[0] + 1;
1414 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1415 sizeof(struct sahara_hw_desc);
1416
1417 /* Allocate space for iv and key */
1418 dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1419 &dev->key_phys_base, GFP_KERNEL);
1420 if (!dev->key_base) {
1421 dev_err(&pdev->dev, "Could not allocate memory for key\n");
1422 return -ENOMEM;
1423 }
1424 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1425 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1426
1427 /* Allocate space for context: largest digest + message length field */
1428 dev->context_base = dmam_alloc_coherent(&pdev->dev,
1429 SHA256_DIGEST_SIZE + 4,
1430 &dev->context_phys_base, GFP_KERNEL);
1431 if (!dev->context_base) {
1432 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1433 return -ENOMEM;
1434 }
1435
1436 /* Allocate space for HW links */
1437 dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1438 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1439 &dev->hw_phys_link[0], GFP_KERNEL);
1440 if (!dev->hw_link[0]) {
1441 dev_err(&pdev->dev, "Could not allocate hw links\n");
1442 return -ENOMEM;
1443 }
1444 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1445 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1446 sizeof(struct sahara_hw_link);
1447 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1448 }
1449
1450 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1451
1452 mutex_init(&dev->queue_mutex);
1453
1454 dev_ptr = dev;
1455
1456 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1457 if (IS_ERR(dev->kthread)) {
1458 return PTR_ERR(dev->kthread);
1459 }
1460
1461 init_completion(&dev->dma_completion);
1462
1463 err = clk_prepare_enable(dev->clk_ipg);
1464 if (err)
1465 return err;
1466 err = clk_prepare_enable(dev->clk_ahb);
1467 if (err)
1468 goto clk_ipg_disable;
1469
1470 version = sahara_read(dev, SAHARA_REG_VERSION);
1471 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1472 if (version != SAHARA_VERSION_3)
1473 err = -ENODEV;
1474 } else if (of_device_is_compatible(pdev->dev.of_node,
1475 "fsl,imx53-sahara")) {
1476 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1477 err = -ENODEV;
1478 version = (version >> 8) & 0xff;
1479 }
1480 if (err == -ENODEV) {
1481 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1482 version);
1483 goto err_algs;
1484 }
1485
1486 dev->version = version;
1487
1488 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1489 SAHARA_REG_CMD);
1490 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1491 SAHARA_CONTROL_SET_MAXBURST(8) |
1492 SAHARA_CONTROL_RNG_AUTORSD |
1493 SAHARA_CONTROL_ENABLE_INT,
1494 SAHARA_REG_CONTROL);
1495
1496 err = sahara_register_algs(dev);
1497 if (err)
1498 goto err_algs;
1499
1500 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1501
1502 return 0;
1503
1504err_algs:
1505 kthread_stop(dev->kthread);
1506 dev_ptr = NULL;
1507 clk_disable_unprepare(dev->clk_ahb);
1508clk_ipg_disable:
1509 clk_disable_unprepare(dev->clk_ipg);
1510
1511 return err;
1512}
1513
1514static int sahara_remove(struct platform_device *pdev)
1515{
1516 struct sahara_dev *dev = platform_get_drvdata(pdev);
1517
1518 kthread_stop(dev->kthread);
1519
1520 sahara_unregister_algs(dev);
1521
1522 clk_disable_unprepare(dev->clk_ipg);
1523 clk_disable_unprepare(dev->clk_ahb);
1524
1525 dev_ptr = NULL;
1526
1527 return 0;
1528}
1529
/* Platform driver glue, bound via the OF match table above. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");