1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Cryptographic API.
4 *
5 * Support for SAHARA cryptographic accelerator.
6 *
7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8 * Copyright (c) 2013 Vista Silicon S.L.
9 * Author: Javier Martin <javier.martin@vista-silicon.com>
10 *
11 * Based on omap-aes.c and tegra-aes.c
12 */
13
14#include <crypto/aes.h>
15#include <crypto/internal/hash.h>
16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/sha.h>
19
20#include <linux/clk.h>
21#include <linux/crypto.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/kthread.h>
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/platform_device.h>
32
33#define SHA_BUFFER_LEN PAGE_SIZE
34#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
35
36#define SAHARA_NAME "sahara"
37#define SAHARA_VERSION_3 3
38#define SAHARA_VERSION_4 4
39#define SAHARA_TIMEOUT_MS 1000
40#define SAHARA_MAX_HW_DESC 2
41#define SAHARA_MAX_HW_LINK 20
42
43#define FLAGS_MODE_MASK 0x000f
44#define FLAGS_ENCRYPT BIT(0)
45#define FLAGS_CBC BIT(1)
46#define FLAGS_NEW_KEY BIT(3)
47
48#define SAHARA_HDR_BASE 0x00800000
49#define SAHARA_HDR_SKHA_ALG_AES 0
50#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
51#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
52#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
53#define SAHARA_HDR_FORM_DATA (5 << 16)
54#define SAHARA_HDR_FORM_KEY (8 << 16)
55#define SAHARA_HDR_LLO (1 << 24)
56#define SAHARA_HDR_CHA_SKHA (1 << 28)
57#define SAHARA_HDR_CHA_MDHA (2 << 28)
58#define SAHARA_HDR_PARITY_BIT (1 << 31)
59
60#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
61#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
62#define SAHARA_HDR_MDHA_HASH 0xA0850000
63#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
64#define SAHARA_HDR_MDHA_ALG_SHA1 0
65#define SAHARA_HDR_MDHA_ALG_MD5 1
66#define SAHARA_HDR_MDHA_ALG_SHA256 2
67#define SAHARA_HDR_MDHA_ALG_SHA224 3
68#define SAHARA_HDR_MDHA_PDATA (1 << 2)
69#define SAHARA_HDR_MDHA_HMAC (1 << 3)
70#define SAHARA_HDR_MDHA_INIT (1 << 5)
71#define SAHARA_HDR_MDHA_IPAD (1 << 6)
72#define SAHARA_HDR_MDHA_OPAD (1 << 7)
73#define SAHARA_HDR_MDHA_SWAP (1 << 8)
74#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
75#define SAHARA_HDR_MDHA_SSL (1 << 10)
76
77/* SAHARA can only process one request at a time */
78#define SAHARA_QUEUE_LENGTH 1
79
80#define SAHARA_REG_VERSION 0x00
81#define SAHARA_REG_DAR 0x04
82#define SAHARA_REG_CONTROL 0x08
83#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
84#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
85#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
86#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
87#define SAHARA_REG_CMD 0x0C
88#define SAHARA_CMD_RESET (1 << 0)
89#define SAHARA_CMD_CLEAR_INT (1 << 8)
90#define SAHARA_CMD_CLEAR_ERR (1 << 9)
91#define SAHARA_CMD_SINGLE_STEP (1 << 10)
92#define SAHARA_CMD_MODE_BATCH (1 << 16)
93#define SAHARA_CMD_MODE_DEBUG (1 << 18)
94#define SAHARA_REG_STATUS 0x10
95#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
96#define SAHARA_STATE_IDLE 0
97#define SAHARA_STATE_BUSY 1
98#define SAHARA_STATE_ERR 2
99#define SAHARA_STATE_FAULT 3
100#define SAHARA_STATE_COMPLETE 4
101#define SAHARA_STATE_COMP_FLAG (1 << 2)
102#define SAHARA_STATUS_DAR_FULL (1 << 3)
103#define SAHARA_STATUS_ERROR (1 << 4)
104#define SAHARA_STATUS_SECURE (1 << 5)
105#define SAHARA_STATUS_FAIL (1 << 6)
106#define SAHARA_STATUS_INIT (1 << 7)
107#define SAHARA_STATUS_RNG_RESEED (1 << 8)
108#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
109#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
110#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
111#define SAHARA_STATUS_MODE_BATCH (1 << 16)
112#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
113#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
114#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
115#define SAHARA_REG_ERRSTATUS 0x14
116#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
117#define SAHARA_ERRSOURCE_CHA 14
118#define SAHARA_ERRSOURCE_DMA 15
119#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
 120#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
121#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
122#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
123#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
124#define SAHARA_REG_FADDR 0x18
125#define SAHARA_REG_CDAR 0x1C
126#define SAHARA_REG_IDAR 0x20
127
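/*
 * The DMA objects below are used as follows by this driver: a
 * sahara_hw_desc carries a command header, two pointer/length pairs
 * (each pointing either at a flat buffer or at a chain of
 * sahara_hw_link entries) and the physical address of the next
 * descriptor, while a sahara_hw_link describes one contiguous data
 * segment. A rough sketch of an AES operation with a fresh key, as
 * built by sahara_hw_descriptor_create():
 *
 *   desc[0] (key/IV) --next--> desc[1] (data)
 *                               p1 -> link[0] -> ...  (input segments)
 *                               p2 -> link[i] -> ...  (output segments)
 */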
128struct sahara_hw_desc {
129 u32 hdr;
130 u32 len1;
131 u32 p1;
132 u32 len2;
133 u32 p2;
134 u32 next;
135};
136
137struct sahara_hw_link {
138 u32 len;
139 u32 p;
140 u32 next;
141};
142
143struct sahara_ctx {
144 unsigned long flags;
145
146 /* AES-specific context */
147 int keylen;
148 u8 key[AES_KEYSIZE_128];
149 struct crypto_skcipher *fallback;
150};
151
152struct sahara_aes_reqctx {
153 unsigned long mode;
154 struct skcipher_request fallback_req; // keep at the end
155};
156
157/*
158 * struct sahara_sha_reqctx - private data per request
159 * @buf: holds data for requests smaller than block_size
160 * @rembuf: used to prepare one block_size-aligned request
161 * @context: hw-specific context for request. Digest is extracted from this
162 * @mode: specifies what type of hw-descriptor needs to be built
163 * @digest_size: length of digest for this request
164 * @context_size: length of hw-context for this request.
165 * Always digest_size + 4
166 * @buf_cnt: number of bytes saved in buf
167 * @sg_in_idx: number of hw links
168 * @in_sg: scatterlist for input data
169 * @in_sg_chain: scatterlists for chained input data
170 * @total: total number of bytes for transfer
171 * @last: is this the last block
172 * @first: is this the first block
173 * @active: inside a transfer
174 */
175struct sahara_sha_reqctx {
176 u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
177 u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
178 u8 context[SHA256_DIGEST_SIZE + 4];
179 unsigned int mode;
180 unsigned int digest_size;
181 unsigned int context_size;
182 unsigned int buf_cnt;
183 unsigned int sg_in_idx;
184 struct scatterlist *in_sg;
185 struct scatterlist in_sg_chain[2];
186 size_t total;
187 unsigned int last;
188 unsigned int first;
189 unsigned int active;
190};
191
192struct sahara_dev {
193 struct device *device;
194 unsigned int version;
195 void __iomem *regs_base;
196 struct clk *clk_ipg;
197 struct clk *clk_ahb;
198 struct mutex queue_mutex;
199 struct task_struct *kthread;
200 struct completion dma_completion;
201
202 struct sahara_ctx *ctx;
203 struct crypto_queue queue;
204 unsigned long flags;
205
206 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
207 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
208
209 u8 *key_base;
210 dma_addr_t key_phys_base;
211
212 u8 *iv_base;
213 dma_addr_t iv_phys_base;
214
215 u8 *context_base;
216 dma_addr_t context_phys_base;
217
218 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
219 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
220
221 size_t total;
222 struct scatterlist *in_sg;
223 int nb_in_sg;
224 struct scatterlist *out_sg;
225 int nb_out_sg;
226
227 u32 error;
228};
229
230static struct sahara_dev *dev_ptr;
231
232static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
233{
234 writel(data, dev->regs_base + reg);
235}
236
237static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
238{
239 return readl(dev->regs_base + reg);
240}
241
242static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
243{
244 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
245 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
246 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
247
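	/*
	 * Each flag set below flips the overall bit parity of the header,
	 * so the parity bit is toggled alongside it to keep the header's
	 * parity consistent with what the base value establishes.
	 */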
248 if (dev->flags & FLAGS_CBC) {
249 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
250 hdr ^= SAHARA_HDR_PARITY_BIT;
251 }
252
253 if (dev->flags & FLAGS_ENCRYPT) {
254 hdr |= SAHARA_HDR_SKHA_OP_ENC;
255 hdr ^= SAHARA_HDR_PARITY_BIT;
256 }
257
258 return hdr;
259}
260
261static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
262{
263 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
264 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
265}
266
267static const char *sahara_err_src[16] = {
268 "No error",
269 "Header error",
270 "Descriptor length error",
271 "Descriptor length or pointer error",
272 "Link length error",
273 "Link pointer error",
274 "Input buffer error",
275 "Output buffer error",
276 "Output buffer starvation",
277 "Internal state fault",
278 "General descriptor problem",
279 "Reserved",
280 "Descriptor address error",
281 "Link address error",
282 "CHA error",
283 "DMA error"
284};
285
286static const char *sahara_err_dmasize[4] = {
287 "Byte transfer",
288 "Half-word transfer",
289 "Word transfer",
290 "Reserved"
291};
292
293static const char *sahara_err_dmasrc[8] = {
294 "No error",
295 "AHB bus error",
296 "Internal IP bus error",
297 "Parity error",
298 "DMA crosses 256 byte boundary",
299 "DMA is busy",
300 "Reserved",
301 "DMA HW error"
302};
303
304static const char *sahara_cha_errsrc[12] = {
305 "Input buffer non-empty",
306 "Illegal address",
307 "Illegal mode",
308 "Illegal data size",
309 "Illegal key size",
310 "Write during processing",
311 "CTX read during processing",
312 "HW error",
313 "Input buffer disabled/underflow",
314 "Output buffer disabled/overflow",
315 "DES key parity error",
316 "Reserved"
317};
318
319static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
320
321static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
322{
323 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
324 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
325
326 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
327
328 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
329
330 if (source == SAHARA_ERRSOURCE_DMA) {
331 if (error & SAHARA_ERRSTATUS_DMA_DIR)
332 dev_err(dev->device, " * DMA read.\n");
333 else
334 dev_err(dev->device, " * DMA write.\n");
335
336 dev_err(dev->device, " * %s.\n",
337 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
338 dev_err(dev->device, " * %s.\n",
339 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
340 } else if (source == SAHARA_ERRSOURCE_CHA) {
341 dev_err(dev->device, " * %s.\n",
342 sahara_cha_errsrc[chasrc]);
343 dev_err(dev->device, " * %s.\n",
344 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
345 }
346 dev_err(dev->device, "\n");
347}
348
349static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
350
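/*
 * The status, descriptor and link dump helpers below are debug aids:
 * unless DEBUG is defined for this file, the __is_defined(DEBUG) check
 * makes them return immediately, so the dev_dbg() output is never
 * produced.
 */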
351static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
352{
353 u8 state;
354
355 if (!__is_defined(DEBUG))
356 return;
357
358 state = SAHARA_STATUS_GET_STATE(status);
359
360 dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
361 __func__, status);
362
363 dev_dbg(dev->device, " - State = %d:\n", state);
364 if (state & SAHARA_STATE_COMP_FLAG)
365 dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
366
367 dev_dbg(dev->device, " * %s.\n",
368 sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
369
370 if (status & SAHARA_STATUS_DAR_FULL)
371 dev_dbg(dev->device, " - DAR Full.\n");
372 if (status & SAHARA_STATUS_ERROR)
373 dev_dbg(dev->device, " - Error.\n");
374 if (status & SAHARA_STATUS_SECURE)
375 dev_dbg(dev->device, " - Secure.\n");
376 if (status & SAHARA_STATUS_FAIL)
377 dev_dbg(dev->device, " - Fail.\n");
378 if (status & SAHARA_STATUS_RNG_RESEED)
379 dev_dbg(dev->device, " - RNG Reseed Request.\n");
380 if (status & SAHARA_STATUS_ACTIVE_RNG)
381 dev_dbg(dev->device, " - RNG Active.\n");
382 if (status & SAHARA_STATUS_ACTIVE_MDHA)
383 dev_dbg(dev->device, " - MDHA Active.\n");
384 if (status & SAHARA_STATUS_ACTIVE_SKHA)
385 dev_dbg(dev->device, " - SKHA Active.\n");
386
387 if (status & SAHARA_STATUS_MODE_BATCH)
388 dev_dbg(dev->device, " - Batch Mode.\n");
389 else if (status & SAHARA_STATUS_MODE_DEDICATED)
390 dev_dbg(dev->device, " - Dedicated Mode.\n");
391 else if (status & SAHARA_STATUS_MODE_DEBUG)
392 dev_dbg(dev->device, " - Debug Mode.\n");
393
394 dev_dbg(dev->device, " - Internal state = 0x%02x\n",
395 SAHARA_STATUS_GET_ISTATE(status));
396
397 dev_dbg(dev->device, "Current DAR: 0x%08x\n",
398 sahara_read(dev, SAHARA_REG_CDAR));
399 dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
400 sahara_read(dev, SAHARA_REG_IDAR));
401}
402
403static void sahara_dump_descriptors(struct sahara_dev *dev)
404{
405 int i;
406
407 if (!__is_defined(DEBUG))
408 return;
409
410 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
411 dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
412 i, &dev->hw_phys_desc[i]);
413 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
414 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
415 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
416 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
417 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
418 dev_dbg(dev->device, "\tnext = 0x%08x\n",
419 dev->hw_desc[i]->next);
420 }
421 dev_dbg(dev->device, "\n");
422}
423
424static void sahara_dump_links(struct sahara_dev *dev)
425{
426 int i;
427
428 if (!__is_defined(DEBUG))
429 return;
430
431 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
432 dev_dbg(dev->device, "Link (%d) (%pad):\n",
433 i, &dev->hw_phys_link[i]);
434 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
435 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
436 dev_dbg(dev->device, "\tnext = 0x%08x\n",
437 dev->hw_link[i]->next);
438 }
439 dev_dbg(dev->device, "\n");
440}
441
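/*
 * Build the descriptor chain for one AES request. When a new key (and,
 * for CBC, an IV) must be loaded, descriptor 0 becomes a key/IV
 * descriptor chained to descriptor 1; otherwise only the data
 * descriptor is used. The data descriptor points p1 at the chain of
 * input links and p2 at the chain of output links, and writing the
 * first descriptor's physical address to DAR starts the transfer.
 */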
442static int sahara_hw_descriptor_create(struct sahara_dev *dev)
443{
444 struct sahara_ctx *ctx = dev->ctx;
445 struct scatterlist *sg;
446 int ret;
447 int i, j;
448 int idx = 0;
449
450 /* Copy new key if necessary */
451 if (ctx->flags & FLAGS_NEW_KEY) {
452 memcpy(dev->key_base, ctx->key, ctx->keylen);
453 ctx->flags &= ~FLAGS_NEW_KEY;
454
455 if (dev->flags & FLAGS_CBC) {
456 dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
457 dev->hw_desc[idx]->p1 = dev->iv_phys_base;
458 } else {
459 dev->hw_desc[idx]->len1 = 0;
460 dev->hw_desc[idx]->p1 = 0;
461 }
462 dev->hw_desc[idx]->len2 = ctx->keylen;
463 dev->hw_desc[idx]->p2 = dev->key_phys_base;
464 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
465
466 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
467
468 idx++;
469 }
470
471 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
472 if (dev->nb_in_sg < 0) {
 473 dev_err(dev->device, "Invalid number of src SG entries.\n");
474 return dev->nb_in_sg;
475 }
476 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
477 if (dev->nb_out_sg < 0) {
 478 dev_err(dev->device, "Invalid number of dst SG entries.\n");
479 return dev->nb_out_sg;
480 }
481 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
482 dev_err(dev->device, "not enough hw links (%d)\n",
483 dev->nb_in_sg + dev->nb_out_sg);
484 return -EINVAL;
485 }
486
487 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
488 DMA_TO_DEVICE);
489 if (ret != dev->nb_in_sg) {
490 dev_err(dev->device, "couldn't map in sg\n");
491 goto unmap_in;
492 }
493 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
494 DMA_FROM_DEVICE);
495 if (ret != dev->nb_out_sg) {
496 dev_err(dev->device, "couldn't map out sg\n");
497 goto unmap_out;
498 }
499
500 /* Create input links */
501 dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
502 sg = dev->in_sg;
503 for (i = 0; i < dev->nb_in_sg; i++) {
504 dev->hw_link[i]->len = sg->length;
505 dev->hw_link[i]->p = sg->dma_address;
506 if (i == (dev->nb_in_sg - 1)) {
507 dev->hw_link[i]->next = 0;
508 } else {
509 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
510 sg = sg_next(sg);
511 }
512 }
513
514 /* Create output links */
515 dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
516 sg = dev->out_sg;
517 for (j = i; j < dev->nb_out_sg + i; j++) {
518 dev->hw_link[j]->len = sg->length;
519 dev->hw_link[j]->p = sg->dma_address;
520 if (j == (dev->nb_out_sg + i - 1)) {
521 dev->hw_link[j]->next = 0;
522 } else {
523 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
524 sg = sg_next(sg);
525 }
526 }
527
528 /* Fill remaining fields of hw_desc[1] */
529 dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
530 dev->hw_desc[idx]->len1 = dev->total;
531 dev->hw_desc[idx]->len2 = dev->total;
532 dev->hw_desc[idx]->next = 0;
533
534 sahara_dump_descriptors(dev);
535 sahara_dump_links(dev);
536
537 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
538
539 return 0;
540
541unmap_out:
542 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
543 DMA_FROM_DEVICE);
544unmap_in:
545 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
546 DMA_TO_DEVICE);
547
548 return -EINVAL;
549}
550
551static int sahara_aes_process(struct skcipher_request *req)
552{
553 struct sahara_dev *dev = dev_ptr;
554 struct sahara_ctx *ctx;
555 struct sahara_aes_reqctx *rctx;
556 int ret;
557 unsigned long timeout;
558
559 /* Request is ready to be dispatched by the device */
560 dev_dbg(dev->device,
561 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
562 req->cryptlen, req->src, req->dst);
563
564 /* assign new request to device */
565 dev->total = req->cryptlen;
566 dev->in_sg = req->src;
567 dev->out_sg = req->dst;
568
569 rctx = skcipher_request_ctx(req);
570 ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
571 rctx->mode &= FLAGS_MODE_MASK;
572 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
573
574 if ((dev->flags & FLAGS_CBC) && req->iv)
 575 memcpy(dev->iv_base, req->iv, AES_BLOCK_SIZE);
576
577 /* assign new context to device */
578 dev->ctx = ctx;
579
580 reinit_completion(&dev->dma_completion);
581
582 ret = sahara_hw_descriptor_create(dev);
583 if (ret)
584 return -EINVAL;
585
586 timeout = wait_for_completion_timeout(&dev->dma_completion,
587 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
588 if (!timeout) {
589 dev_err(dev->device, "AES timeout\n");
590 return -ETIMEDOUT;
591 }
592
593 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
594 DMA_FROM_DEVICE);
595 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
596 DMA_TO_DEVICE);
597
598 return 0;
599}
600
601static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
602 unsigned int keylen)
603{
604 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
605
606 ctx->keylen = keylen;
607
 608 /* SAHARA only supports 128-bit keys */
609 if (keylen == AES_KEYSIZE_128) {
610 memcpy(ctx->key, key, keylen);
611 ctx->flags |= FLAGS_NEW_KEY;
612 return 0;
613 }
614
615 if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
616 return -EINVAL;
617
618 /*
619 * The requested key size is not supported by HW, do a fallback.
620 */
621 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
622 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
623 CRYPTO_TFM_REQ_MASK);
624 return crypto_skcipher_setkey(ctx->fallback, key, keylen);
625}
626
627static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
628{
629 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
630 struct sahara_dev *dev = dev_ptr;
631 int err = 0;
632
633 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
634 req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
635
636 if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
637 dev_err(dev->device,
638 "request size is not exact amount of AES blocks\n");
639 return -EINVAL;
640 }
641
642 rctx->mode = mode;
643
644 mutex_lock(&dev->queue_mutex);
645 err = crypto_enqueue_request(&dev->queue, &req->base);
646 mutex_unlock(&dev->queue_mutex);
647
648 wake_up_process(dev->kthread);
649
650 return err;
651}
652
653static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
654{
655 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
656 struct sahara_ctx *ctx = crypto_skcipher_ctx(
657 crypto_skcipher_reqtfm(req));
658
659 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
660 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
661 skcipher_request_set_callback(&rctx->fallback_req,
662 req->base.flags,
663 req->base.complete,
664 req->base.data);
665 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
666 req->dst, req->cryptlen, req->iv);
667 return crypto_skcipher_encrypt(&rctx->fallback_req);
668 }
669
670 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
671}
672
673static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
674{
675 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
676 struct sahara_ctx *ctx = crypto_skcipher_ctx(
677 crypto_skcipher_reqtfm(req));
678
679 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
680 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
681 skcipher_request_set_callback(&rctx->fallback_req,
682 req->base.flags,
683 req->base.complete,
684 req->base.data);
685 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
686 req->dst, req->cryptlen, req->iv);
687 return crypto_skcipher_decrypt(&rctx->fallback_req);
688 }
689
690 return sahara_aes_crypt(req, 0);
691}
692
693static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
694{
695 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
696 struct sahara_ctx *ctx = crypto_skcipher_ctx(
697 crypto_skcipher_reqtfm(req));
698
699 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
700 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
701 skcipher_request_set_callback(&rctx->fallback_req,
702 req->base.flags,
703 req->base.complete,
704 req->base.data);
705 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
706 req->dst, req->cryptlen, req->iv);
707 return crypto_skcipher_encrypt(&rctx->fallback_req);
708 }
709
710 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
711}
712
713static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
714{
715 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
716 struct sahara_ctx *ctx = crypto_skcipher_ctx(
717 crypto_skcipher_reqtfm(req));
718
719 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
720 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
721 skcipher_request_set_callback(&rctx->fallback_req,
722 req->base.flags,
723 req->base.complete,
724 req->base.data);
725 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
726 req->dst, req->cryptlen, req->iv);
727 return crypto_skcipher_decrypt(&rctx->fallback_req);
728 }
729
730 return sahara_aes_crypt(req, FLAGS_CBC);
731}
732
733static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
734{
735 const char *name = crypto_tfm_alg_name(&tfm->base);
736 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
737
738 ctx->fallback = crypto_alloc_skcipher(name, 0,
739 CRYPTO_ALG_NEED_FALLBACK);
740 if (IS_ERR(ctx->fallback)) {
741 pr_err("Error allocating fallback algo %s\n", name);
742 return PTR_ERR(ctx->fallback);
743 }
744
745 crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
746 crypto_skcipher_reqsize(ctx->fallback));
747
748 return 0;
749}
750
751static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
752{
753 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
754
755 crypto_free_skcipher(ctx->fallback);
756}
757
758static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
759 struct sahara_sha_reqctx *rctx)
760{
761 u32 hdr = 0;
762
763 hdr = rctx->mode;
764
765 if (rctx->first) {
766 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
767 hdr |= SAHARA_HDR_MDHA_INIT;
768 } else {
769 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
770 }
771
772 if (rctx->last)
773 hdr |= SAHARA_HDR_MDHA_PDATA;
774
775 if (hweight_long(hdr) % 2 == 0)
776 hdr |= SAHARA_HDR_PARITY_BIT;
777
778 return hdr;
779}
780
781static int sahara_sha_hw_links_create(struct sahara_dev *dev,
782 struct sahara_sha_reqctx *rctx,
783 int start)
784{
785 struct scatterlist *sg;
786 unsigned int i;
787 int ret;
788
789 dev->in_sg = rctx->in_sg;
790
791 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
792 if (dev->nb_in_sg < 0) {
 793 dev_err(dev->device, "Invalid number of src SG entries.\n");
794 return dev->nb_in_sg;
795 }
 796 if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
 797 dev_err(dev->device, "not enough hw links (%d)\n",
 798 dev->nb_in_sg);
799 return -EINVAL;
800 }
801
802 sg = dev->in_sg;
803 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
804 if (!ret)
805 return -EFAULT;
806
807 for (i = start; i < dev->nb_in_sg + start; i++) {
808 dev->hw_link[i]->len = sg->length;
809 dev->hw_link[i]->p = sg->dma_address;
810 if (i == (dev->nb_in_sg + start - 1)) {
811 dev->hw_link[i]->next = 0;
812 } else {
813 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
814 sg = sg_next(sg);
815 }
816 }
817
818 return i;
819}
820
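/*
 * Build the MDHA data descriptor. The "#6", "#8" and "#10" labels in
 * the comments below appear to refer to descriptor templates from the
 * SAHARA documentation: #8 starts a new hash, #6 reloads a previously
 * saved context and #10 continues hashing after such a reload. len2/p2
 * always point (via one extra link) at the context buffer, so the
 * hardware writes the updated context (digest plus message length
 * word) back after each pass.
 */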
821static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
822 struct sahara_sha_reqctx *rctx,
823 struct ahash_request *req,
824 int index)
825{
826 unsigned result_len;
827 int i = index;
828
829 if (rctx->first)
 830 /* Create initial descriptor: #8 */
831 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
832 else
833 /* Create hash descriptor: #10. Must follow #6. */
834 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
835
836 dev->hw_desc[index]->len1 = rctx->total;
837 if (dev->hw_desc[index]->len1 == 0) {
838 /* if len1 is 0, p1 must be 0, too */
839 dev->hw_desc[index]->p1 = 0;
840 rctx->sg_in_idx = 0;
841 } else {
842 /* Create input links */
843 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
844 i = sahara_sha_hw_links_create(dev, rctx, index);
845
846 rctx->sg_in_idx = index;
847 if (i < 0)
848 return i;
849 }
850
851 dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
852
853 /* Save the context for the next operation */
854 result_len = rctx->context_size;
855 dev->hw_link[i]->p = dev->context_phys_base;
856
857 dev->hw_link[i]->len = result_len;
858 dev->hw_desc[index]->len2 = result_len;
859
860 dev->hw_link[i]->next = 0;
861
862 return 0;
863}
864
865/*
866 * Load descriptor aka #6
867 *
868 * To load a previously saved context back to the MDHA unit
869 *
870 * p1: Saved Context
871 * p2: NULL
872 *
873 */
874static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
875 struct sahara_sha_reqctx *rctx,
876 struct ahash_request *req,
877 int index)
878{
879 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
880
881 dev->hw_desc[index]->len1 = rctx->context_size;
882 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
883 dev->hw_desc[index]->len2 = 0;
884 dev->hw_desc[index]->p2 = 0;
885
886 dev->hw_link[index]->len = rctx->context_size;
887 dev->hw_link[index]->p = dev->context_phys_base;
888 dev->hw_link[index]->next = 0;
889
890 return 0;
891}
892
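/*
 * Trim a scatterlist so that it covers exactly nbytes: the entry
 * holding the final byte is shortened and marked as the end of the
 * list; any residual count at the point the walk stops is returned
 * (the caller currently ignores it).
 */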
893static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
894{
895 if (!sg || !sg->length)
896 return nbytes;
897
898 while (nbytes && sg) {
899 if (nbytes <= sg->length) {
900 sg->length = nbytes;
901 sg_mark_end(sg);
902 break;
903 }
904 nbytes -= sg->length;
905 sg = sg_next(sg);
906 }
907
908 return nbytes;
909}
910
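/*
 * Stage the data for one hash pass. Only the final transfer may contain
 * a partial block (the hardware pads it), so any tail that is not a
 * multiple of the block size is saved in rctx->buf for the next update,
 * and bytes saved by a previous call are prepended via rctx->rembuf and
 * a two-entry chained scatterlist. Returns 0 when everything was merely
 * buffered and there is nothing for the hardware to do yet,
 * -EINPROGRESS when a transfer has been prepared.
 */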
911static int sahara_sha_prepare_request(struct ahash_request *req)
912{
913 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
914 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
915 unsigned int hash_later;
916 unsigned int block_size;
917 unsigned int len;
918
919 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
920
921 /* append bytes from previous operation */
922 len = rctx->buf_cnt + req->nbytes;
923
924 /* only the last transfer can be padded in hardware */
925 if (!rctx->last && (len < block_size)) {
 926 /* too little data, save it for the next operation */
927 scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
928 0, req->nbytes, 0);
929 rctx->buf_cnt += req->nbytes;
930
931 return 0;
932 }
933
934 /* add data from previous operation first */
935 if (rctx->buf_cnt)
936 memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
937
938 /* data must always be a multiple of block_size */
939 hash_later = rctx->last ? 0 : len & (block_size - 1);
940 if (hash_later) {
941 unsigned int offset = req->nbytes - hash_later;
942 /* Save remaining bytes for later use */
943 scatterwalk_map_and_copy(rctx->buf, req->src, offset,
944 hash_later, 0);
945 }
946
 947 /* nbytes should now be a multiple of block_size */
948 req->nbytes = req->nbytes - hash_later;
949
950 sahara_walk_and_recalc(req->src, req->nbytes);
951
952 /* have data from previous operation and current */
953 if (rctx->buf_cnt && req->nbytes) {
954 sg_init_table(rctx->in_sg_chain, 2);
955 sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
956
957 sg_chain(rctx->in_sg_chain, 2, req->src);
958
959 rctx->total = req->nbytes + rctx->buf_cnt;
960 rctx->in_sg = rctx->in_sg_chain;
961
962 req->src = rctx->in_sg_chain;
963 /* only data from previous operation */
964 } else if (rctx->buf_cnt) {
965 if (req->src)
966 rctx->in_sg = req->src;
967 else
968 rctx->in_sg = rctx->in_sg_chain;
969 /* buf was copied into rembuf above */
970 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
971 rctx->total = rctx->buf_cnt;
972 /* no data from previous operation */
973 } else {
974 rctx->in_sg = req->src;
975 rctx->total = req->nbytes;
976 req->src = rctx->in_sg;
977 }
978
979 /* on next call, we only have the remaining data in the buffer */
980 rctx->buf_cnt = hash_later;
981
982 return -EINPROGRESS;
983}
984
985static int sahara_sha_process(struct ahash_request *req)
986{
987 struct sahara_dev *dev = dev_ptr;
988 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
989 int ret;
990 unsigned long timeout;
991
992 ret = sahara_sha_prepare_request(req);
993 if (!ret)
994 return ret;
995
996 if (rctx->first) {
997 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
998 dev->hw_desc[0]->next = 0;
999 rctx->first = 0;
1000 } else {
1001 memcpy(dev->context_base, rctx->context, rctx->context_size);
1002
1003 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1004 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1005 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1006 dev->hw_desc[1]->next = 0;
1007 }
1008
1009 sahara_dump_descriptors(dev);
1010 sahara_dump_links(dev);
1011
1012 reinit_completion(&dev->dma_completion);
1013
1014 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1015
1016 timeout = wait_for_completion_timeout(&dev->dma_completion,
1017 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1018 if (!timeout) {
1019 dev_err(dev->device, "SHA timeout\n");
1020 return -ETIMEDOUT;
1021 }
1022
1023 if (rctx->sg_in_idx)
1024 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1025 DMA_TO_DEVICE);
1026
1027 memcpy(rctx->context, dev->context_base, rctx->context_size);
1028
1029 if (req->result)
1030 memcpy(req->result, rctx->context, rctx->digest_size);
1031
1032 return 0;
1033}
1034
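/*
 * Worker thread that feeds the hardware. The SAHARA unit processes one
 * descriptor chain at a time (SAHARA_QUEUE_LENGTH is 1), so requests
 * queued by the algorithm entry points are dequeued here and run
 * strictly one after another, with the thread sleeping whenever the
 * queue is empty.
 */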
1035static int sahara_queue_manage(void *data)
1036{
1037 struct sahara_dev *dev = (struct sahara_dev *)data;
1038 struct crypto_async_request *async_req;
1039 struct crypto_async_request *backlog;
1040 int ret = 0;
1041
1042 do {
1043 __set_current_state(TASK_INTERRUPTIBLE);
1044
1045 mutex_lock(&dev->queue_mutex);
1046 backlog = crypto_get_backlog(&dev->queue);
1047 async_req = crypto_dequeue_request(&dev->queue);
1048 mutex_unlock(&dev->queue_mutex);
1049
1050 if (backlog)
1051 backlog->complete(backlog, -EINPROGRESS);
1052
1053 if (async_req) {
1054 if (crypto_tfm_alg_type(async_req->tfm) ==
1055 CRYPTO_ALG_TYPE_AHASH) {
1056 struct ahash_request *req =
1057 ahash_request_cast(async_req);
1058
1059 ret = sahara_sha_process(req);
1060 } else {
1061 struct skcipher_request *req =
1062 skcipher_request_cast(async_req);
1063
1064 ret = sahara_aes_process(req);
1065 }
1066
1067 async_req->complete(async_req, ret);
1068
1069 continue;
1070 }
1071
1072 schedule();
1073 } while (!kthread_should_stop());
1074
1075 return 0;
1076}
1077
1078static int sahara_sha_enqueue(struct ahash_request *req, int last)
1079{
1080 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1081 struct sahara_dev *dev = dev_ptr;
1082 int ret;
1083
1084 if (!req->nbytes && !last)
1085 return 0;
1086
1087 rctx->last = last;
1088
1089 if (!rctx->active) {
1090 rctx->active = 1;
1091 rctx->first = 1;
1092 }
1093
1094 mutex_lock(&dev->queue_mutex);
1095 ret = crypto_enqueue_request(&dev->queue, &req->base);
1096 mutex_unlock(&dev->queue_mutex);
1097
1098 wake_up_process(dev->kthread);
1099
1100 return ret;
1101}
1102
1103static int sahara_sha_init(struct ahash_request *req)
1104{
1105 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1106 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1107
1108 memset(rctx, 0, sizeof(*rctx));
1109
1110 switch (crypto_ahash_digestsize(tfm)) {
1111 case SHA1_DIGEST_SIZE:
1112 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1113 rctx->digest_size = SHA1_DIGEST_SIZE;
1114 break;
1115 case SHA256_DIGEST_SIZE:
1116 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1117 rctx->digest_size = SHA256_DIGEST_SIZE;
1118 break;
1119 default:
1120 return -EINVAL;
1121 }
1122
1123 rctx->context_size = rctx->digest_size + 4;
1124 rctx->active = 0;
1125
1126 return 0;
1127}
1128
1129static int sahara_sha_update(struct ahash_request *req)
1130{
1131 return sahara_sha_enqueue(req, 0);
1132}
1133
1134static int sahara_sha_final(struct ahash_request *req)
1135{
1136 req->nbytes = 0;
1137 return sahara_sha_enqueue(req, 1);
1138}
1139
1140static int sahara_sha_finup(struct ahash_request *req)
1141{
1142 return sahara_sha_enqueue(req, 1);
1143}
1144
1145static int sahara_sha_digest(struct ahash_request *req)
1146{
1147 sahara_sha_init(req);
1148
1149 return sahara_sha_finup(req);
1150}
1151
1152static int sahara_sha_export(struct ahash_request *req, void *out)
1153{
1154 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1155
1156 memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1157
1158 return 0;
1159}
1160
1161static int sahara_sha_import(struct ahash_request *req, const void *in)
1162{
1163 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1164
1165 memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1166
1167 return 0;
1168}
1169
1170static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1171{
1172 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1173 sizeof(struct sahara_sha_reqctx) +
1174 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1175
1176 return 0;
1177}
1178
1179static struct skcipher_alg aes_algs[] = {
1180{
1181 .base.cra_name = "ecb(aes)",
1182 .base.cra_driver_name = "sahara-ecb-aes",
1183 .base.cra_priority = 300,
1184 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1185 .base.cra_blocksize = AES_BLOCK_SIZE,
1186 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1187 .base.cra_alignmask = 0x0,
1188 .base.cra_module = THIS_MODULE,
1189
1190 .init = sahara_aes_init_tfm,
1191 .exit = sahara_aes_exit_tfm,
 1192 .min_keysize = AES_MIN_KEY_SIZE,
1193 .max_keysize = AES_MAX_KEY_SIZE,
1194 .setkey = sahara_aes_setkey,
1195 .encrypt = sahara_aes_ecb_encrypt,
1196 .decrypt = sahara_aes_ecb_decrypt,
1197}, {
1198 .base.cra_name = "cbc(aes)",
1199 .base.cra_driver_name = "sahara-cbc-aes",
1200 .base.cra_priority = 300,
1201 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1202 .base.cra_blocksize = AES_BLOCK_SIZE,
1203 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1204 .base.cra_alignmask = 0x0,
1205 .base.cra_module = THIS_MODULE,
1206
1207 .init = sahara_aes_init_tfm,
1208 .exit = sahara_aes_exit_tfm,
 1209 .min_keysize = AES_MIN_KEY_SIZE,
1210 .max_keysize = AES_MAX_KEY_SIZE,
1211 .ivsize = AES_BLOCK_SIZE,
1212 .setkey = sahara_aes_setkey,
1213 .encrypt = sahara_aes_cbc_encrypt,
1214 .decrypt = sahara_aes_cbc_decrypt,
1215}
1216};
1217
1218static struct ahash_alg sha_v3_algs[] = {
1219{
1220 .init = sahara_sha_init,
1221 .update = sahara_sha_update,
1222 .final = sahara_sha_final,
1223 .finup = sahara_sha_finup,
1224 .digest = sahara_sha_digest,
1225 .export = sahara_sha_export,
1226 .import = sahara_sha_import,
1227 .halg.digestsize = SHA1_DIGEST_SIZE,
1228 .halg.statesize = sizeof(struct sahara_sha_reqctx),
1229 .halg.base = {
1230 .cra_name = "sha1",
1231 .cra_driver_name = "sahara-sha1",
1232 .cra_priority = 300,
1233 .cra_flags = CRYPTO_ALG_ASYNC |
1234 CRYPTO_ALG_NEED_FALLBACK,
1235 .cra_blocksize = SHA1_BLOCK_SIZE,
1236 .cra_ctxsize = sizeof(struct sahara_ctx),
1237 .cra_alignmask = 0,
1238 .cra_module = THIS_MODULE,
1239 .cra_init = sahara_sha_cra_init,
1240 }
1241},
1242};
1243
1244static struct ahash_alg sha_v4_algs[] = {
1245{
1246 .init = sahara_sha_init,
1247 .update = sahara_sha_update,
1248 .final = sahara_sha_final,
1249 .finup = sahara_sha_finup,
1250 .digest = sahara_sha_digest,
1251 .export = sahara_sha_export,
1252 .import = sahara_sha_import,
1253 .halg.digestsize = SHA256_DIGEST_SIZE,
1254 .halg.statesize = sizeof(struct sahara_sha_reqctx),
1255 .halg.base = {
1256 .cra_name = "sha256",
1257 .cra_driver_name = "sahara-sha256",
1258 .cra_priority = 300,
1259 .cra_flags = CRYPTO_ALG_ASYNC |
1260 CRYPTO_ALG_NEED_FALLBACK,
1261 .cra_blocksize = SHA256_BLOCK_SIZE,
1262 .cra_ctxsize = sizeof(struct sahara_ctx),
1263 .cra_alignmask = 0,
1264 .cra_module = THIS_MODULE,
1265 .cra_init = sahara_sha_cra_init,
1266 }
1267},
1268};
1269
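/*
 * Completion interrupt: read and clear the status and error registers,
 * record whether the descriptor chain finished successfully, and wake
 * the thread waiting on dma_completion. A "busy" state means the chain
 * has not finished, so the interrupt is treated as spurious (IRQ_NONE).
 */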
1270static irqreturn_t sahara_irq_handler(int irq, void *data)
1271{
1272 struct sahara_dev *dev = (struct sahara_dev *)data;
1273 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1274 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1275
1276 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1277 SAHARA_REG_CMD);
1278
1279 sahara_decode_status(dev, stat);
1280
1281 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1282 return IRQ_NONE;
1283 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1284 dev->error = 0;
1285 } else {
1286 sahara_decode_error(dev, err);
1287 dev->error = -EINVAL;
1288 }
1289
1290 complete(&dev->dma_completion);
1291
1292 return IRQ_HANDLED;
1293}
1294
1296static int sahara_register_algs(struct sahara_dev *dev)
1297{
1298 int err;
1299 unsigned int i, j, k, l;
1300
1301 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1302 err = crypto_register_skcipher(&aes_algs[i]);
1303 if (err)
1304 goto err_aes_algs;
1305 }
1306
1307 for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1308 err = crypto_register_ahash(&sha_v3_algs[k]);
1309 if (err)
1310 goto err_sha_v3_algs;
1311 }
1312
1313 if (dev->version > SAHARA_VERSION_3)
1314 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1315 err = crypto_register_ahash(&sha_v4_algs[l]);
1316 if (err)
1317 goto err_sha_v4_algs;
1318 }
1319
1320 return 0;
1321
1322err_sha_v4_algs:
1323 for (j = 0; j < l; j++)
1324 crypto_unregister_ahash(&sha_v4_algs[j]);
1325
1326err_sha_v3_algs:
1327 for (j = 0; j < k; j++)
1328 crypto_unregister_ahash(&sha_v3_algs[j]);
1329
1330err_aes_algs:
1331 for (j = 0; j < i; j++)
1332 crypto_unregister_skcipher(&aes_algs[j]);
1333
1334 return err;
1335}
1336
1337static void sahara_unregister_algs(struct sahara_dev *dev)
1338{
1339 unsigned int i;
1340
1341 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1342 crypto_unregister_skcipher(&aes_algs[i]);
1343
1344 for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1345 crypto_unregister_ahash(&sha_v3_algs[i]);
1346
1347 if (dev->version > SAHARA_VERSION_3)
1348 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1349 crypto_unregister_ahash(&sha_v4_algs[i]);
1350}
1351
1352static const struct platform_device_id sahara_platform_ids[] = {
1353 { .name = "sahara-imx27" },
1354 { /* sentinel */ }
1355};
1356MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1357
1358static const struct of_device_id sahara_dt_ids[] = {
1359 { .compatible = "fsl,imx53-sahara" },
1360 { .compatible = "fsl,imx27-sahara" },
1361 { /* sentinel */ }
1362};
1363MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1364
1365static int sahara_probe(struct platform_device *pdev)
1366{
1367 struct sahara_dev *dev;
1368 u32 version;
1369 int irq;
1370 int err;
1371 int i;
1372
1373 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1374 if (!dev)
1375 return -ENOMEM;
1376
1377 dev->device = &pdev->dev;
1378 platform_set_drvdata(pdev, dev);
1379
1380 /* Get the base address */
1381 dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1382 if (IS_ERR(dev->regs_base))
1383 return PTR_ERR(dev->regs_base);
1384
1385 /* Get the IRQ */
1386 irq = platform_get_irq(pdev, 0);
1387 if (irq < 0)
1388 return irq;
1389
1390 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1391 0, dev_name(&pdev->dev), dev);
1392 if (err) {
1393 dev_err(&pdev->dev, "failed to request irq\n");
1394 return err;
1395 }
1396
1397 /* clocks */
1398 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1399 if (IS_ERR(dev->clk_ipg)) {
1400 dev_err(&pdev->dev, "Could not get ipg clock\n");
1401 return PTR_ERR(dev->clk_ipg);
1402 }
1403
1404 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1405 if (IS_ERR(dev->clk_ahb)) {
1406 dev_err(&pdev->dev, "Could not get ahb clock\n");
1407 return PTR_ERR(dev->clk_ahb);
1408 }
1409
1410 /* Allocate HW descriptors */
1411 dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1412 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1413 &dev->hw_phys_desc[0], GFP_KERNEL);
1414 if (!dev->hw_desc[0]) {
1415 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1416 return -ENOMEM;
1417 }
1418 dev->hw_desc[1] = dev->hw_desc[0] + 1;
1419 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1420 sizeof(struct sahara_hw_desc);
1421
1422 /* Allocate space for iv and key */
1423 dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1424 &dev->key_phys_base, GFP_KERNEL);
1425 if (!dev->key_base) {
1426 dev_err(&pdev->dev, "Could not allocate memory for key\n");
1427 return -ENOMEM;
1428 }
1429 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1430 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1431
1432 /* Allocate space for context: largest digest + message length field */
1433 dev->context_base = dmam_alloc_coherent(&pdev->dev,
1434 SHA256_DIGEST_SIZE + 4,
1435 &dev->context_phys_base, GFP_KERNEL);
1436 if (!dev->context_base) {
1437 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1438 return -ENOMEM;
1439 }
1440
1441 /* Allocate space for HW links */
1442 dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1443 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1444 &dev->hw_phys_link[0], GFP_KERNEL);
1445 if (!dev->hw_link[0]) {
1446 dev_err(&pdev->dev, "Could not allocate hw links\n");
1447 return -ENOMEM;
1448 }
1449 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1450 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1451 sizeof(struct sahara_hw_link);
1452 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1453 }
1454
1455 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1456
1457 mutex_init(&dev->queue_mutex);
1458
1459 dev_ptr = dev;
1460
1461 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
 1462 if (IS_ERR(dev->kthread))
 1463 return PTR_ERR(dev->kthread);
1465
1466 init_completion(&dev->dma_completion);
1467
1468 err = clk_prepare_enable(dev->clk_ipg);
1469 if (err)
1470 return err;
1471 err = clk_prepare_enable(dev->clk_ahb);
1472 if (err)
1473 goto clk_ipg_disable;
1474
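	/*
	 * The VERSION register layout differs between SAHARA revisions: on
	 * i.MX27 (version 3) the low byte is checked directly, while on
	 * i.MX53 (version 4) the value used here sits in bits 15:8. This
	 * describes how the register is read below rather than a documented
	 * guarantee.
	 */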
1475 version = sahara_read(dev, SAHARA_REG_VERSION);
1476 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1477 if (version != SAHARA_VERSION_3)
1478 err = -ENODEV;
1479 } else if (of_device_is_compatible(pdev->dev.of_node,
1480 "fsl,imx53-sahara")) {
1481 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1482 err = -ENODEV;
1483 version = (version >> 8) & 0xff;
1484 }
1485 if (err == -ENODEV) {
1486 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1487 version);
1488 goto err_algs;
1489 }
1490
1491 dev->version = version;
1492
1493 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1494 SAHARA_REG_CMD);
1495 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1496 SAHARA_CONTROL_SET_MAXBURST(8) |
1497 SAHARA_CONTROL_RNG_AUTORSD |
1498 SAHARA_CONTROL_ENABLE_INT,
1499 SAHARA_REG_CONTROL);
1500
1501 err = sahara_register_algs(dev);
1502 if (err)
1503 goto err_algs;
1504
1505 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1506
1507 return 0;
1508
1509err_algs:
1510 kthread_stop(dev->kthread);
1511 dev_ptr = NULL;
1512 clk_disable_unprepare(dev->clk_ahb);
1513clk_ipg_disable:
1514 clk_disable_unprepare(dev->clk_ipg);
1515
1516 return err;
1517}
1518
1519static int sahara_remove(struct platform_device *pdev)
1520{
1521 struct sahara_dev *dev = platform_get_drvdata(pdev);
1522
1523 kthread_stop(dev->kthread);
1524
1525 sahara_unregister_algs(dev);
1526
1527 clk_disable_unprepare(dev->clk_ipg);
1528 clk_disable_unprepare(dev->clk_ahb);
1529
1530 dev_ptr = NULL;
1531
1532 return 0;
1533}
1534
1535static struct platform_driver sahara_driver = {
1536 .probe = sahara_probe,
1537 .remove = sahara_remove,
1538 .driver = {
1539 .name = SAHARA_NAME,
1540 .of_match_table = sahara_dt_ids,
1541 },
1542 .id_table = sahara_platform_ids,
1543};
1544
1545module_platform_driver(sahara_driver);
1546
1547MODULE_LICENSE("GPL");
1548MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1549MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1550MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Cryptographic API.
4 *
5 * Support for SAHARA cryptographic accelerator.
6 *
7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
8 * Copyright (c) 2013 Vista Silicon S.L.
9 * Author: Javier Martin <javier.martin@vista-silicon.com>
10 *
11 * Based on omap-aes.c and tegra-aes.c
12 */
13
14#include <crypto/aes.h>
15#include <crypto/internal/hash.h>
16#include <crypto/internal/skcipher.h>
17#include <crypto/scatterwalk.h>
18#include <crypto/sha.h>
19
20#include <linux/clk.h>
21#include <linux/crypto.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/kthread.h>
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/platform_device.h>
32
33#define SHA_BUFFER_LEN PAGE_SIZE
34#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
35
36#define SAHARA_NAME "sahara"
37#define SAHARA_VERSION_3 3
38#define SAHARA_VERSION_4 4
39#define SAHARA_TIMEOUT_MS 1000
40#define SAHARA_MAX_HW_DESC 2
41#define SAHARA_MAX_HW_LINK 20
42
43#define FLAGS_MODE_MASK 0x000f
44#define FLAGS_ENCRYPT BIT(0)
45#define FLAGS_CBC BIT(1)
46#define FLAGS_NEW_KEY BIT(3)
47
48#define SAHARA_HDR_BASE 0x00800000
49#define SAHARA_HDR_SKHA_ALG_AES 0
50#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
51#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
52#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
53#define SAHARA_HDR_FORM_DATA (5 << 16)
54#define SAHARA_HDR_FORM_KEY (8 << 16)
55#define SAHARA_HDR_LLO (1 << 24)
56#define SAHARA_HDR_CHA_SKHA (1 << 28)
57#define SAHARA_HDR_CHA_MDHA (2 << 28)
58#define SAHARA_HDR_PARITY_BIT (1 << 31)
59
60#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
61#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
62#define SAHARA_HDR_MDHA_HASH 0xA0850000
63#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
64#define SAHARA_HDR_MDHA_ALG_SHA1 0
65#define SAHARA_HDR_MDHA_ALG_MD5 1
66#define SAHARA_HDR_MDHA_ALG_SHA256 2
67#define SAHARA_HDR_MDHA_ALG_SHA224 3
68#define SAHARA_HDR_MDHA_PDATA (1 << 2)
69#define SAHARA_HDR_MDHA_HMAC (1 << 3)
70#define SAHARA_HDR_MDHA_INIT (1 << 5)
71#define SAHARA_HDR_MDHA_IPAD (1 << 6)
72#define SAHARA_HDR_MDHA_OPAD (1 << 7)
73#define SAHARA_HDR_MDHA_SWAP (1 << 8)
74#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
75#define SAHARA_HDR_MDHA_SSL (1 << 10)
76
77/* SAHARA can only process one request at a time */
78#define SAHARA_QUEUE_LENGTH 1
79
80#define SAHARA_REG_VERSION 0x00
81#define SAHARA_REG_DAR 0x04
82#define SAHARA_REG_CONTROL 0x08
83#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
84#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
85#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
86#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
87#define SAHARA_REG_CMD 0x0C
88#define SAHARA_CMD_RESET (1 << 0)
89#define SAHARA_CMD_CLEAR_INT (1 << 8)
90#define SAHARA_CMD_CLEAR_ERR (1 << 9)
91#define SAHARA_CMD_SINGLE_STEP (1 << 10)
92#define SAHARA_CMD_MODE_BATCH (1 << 16)
93#define SAHARA_CMD_MODE_DEBUG (1 << 18)
94#define SAHARA_REG_STATUS 0x10
95#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
96#define SAHARA_STATE_IDLE 0
97#define SAHARA_STATE_BUSY 1
98#define SAHARA_STATE_ERR 2
99#define SAHARA_STATE_FAULT 3
100#define SAHARA_STATE_COMPLETE 4
101#define SAHARA_STATE_COMP_FLAG (1 << 2)
102#define SAHARA_STATUS_DAR_FULL (1 << 3)
103#define SAHARA_STATUS_ERROR (1 << 4)
104#define SAHARA_STATUS_SECURE (1 << 5)
105#define SAHARA_STATUS_FAIL (1 << 6)
106#define SAHARA_STATUS_INIT (1 << 7)
107#define SAHARA_STATUS_RNG_RESEED (1 << 8)
108#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
109#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
110#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
111#define SAHARA_STATUS_MODE_BATCH (1 << 16)
112#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
113#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
114#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
115#define SAHARA_REG_ERRSTATUS 0x14
116#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
117#define SAHARA_ERRSOURCE_CHA 14
118#define SAHARA_ERRSOURCE_DMA 15
119#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
120#define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
121#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
122#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
123#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
124#define SAHARA_REG_FADDR 0x18
125#define SAHARA_REG_CDAR 0x1C
126#define SAHARA_REG_IDAR 0x20
127
128struct sahara_hw_desc {
129 u32 hdr;
130 u32 len1;
131 u32 p1;
132 u32 len2;
133 u32 p2;
134 u32 next;
135};
136
137struct sahara_hw_link {
138 u32 len;
139 u32 p;
140 u32 next;
141};
142
143struct sahara_ctx {
144 unsigned long flags;
145
146 /* AES-specific context */
147 int keylen;
148 u8 key[AES_KEYSIZE_128];
149 struct crypto_sync_skcipher *fallback;
150};
151
152struct sahara_aes_reqctx {
153 unsigned long mode;
154};
155
156/*
157 * struct sahara_sha_reqctx - private data per request
158 * @buf: holds data for requests smaller than block_size
159 * @rembuf: used to prepare one block_size-aligned request
160 * @context: hw-specific context for request. Digest is extracted from this
161 * @mode: specifies what type of hw-descriptor needs to be built
162 * @digest_size: length of digest for this request
163 * @context_size: length of hw-context for this request.
164 * Always digest_size + 4
165 * @buf_cnt: number of bytes saved in buf
166 * @sg_in_idx: number of hw links
167 * @in_sg: scatterlist for input data
168 * @in_sg_chain: scatterlists for chained input data
169 * @total: total number of bytes for transfer
170 * @last: is this the last block
171 * @first: is this the first block
172 * @active: inside a transfer
173 */
174struct sahara_sha_reqctx {
175 u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
176 u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
177 u8 context[SHA256_DIGEST_SIZE + 4];
178 unsigned int mode;
179 unsigned int digest_size;
180 unsigned int context_size;
181 unsigned int buf_cnt;
182 unsigned int sg_in_idx;
183 struct scatterlist *in_sg;
184 struct scatterlist in_sg_chain[2];
185 size_t total;
186 unsigned int last;
187 unsigned int first;
188 unsigned int active;
189};
190
191struct sahara_dev {
192 struct device *device;
193 unsigned int version;
194 void __iomem *regs_base;
195 struct clk *clk_ipg;
196 struct clk *clk_ahb;
197 struct mutex queue_mutex;
198 struct task_struct *kthread;
199 struct completion dma_completion;
200
201 struct sahara_ctx *ctx;
202 struct crypto_queue queue;
203 unsigned long flags;
204
205 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
206 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
207
208 u8 *key_base;
209 dma_addr_t key_phys_base;
210
211 u8 *iv_base;
212 dma_addr_t iv_phys_base;
213
214 u8 *context_base;
215 dma_addr_t context_phys_base;
216
217 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
218 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
219
220 size_t total;
221 struct scatterlist *in_sg;
222 int nb_in_sg;
223 struct scatterlist *out_sg;
224 int nb_out_sg;
225
226 u32 error;
227};
228
229static struct sahara_dev *dev_ptr;
230
231static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
232{
233 writel(data, dev->regs_base + reg);
234}
235
236static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
237{
238 return readl(dev->regs_base + reg);
239}
240
241static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
242{
243 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
244 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
245 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
246
247 if (dev->flags & FLAGS_CBC) {
248 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
249 hdr ^= SAHARA_HDR_PARITY_BIT;
250 }
251
252 if (dev->flags & FLAGS_ENCRYPT) {
253 hdr |= SAHARA_HDR_SKHA_OP_ENC;
254 hdr ^= SAHARA_HDR_PARITY_BIT;
255 }
256
257 return hdr;
258}
259
260static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
261{
262 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
263 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
264}
265
266static const char *sahara_err_src[16] = {
267 "No error",
268 "Header error",
269 "Descriptor length error",
270 "Descriptor length or pointer error",
271 "Link length error",
272 "Link pointer error",
273 "Input buffer error",
274 "Output buffer error",
275 "Output buffer starvation",
276 "Internal state fault",
277 "General descriptor problem",
278 "Reserved",
279 "Descriptor address error",
280 "Link address error",
281 "CHA error",
282 "DMA error"
283};
284
285static const char *sahara_err_dmasize[4] = {
286 "Byte transfer",
287 "Half-word transfer",
288 "Word transfer",
289 "Reserved"
290};
291
292static const char *sahara_err_dmasrc[8] = {
293 "No error",
294 "AHB bus error",
295 "Internal IP bus error",
296 "Parity error",
297 "DMA crosses 256 byte boundary",
298 "DMA is busy",
299 "Reserved",
300 "DMA HW error"
301};
302
303static const char *sahara_cha_errsrc[12] = {
304 "Input buffer non-empty",
305 "Illegal address",
306 "Illegal mode",
307 "Illegal data size",
308 "Illegal key size",
309 "Write during processing",
310 "CTX read during processing",
311 "HW error",
312 "Input buffer disabled/underflow",
313 "Output buffer disabled/overflow",
314 "DES key parity error",
315 "Reserved"
316};
317
318static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
319
320static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
321{
322 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
323 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
324
325 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
326
327 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
328
329 if (source == SAHARA_ERRSOURCE_DMA) {
330 if (error & SAHARA_ERRSTATUS_DMA_DIR)
331 dev_err(dev->device, " * DMA read.\n");
332 else
333 dev_err(dev->device, " * DMA write.\n");
334
335 dev_err(dev->device, " * %s.\n",
336 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
337 dev_err(dev->device, " * %s.\n",
338 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
339 } else if (source == SAHARA_ERRSOURCE_CHA) {
340 dev_err(dev->device, " * %s.\n",
341 sahara_cha_errsrc[chasrc]);
342 dev_err(dev->device, " * %s.\n",
343 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
344 }
345 dev_err(dev->device, "\n");
346}
347
348static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
349
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

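/* Dump the in-memory hardware descriptors (DEBUG builds only). */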
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

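/*
 * Build the AES descriptor chain for the current request: an optional
 * key/IV descriptor (only when a new key has been set), followed by a data
 * descriptor whose p1/p2 point at the DMA-mapped input and output link
 * lists. The chain is started by writing its address to the DAR register.
 */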
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid number of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

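/*
 * Process one AES request on the hardware: create the descriptors, start
 * the engine and wait (with a timeout) for the completion signalled by the
 * interrupt handler, then unmap the DMA scatterlists.
 */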
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

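/*
 * Set the AES key. 128-bit keys are handled by the hardware; 192/256-bit
 * keys are handed to the software fallback transform.
 */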
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128-bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}

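/* Check the request size and queue the request for the worker thread. */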
static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}

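/*
 * Build the MDHA descriptor header for the current hashing step and make
 * sure the header carries odd parity.
 */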
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

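/*
 * DMA-map the input scatterlist and fill in one hardware link per entry,
 * starting at link index 'start'. Returns the index of the next free link
 * or a negative error code.
 */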
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid number of src SG.\n");
		return dev->nb_in_sg;
	}
	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

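/*
 * Data descriptor aka #8 (first operation) or #10 (subsequent operations)
 *
 * p1: input data (through the hardware link list)
 * p2: digest/context save area
 */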
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

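/*
 * Walk the scatterlist and trim it so that it describes exactly nbytes:
 * the entry holding the final byte is shortened and marked as the end of
 * the list.
 */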
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

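/*
 * Prepare the input for one hash step: merge data buffered from the
 * previous call with the new request data and carve off any trailing bytes
 * that do not fill a whole block (only the last step may be padded by the
 * hardware). Returns 0 if everything was buffered and there is nothing to
 * process, -EINPROGRESS if the hardware should be run.
 */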
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be a multiple of block_size */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

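/*
 * Run one hash step on the hardware: restore the saved MDHA context (for
 * all but the first step), build the data descriptor, start the engine and
 * wait for completion, then save the context and copy out the digest if
 * one was requested.
 */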
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

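/*
 * Worker thread: pulls requests off the crypto queue one at a time,
 * dispatches them to the AES or SHA processing path and completes each
 * request with the result.
 */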
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name = "ecb(aes)",
	.cra_driver_name = "sahara-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sahara_ctx),
	.cra_alignmask = 0x0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sahara_aes_cra_init,
	.cra_exit = sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sahara_aes_setkey,
		.encrypt = sahara_aes_ecb_encrypt,
		.decrypt = sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "sahara-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct sahara_ctx),
	.cra_alignmask = 0x0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = sahara_aes_cra_init,
	.cra_exit = sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = sahara_aes_setkey,
		.encrypt = sahara_aes_cbc_encrypt,
		.decrypt = sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init = sahara_sha_init,
	.update = sahara_sha_update,
	.final = sahara_sha_final,
	.finup = sahara_sha_finup,
	.digest = sahara_sha_digest,
	.export = sahara_sha_export,
	.import = sahara_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct sahara_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sahara-sha1",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct sahara_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init = sahara_sha_init,
	.update = sahara_sha_update,
	.final = sahara_sha_final,
	.finup = sahara_sha_finup,
	.digest = sahara_sha_digest,
	.export = sahara_sha_export,
	.import = sahara_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct sahara_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "sahara-sha256",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct sahara_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = sahara_sha_cra_init,
	}
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe = sahara_probe,
	.remove = sahara_remove,
	.driver = {
		.name = SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");