// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/internal/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

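/*
 * A note on pointer layout (inferred from the accessors below): SEC1
 * descriptor pointers are 32-bit only, with the length kept in len1;
 * SEC2/3 pointers carry a 16-bit len plus an eptr byte holding address
 * bits 32-35, used for the 36-bit addressing enabled in reset_channel().
 */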
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
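 *
 * Return: -EINPROGRESS if the descriptor was queued for processing, or
 * -EAGAIN if the channel fifo was full and the caller should retry.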
 */
static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}

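/*
 * Where the live header lives (a note inferred from talitos_submit() and
 * the chained-descriptor handling below): on SEC1 the engine works on hdr1,
 * and for requests chained via next_desc the done/status bits end up in the
 * second descriptor stored at edesc->buf + edesc->dma_len.
 */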
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
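/*
 * SEC1 scatters the per-channel done bits across the ISR (ch0 bit 28,
 * ch1 bit 30, ch2 bit 16, ch3 bit 18 - see the masks tested below),
 * whereas SEC2 uses the even bits 0/2/4/6; hence the two macro variants.
 */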
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static __be32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* SEC1 error bits: 29 (ch0), 31 (ch1), 17 (ch2), 19 (ch3) */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

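/*
 * Note the inverted IMR polarity on SEC1 above: done interrupts are masked
 * by *setting* IMR bits and unmasked by clearing them, while the SEC2
 * variant below does the opposite (compare the setbits32/clrbits32 pairs
 * in the interrupt handlers and the done tasklets).
 */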
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/*
	 * rng fifo requires 64-bit accesses: read both halves, keeping only
	 * the low word (the first read is overwritten by the second), so
	 * each call returns 32 bits of data
	 */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#else
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
#endif
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

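/*
 * buf[2] double-buffers partial blocks between requests: ahash_done()
 * flips buf_idx, so the to_hash_later bytes stashed at the end of one
 * update seed the next update/final/finup.
 */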
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

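/*
 * The authentication key and encryption key are stored back to back in
 * ctx->key and DMA mapped as one region: descriptors take the MAC key at
 * ctx->dma_key and the cipher key at ctx->dma_key + authkeylen (see
 * ipsec_esp() below).
 */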
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto out;

	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
	if (err)
		goto out;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
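/*
 * Each link table entry is a talitos_ptr (16-bit length, extent/flags
 * byte, pointer); the final entry of a table is tagged DESC_PTR_LNKTBL_RET
 * so the engine knows where the table ends (see the tagging at the bottom
 * of this helper).
 */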
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr, int align)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;
	int padding = ALIGN(cryptlen, align) - cryptlen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset,
			       sg_next(sg) ? len : len + padding, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force, int align)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int aligned_len = ALIGN(len, align);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len,
			       is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len,
			       is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off], align);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr),
		       aligned_len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false, 1);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
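/*
 * Descriptor pointer usage for the AEAD path (as set up below): ptr[0]
 * HMAC key, ptr[1] associated data, ptr[2]/ptr[3] cipher IV and cipher key
 * (swapped between the IPSEC_ESP and HSNA descriptor types), ptr[4] cipher
 * in, ptr[5] cipher out, ptr[6] ICV or IV out depending on direction/type.
 */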
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is the number of HMAC bytes appended after the ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false, 1);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt, 1);
	tbl_off += ret;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
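/*
 * Buffer layout past the struct (pieced together from the offsets used
 * below and in the callers): link tables or, on SEC1, flat bounce buffers
 * (dma_len bytes), then space for stashed/generated ICVs, an optional
 * second descriptor for SEC1 ahash chaining, and finally the IV copy at
 * alloc_len - ivsize.
 */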
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}

static int skcipher_setkey(struct crypto_skcipher *cipher,
			   const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int skcipher_des_setkey(struct crypto_skcipher *cipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(cipher, key) ?:
	       skcipher_setkey(cipher, key, keylen);
}

static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(cipher, key) ?:
	       skcipher_setkey(cipher, key, keylen);
}

static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
			       const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return skcipher_setkey(cipher, key, keylen);

	return -EINVAL;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct skcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void skcipher_done(struct device *dev,
			  struct talitos_desc *desc, void *context,
			  int err)
{
	struct skcipher_request *areq = context;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->iv, ctx->iv, ivsize);

	kfree(edesc);

	skcipher_request_complete(areq, err);
}

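/*
 * fill in and submit a skcipher descriptor: ptr[1] IV in, ptr[2] key,
 * ptr[3] data in, ptr[4] data out, ptr[5] IV out; ptr[0] and ptr[6] stay
 * empty for this descriptor type (see the DWORD comments below).
 */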
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct skcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
						  areq, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	unsigned int ivsize = crypto_skcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int skcipher_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));

	if (!areq->cryptlen)
		return 0;

	if (areq->cryptlen % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, skcipher_done);
}

static int skcipher_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));

	if (!areq->cryptlen)
		return 0;

	if (areq->cryptlen % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, skcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
	if (req_ctx->last)
		memcpy(areq->result, req_ctx->hw_context,
		       crypto_ahash_digestsize(tfm));

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &desc->ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (desc->next_desc)
		dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	ahash_request_complete(areq, err);
}

/*
 * SEC1 doesn't like hashing of a zero-sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
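	/*
	 * A 64-byte block holding just the 0x80 end-of-message marker,
	 * i.e. the Merkle-Damgard padding of an empty message (the length
	 * field stays zero); hashed with h/w padding disabled below.
	 */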
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

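/*
 * On SEC1, when both previously buffered bytes and new data must be hashed
 * in one request, a second descriptor is chained via next_desc below: the
 * first descriptor consumes the buffered block, the second continues
 * (MDEU_CONT) over the scatterlist data.
 */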
1786static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1787 struct ahash_request *areq, unsigned int length,
1788 void (*callback) (struct device *dev,
1789 struct talitos_desc *desc,
1790 void *context, int error))
1791{
1792 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1793 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1794 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1795 struct device *dev = ctx->dev;
1796 struct talitos_desc *desc = &edesc->desc;
1797 int ret;
1798 bool sync_needed = false;
1799 struct talitos_private *priv = dev_get_drvdata(dev);
1800 bool is_sec1 = has_ftr_sec1(priv);
1801 int sg_count;
1802
1803 /* first DWORD empty */
1804
1805 /* hash context in */
1806 if (!req_ctx->first || req_ctx->swinit) {
1807 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1808 req_ctx->hw_context_size,
1809 req_ctx->hw_context,
1810 DMA_TO_DEVICE);
1811 req_ctx->swinit = 0;
1812 }
1813 /* Indicate next op is not the first. */
1814 req_ctx->first = 0;
1815
1816 /* HMAC key */
1817 if (ctx->keylen)
1818 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1819 is_sec1);
1820
1821 if (is_sec1 && req_ctx->nbuf)
1822 length -= req_ctx->nbuf;
1823
1824 sg_count = edesc->src_nents ?: 1;
1825 if (is_sec1 && sg_count > 1)
1826 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1827 else if (length)
1828 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1829 DMA_TO_DEVICE);
1830 /*
1831 * data in
1832 */
1833 if (is_sec1 && req_ctx->nbuf) {
1834 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1835 req_ctx->buf[req_ctx->buf_idx],
1836 DMA_TO_DEVICE);
1837 } else {
1838 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1839 &desc->ptr[3], sg_count, 0, 0);
1840 if (sg_count > 1)
1841 sync_needed = true;
1842 }
1843
1844 /* fifth DWORD empty */
1845
1846 /* hash/HMAC out -or- hash context out */
1847 if (req_ctx->last)
1848 map_single_talitos_ptr(dev, &desc->ptr[5],
1849 crypto_ahash_digestsize(tfm),
1850 req_ctx->hw_context, DMA_FROM_DEVICE);
1851 else
1852 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1853 req_ctx->hw_context_size,
1854 req_ctx->hw_context,
1855 DMA_FROM_DEVICE);
1856
1857 /* last DWORD empty */
1858
1859 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1860 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1861
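	/*
	 * SEC1 can't hash the buffered remainder and the new scatterlist
	 * data in one pass: the first descriptor hashes the buffered
	 * bytes via ptr[3], and a second descriptor, chained through
	 * next_desc, continues with the scatterlist data.  Only the
	 * second descriptor keeps DONE_NOTIFY set.
	 */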
1862 if (is_sec1 && req_ctx->nbuf && length) {
1863 struct talitos_desc *desc2 = (struct talitos_desc *)
1864 (edesc->buf + edesc->dma_len);
1865 dma_addr_t next_desc;
1866
1867 memset(desc2, 0, sizeof(*desc2));
1868 desc2->hdr = desc->hdr;
1869 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1870 desc2->hdr1 = desc2->hdr;
1871 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1872 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1873 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1874
1875 if (desc->ptr[1].ptr)
1876 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1877 is_sec1);
1878 else
1879 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1880 req_ctx->hw_context_size,
1881 req_ctx->hw_context,
1882 DMA_TO_DEVICE);
1883 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1884 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1885 &desc2->ptr[3], sg_count, 0, 0);
1886 if (sg_count > 1)
1887 sync_needed = true;
1888 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1889 if (req_ctx->last)
1890 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1891 req_ctx->hw_context_size,
1892 req_ctx->hw_context,
1893 DMA_FROM_DEVICE);
1894
1895 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1896 DMA_BIDIRECTIONAL);
1897 desc->next_desc = cpu_to_be32(next_desc);
1898 }
1899
1900 if (sync_needed)
1901 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1902 edesc->dma_len, DMA_BIDIRECTIONAL);
1903
1904 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1905 if (ret != -EINPROGRESS) {
1906 common_nonsnoop_hash_unmap(dev, edesc, areq);
1907 kfree(edesc);
1908 }
1909 return ret;
1910}
1911
1912static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1913 unsigned int nbytes)
1914{
1915 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1917 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1918 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1919 bool is_sec1 = has_ftr_sec1(priv);
1920
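	/* SEC1 hashes the buffered bytes via desc ptr[3]; they need no link table space */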
1921 if (is_sec1)
1922 nbytes -= req_ctx->nbuf;
1923
1924 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1925 nbytes, 0, 0, 0, areq->base.flags, false);
1926}
1927
1928static int ahash_init(struct ahash_request *areq)
1929{
1930 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1931 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1932 struct device *dev = ctx->dev;
1933 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1934 unsigned int size;
1935 dma_addr_t dma;
1936
1937 /* Initialize the context */
1938 req_ctx->buf_idx = 0;
1939 req_ctx->nbuf = 0;
1940 req_ctx->first = 1; /* first indicates h/w must init its context */
1941 req_ctx->swinit = 0; /* assume h/w init of context */
1942 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1943 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1944 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1945 req_ctx->hw_context_size = size;
1946
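	/*
	 * Map and immediately unmap to sync hw_context out of the CPU
	 * caches up front; later mappings of it may use
	 * DMA_ATTR_SKIP_CPU_SYNC.
	 */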
1947 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1948 DMA_TO_DEVICE);
1949 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1950
1951 return 0;
1952}
1953
1954/*
1955 * on h/w without explicit sha224 support, we initialize h/w context
1956 * manually with sha224 constants, and tell it to run sha256.
1957 */
1958static int ahash_init_sha224_swinit(struct ahash_request *areq)
1959{
1960 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961
1962 req_ctx->hw_context[0] = SHA224_H0;
1963 req_ctx->hw_context[1] = SHA224_H1;
1964 req_ctx->hw_context[2] = SHA224_H2;
1965 req_ctx->hw_context[3] = SHA224_H3;
1966 req_ctx->hw_context[4] = SHA224_H4;
1967 req_ctx->hw_context[5] = SHA224_H5;
1968 req_ctx->hw_context[6] = SHA224_H6;
1969 req_ctx->hw_context[7] = SHA224_H7;
1970
1971 /* init 64-bit count */
1972 req_ctx->hw_context[8] = 0;
1973 req_ctx->hw_context[9] = 0;
1974
1975 ahash_init(areq);
1976	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
1977
1978 return 0;
1979}
1980
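/*
 * Common update/final/finup worker: buffer sub-block residue in the
 * request context, hash only whole blocks now, and on non-final ops
 * hold at least one block back so the h/w can pad the final one.
 */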
1981static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1982{
1983 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1984 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1986 struct talitos_edesc *edesc;
1987 unsigned int blocksize =
1988 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1989 unsigned int nbytes_to_hash;
1990 unsigned int to_hash_later;
1991 unsigned int nsg;
1992 int nents;
1993 struct device *dev = ctx->dev;
1994 struct talitos_private *priv = dev_get_drvdata(dev);
1995 bool is_sec1 = has_ftr_sec1(priv);
1996 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1997
1998 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1999 /* Buffer up to one whole block */
2000 nents = sg_nents_for_len(areq->src, nbytes);
2001 if (nents < 0) {
2002 dev_err(dev, "Invalid number of src SG.\n");
2003 return nents;
2004 }
2005 sg_copy_to_buffer(areq->src, nents,
2006 ctx_buf + req_ctx->nbuf, nbytes);
2007 req_ctx->nbuf += nbytes;
2008 return 0;
2009 }
2010
2011	/* At least (blocksize + 1) bytes are available to hash, or this is a final op */
2012 nbytes_to_hash = nbytes + req_ctx->nbuf;
2013 to_hash_later = nbytes_to_hash & (blocksize - 1);
2014
2015 if (req_ctx->last)
2016 to_hash_later = 0;
2017 else if (to_hash_later)
2018 /* There is a partial block. Hash the full block(s) now */
2019 nbytes_to_hash -= to_hash_later;
2020 else {
2021 /* Keep one block buffered */
2022 nbytes_to_hash -= blocksize;
2023 to_hash_later = blocksize;
2024 }
2025
2026 /* Chain in any previously buffered data */
2027 if (!is_sec1 && req_ctx->nbuf) {
2028 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2029 sg_init_table(req_ctx->bufsl, nsg);
2030 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2031 if (nsg > 1)
2032 sg_chain(req_ctx->bufsl, 2, areq->src);
2033 req_ctx->psrc = req_ctx->bufsl;
2034 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2035 int offset;
2036
2037 if (nbytes_to_hash > blocksize)
2038 offset = blocksize - req_ctx->nbuf;
2039 else
2040 offset = nbytes_to_hash - req_ctx->nbuf;
2041 nents = sg_nents_for_len(areq->src, offset);
2042 if (nents < 0) {
2043 dev_err(dev, "Invalid number of src SG.\n");
2044 return nents;
2045 }
2046 sg_copy_to_buffer(areq->src, nents,
2047 ctx_buf + req_ctx->nbuf, offset);
2048 req_ctx->nbuf += offset;
2049 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2050 offset);
2051 } else
2052 req_ctx->psrc = areq->src;
2053
2054 if (to_hash_later) {
2055 nents = sg_nents_for_len(areq->src, nbytes);
2056 if (nents < 0) {
2057 dev_err(dev, "Invalid number of src SG.\n");
2058 return nents;
2059 }
2060 sg_pcopy_to_buffer(areq->src, nents,
2061 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2062 to_hash_later,
2063 nbytes - to_hash_later);
2064 }
2065 req_ctx->to_hash_later = to_hash_later;
2066
2067 /* Allocate extended descriptor */
2068 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2069 if (IS_ERR(edesc))
2070 return PTR_ERR(edesc);
2071
2072 edesc->desc.hdr = ctx->desc_hdr_template;
2073
2074 /* On last one, request SEC to pad; otherwise continue */
2075 if (req_ctx->last)
2076 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2077 else
2078 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2079
2080 /* request SEC to INIT hash. */
2081 if (req_ctx->first && !req_ctx->swinit)
2082 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2083
2084	/* When the tfm context has a keylen, it's an HMAC.
2085	 * A first or last (i.e. not middle) descriptor must request HMAC.
2086	 */
2087 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2088 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2089
2090 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2091}
2092
2093static int ahash_update(struct ahash_request *areq)
2094{
2095 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2096
2097 req_ctx->last = 0;
2098
2099 return ahash_process_req(areq, areq->nbytes);
2100}
2101
2102static int ahash_final(struct ahash_request *areq)
2103{
2104 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2105
2106 req_ctx->last = 1;
2107
2108 return ahash_process_req(areq, 0);
2109}
2110
2111static int ahash_finup(struct ahash_request *areq)
2112{
2113 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2114
2115 req_ctx->last = 1;
2116
2117 return ahash_process_req(areq, areq->nbytes);
2118}
2119
2120static int ahash_digest(struct ahash_request *areq)
2121{
2122 ahash_init(areq);
2123 return ahash_finup(areq);
2124}
2125
2126static int ahash_digest_sha224_swinit(struct ahash_request *areq)
2127{
2128 ahash_init_sha224_swinit(areq);
2129 return ahash_finup(areq);
2130}
2131
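/*
 * A minimal usage sketch (not part of this driver) of driving these ops
 * through the generic crypto API; the names sha256, done_cb, priv, sgl,
 * digest and len are illustrative only:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);
 *	ahash_request_set_crypt(req, sgl, digest, len);
 *	crypto_ahash_digest(req);	// may return -EINPROGRESS
 */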
2132static int ahash_export(struct ahash_request *areq, void *out)
2133{
2134 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2135 struct talitos_export_state *export = out;
2136 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2137 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2138 struct device *dev = ctx->dev;
2139 dma_addr_t dma;
2140
2141 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2142 DMA_FROM_DEVICE);
2143 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2144
2145 memcpy(export->hw_context, req_ctx->hw_context,
2146 req_ctx->hw_context_size);
2147 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2148 export->swinit = req_ctx->swinit;
2149 export->first = req_ctx->first;
2150 export->last = req_ctx->last;
2151 export->to_hash_later = req_ctx->to_hash_later;
2152 export->nbuf = req_ctx->nbuf;
2153
2154 return 0;
2155}
2156
2157static int ahash_import(struct ahash_request *areq, const void *in)
2158{
2159 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2160 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2161 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2162 struct device *dev = ctx->dev;
2163 const struct talitos_export_state *export = in;
2164 unsigned int size;
2165 dma_addr_t dma;
2166
2167 memset(req_ctx, 0, sizeof(*req_ctx));
2168 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2169 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2170 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2171 req_ctx->hw_context_size = size;
2172 memcpy(req_ctx->hw_context, export->hw_context, size);
2173 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2174 req_ctx->swinit = export->swinit;
2175 req_ctx->first = export->first;
2176 req_ctx->last = export->last;
2177 req_ctx->to_hash_later = export->to_hash_later;
2178 req_ctx->nbuf = export->nbuf;
2179
2180 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2181 DMA_TO_DEVICE);
2182 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2183
2184 return 0;
2185}
2186
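/*
 * Synchronously digest a key that is longer than the block size down to
 * digestsize, per HMAC convention, reusing this same tfm.
 */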
2187static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2188 u8 *hash)
2189{
2190 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2191
2192 struct scatterlist sg[1];
2193 struct ahash_request *req;
2194 struct crypto_wait wait;
2195 int ret;
2196
2197 crypto_init_wait(&wait);
2198
2199 req = ahash_request_alloc(tfm, GFP_KERNEL);
2200 if (!req)
2201 return -ENOMEM;
2202
2203 /* Keep tfm keylen == 0 during hash of the long key */
2204 ctx->keylen = 0;
2205 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2206 crypto_req_done, &wait);
2207
2208 sg_init_one(&sg[0], key, keylen);
2209
2210 ahash_request_set_crypt(req, sg, hash, keylen);
2211 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2212
2213 ahash_request_free(req);
2214
2215 return ret;
2216}
2217
2218static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2219 unsigned int keylen)
2220{
2221 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2222 struct device *dev = ctx->dev;
2223 unsigned int blocksize =
2224 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2225 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2226 unsigned int keysize = keylen;
2227 u8 hash[SHA512_DIGEST_SIZE];
2228 int ret;
2229
2230 if (keylen <= blocksize)
2231 memcpy(ctx->key, key, keysize);
2232 else {
2233 /* Must get the hash of the long key */
2234 ret = keyhash(tfm, key, keylen, hash);
2235
2236 if (ret)
2237 return -EINVAL;
2238
2239 keysize = digestsize;
2240 memcpy(ctx->key, hash, digestsize);
2241 }
2242
2243 if (ctx->keylen)
2244 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2245
2246 ctx->keylen = keysize;
2247 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2248
2249 return 0;
2250}
2251
2252
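/*
 * One table entry per algorithm: the crypto API definition plus the SEC
 * descriptor header template that implements it.  Entries are
 * instantiated per device by talitos_alg_alloc().
 */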
2253struct talitos_alg_template {
2254 u32 type;
2255 u32 priority;
2256 union {
2257 struct skcipher_alg skcipher;
2258 struct ahash_alg hash;
2259 struct aead_alg aead;
2260 } alg;
2261 __be32 desc_hdr_template;
2262};
2263
2264static struct talitos_alg_template driver_algs[] = {
2265 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2266 { .type = CRYPTO_ALG_TYPE_AEAD,
2267 .alg.aead = {
2268 .base = {
2269 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2270 .cra_driver_name = "authenc-hmac-sha1-"
2271 "cbc-aes-talitos",
2272 .cra_blocksize = AES_BLOCK_SIZE,
2273 .cra_flags = CRYPTO_ALG_ASYNC |
2274 CRYPTO_ALG_ALLOCATES_MEMORY,
2275 },
2276 .ivsize = AES_BLOCK_SIZE,
2277 .maxauthsize = SHA1_DIGEST_SIZE,
2278 },
2279 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2280 DESC_HDR_SEL0_AESU |
2281 DESC_HDR_MODE0_AESU_CBC |
2282 DESC_HDR_SEL1_MDEUA |
2283 DESC_HDR_MODE1_MDEU_INIT |
2284 DESC_HDR_MODE1_MDEU_PAD |
2285 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2286 },
2287 { .type = CRYPTO_ALG_TYPE_AEAD,
2288 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2289 .alg.aead = {
2290 .base = {
2291 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2292 .cra_driver_name = "authenc-hmac-sha1-"
2293 "cbc-aes-talitos-hsna",
2294 .cra_blocksize = AES_BLOCK_SIZE,
2295 .cra_flags = CRYPTO_ALG_ASYNC |
2296 CRYPTO_ALG_ALLOCATES_MEMORY,
2297 },
2298 .ivsize = AES_BLOCK_SIZE,
2299 .maxauthsize = SHA1_DIGEST_SIZE,
2300 },
2301 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2302 DESC_HDR_SEL0_AESU |
2303 DESC_HDR_MODE0_AESU_CBC |
2304 DESC_HDR_SEL1_MDEUA |
2305 DESC_HDR_MODE1_MDEU_INIT |
2306 DESC_HDR_MODE1_MDEU_PAD |
2307 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2308 },
2309 { .type = CRYPTO_ALG_TYPE_AEAD,
2310 .alg.aead = {
2311 .base = {
2312 .cra_name = "authenc(hmac(sha1),"
2313 "cbc(des3_ede))",
2314 .cra_driver_name = "authenc-hmac-sha1-"
2315 "cbc-3des-talitos",
2316 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2317 .cra_flags = CRYPTO_ALG_ASYNC |
2318 CRYPTO_ALG_ALLOCATES_MEMORY,
2319 },
2320 .ivsize = DES3_EDE_BLOCK_SIZE,
2321 .maxauthsize = SHA1_DIGEST_SIZE,
2322 .setkey = aead_des3_setkey,
2323 },
2324 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2325 DESC_HDR_SEL0_DEU |
2326 DESC_HDR_MODE0_DEU_CBC |
2327 DESC_HDR_MODE0_DEU_3DES |
2328 DESC_HDR_SEL1_MDEUA |
2329 DESC_HDR_MODE1_MDEU_INIT |
2330 DESC_HDR_MODE1_MDEU_PAD |
2331 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2332 },
2333 { .type = CRYPTO_ALG_TYPE_AEAD,
2334 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2335 .alg.aead = {
2336 .base = {
2337 .cra_name = "authenc(hmac(sha1),"
2338 "cbc(des3_ede))",
2339 .cra_driver_name = "authenc-hmac-sha1-"
2340 "cbc-3des-talitos-hsna",
2341 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2342 .cra_flags = CRYPTO_ALG_ASYNC |
2343 CRYPTO_ALG_ALLOCATES_MEMORY,
2344 },
2345 .ivsize = DES3_EDE_BLOCK_SIZE,
2346 .maxauthsize = SHA1_DIGEST_SIZE,
2347 .setkey = aead_des3_setkey,
2348 },
2349 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2350 DESC_HDR_SEL0_DEU |
2351 DESC_HDR_MODE0_DEU_CBC |
2352 DESC_HDR_MODE0_DEU_3DES |
2353 DESC_HDR_SEL1_MDEUA |
2354 DESC_HDR_MODE1_MDEU_INIT |
2355 DESC_HDR_MODE1_MDEU_PAD |
2356 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2357 },
2358 { .type = CRYPTO_ALG_TYPE_AEAD,
2359 .alg.aead = {
2360 .base = {
2361 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2362 .cra_driver_name = "authenc-hmac-sha224-"
2363 "cbc-aes-talitos",
2364 .cra_blocksize = AES_BLOCK_SIZE,
2365 .cra_flags = CRYPTO_ALG_ASYNC |
2366 CRYPTO_ALG_ALLOCATES_MEMORY,
2367 },
2368 .ivsize = AES_BLOCK_SIZE,
2369 .maxauthsize = SHA224_DIGEST_SIZE,
2370 },
2371 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2372 DESC_HDR_SEL0_AESU |
2373 DESC_HDR_MODE0_AESU_CBC |
2374 DESC_HDR_SEL1_MDEUA |
2375 DESC_HDR_MODE1_MDEU_INIT |
2376 DESC_HDR_MODE1_MDEU_PAD |
2377 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2378 },
2379 { .type = CRYPTO_ALG_TYPE_AEAD,
2380 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2381 .alg.aead = {
2382 .base = {
2383 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2384 .cra_driver_name = "authenc-hmac-sha224-"
2385 "cbc-aes-talitos-hsna",
2386 .cra_blocksize = AES_BLOCK_SIZE,
2387 .cra_flags = CRYPTO_ALG_ASYNC |
2388 CRYPTO_ALG_ALLOCATES_MEMORY,
2389 },
2390 .ivsize = AES_BLOCK_SIZE,
2391 .maxauthsize = SHA224_DIGEST_SIZE,
2392 },
2393 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2394 DESC_HDR_SEL0_AESU |
2395 DESC_HDR_MODE0_AESU_CBC |
2396 DESC_HDR_SEL1_MDEUA |
2397 DESC_HDR_MODE1_MDEU_INIT |
2398 DESC_HDR_MODE1_MDEU_PAD |
2399 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2400 },
2401 { .type = CRYPTO_ALG_TYPE_AEAD,
2402 .alg.aead = {
2403 .base = {
2404 .cra_name = "authenc(hmac(sha224),"
2405 "cbc(des3_ede))",
2406 .cra_driver_name = "authenc-hmac-sha224-"
2407 "cbc-3des-talitos",
2408 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2409 .cra_flags = CRYPTO_ALG_ASYNC |
2410 CRYPTO_ALG_ALLOCATES_MEMORY,
2411 },
2412 .ivsize = DES3_EDE_BLOCK_SIZE,
2413 .maxauthsize = SHA224_DIGEST_SIZE,
2414 .setkey = aead_des3_setkey,
2415 },
2416 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2417 DESC_HDR_SEL0_DEU |
2418 DESC_HDR_MODE0_DEU_CBC |
2419 DESC_HDR_MODE0_DEU_3DES |
2420 DESC_HDR_SEL1_MDEUA |
2421 DESC_HDR_MODE1_MDEU_INIT |
2422 DESC_HDR_MODE1_MDEU_PAD |
2423 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2424 },
2425 { .type = CRYPTO_ALG_TYPE_AEAD,
2426 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2427 .alg.aead = {
2428 .base = {
2429 .cra_name = "authenc(hmac(sha224),"
2430 "cbc(des3_ede))",
2431 .cra_driver_name = "authenc-hmac-sha224-"
2432 "cbc-3des-talitos-hsna",
2433 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2434 .cra_flags = CRYPTO_ALG_ASYNC |
2435 CRYPTO_ALG_ALLOCATES_MEMORY,
2436 },
2437 .ivsize = DES3_EDE_BLOCK_SIZE,
2438 .maxauthsize = SHA224_DIGEST_SIZE,
2439 .setkey = aead_des3_setkey,
2440 },
2441 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2442 DESC_HDR_SEL0_DEU |
2443 DESC_HDR_MODE0_DEU_CBC |
2444 DESC_HDR_MODE0_DEU_3DES |
2445 DESC_HDR_SEL1_MDEUA |
2446 DESC_HDR_MODE1_MDEU_INIT |
2447 DESC_HDR_MODE1_MDEU_PAD |
2448 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2449 },
2450 { .type = CRYPTO_ALG_TYPE_AEAD,
2451 .alg.aead = {
2452 .base = {
2453 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2454 .cra_driver_name = "authenc-hmac-sha256-"
2455 "cbc-aes-talitos",
2456 .cra_blocksize = AES_BLOCK_SIZE,
2457 .cra_flags = CRYPTO_ALG_ASYNC |
2458 CRYPTO_ALG_ALLOCATES_MEMORY,
2459 },
2460 .ivsize = AES_BLOCK_SIZE,
2461 .maxauthsize = SHA256_DIGEST_SIZE,
2462 },
2463 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2464 DESC_HDR_SEL0_AESU |
2465 DESC_HDR_MODE0_AESU_CBC |
2466 DESC_HDR_SEL1_MDEUA |
2467 DESC_HDR_MODE1_MDEU_INIT |
2468 DESC_HDR_MODE1_MDEU_PAD |
2469 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2470 },
2471 { .type = CRYPTO_ALG_TYPE_AEAD,
2472 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2473 .alg.aead = {
2474 .base = {
2475 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2476 .cra_driver_name = "authenc-hmac-sha256-"
2477 "cbc-aes-talitos-hsna",
2478 .cra_blocksize = AES_BLOCK_SIZE,
2479 .cra_flags = CRYPTO_ALG_ASYNC |
2480 CRYPTO_ALG_ALLOCATES_MEMORY,
2481 },
2482 .ivsize = AES_BLOCK_SIZE,
2483 .maxauthsize = SHA256_DIGEST_SIZE,
2484 },
2485 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2486 DESC_HDR_SEL0_AESU |
2487 DESC_HDR_MODE0_AESU_CBC |
2488 DESC_HDR_SEL1_MDEUA |
2489 DESC_HDR_MODE1_MDEU_INIT |
2490 DESC_HDR_MODE1_MDEU_PAD |
2491 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2492 },
2493 { .type = CRYPTO_ALG_TYPE_AEAD,
2494 .alg.aead = {
2495 .base = {
2496 .cra_name = "authenc(hmac(sha256),"
2497 "cbc(des3_ede))",
2498 .cra_driver_name = "authenc-hmac-sha256-"
2499 "cbc-3des-talitos",
2500 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2501 .cra_flags = CRYPTO_ALG_ASYNC |
2502 CRYPTO_ALG_ALLOCATES_MEMORY,
2503 },
2504 .ivsize = DES3_EDE_BLOCK_SIZE,
2505 .maxauthsize = SHA256_DIGEST_SIZE,
2506 .setkey = aead_des3_setkey,
2507 },
2508 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2509 DESC_HDR_SEL0_DEU |
2510 DESC_HDR_MODE0_DEU_CBC |
2511 DESC_HDR_MODE0_DEU_3DES |
2512 DESC_HDR_SEL1_MDEUA |
2513 DESC_HDR_MODE1_MDEU_INIT |
2514 DESC_HDR_MODE1_MDEU_PAD |
2515 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2516 },
2517 { .type = CRYPTO_ALG_TYPE_AEAD,
2518 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2519 .alg.aead = {
2520 .base = {
2521 .cra_name = "authenc(hmac(sha256),"
2522 "cbc(des3_ede))",
2523 .cra_driver_name = "authenc-hmac-sha256-"
2524 "cbc-3des-talitos-hsna",
2525 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2526 .cra_flags = CRYPTO_ALG_ASYNC |
2527 CRYPTO_ALG_ALLOCATES_MEMORY,
2528 },
2529 .ivsize = DES3_EDE_BLOCK_SIZE,
2530 .maxauthsize = SHA256_DIGEST_SIZE,
2531 .setkey = aead_des3_setkey,
2532 },
2533 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2534 DESC_HDR_SEL0_DEU |
2535 DESC_HDR_MODE0_DEU_CBC |
2536 DESC_HDR_MODE0_DEU_3DES |
2537 DESC_HDR_SEL1_MDEUA |
2538 DESC_HDR_MODE1_MDEU_INIT |
2539 DESC_HDR_MODE1_MDEU_PAD |
2540 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2541 },
2542 { .type = CRYPTO_ALG_TYPE_AEAD,
2543 .alg.aead = {
2544 .base = {
2545 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2546 .cra_driver_name = "authenc-hmac-sha384-"
2547 "cbc-aes-talitos",
2548 .cra_blocksize = AES_BLOCK_SIZE,
2549 .cra_flags = CRYPTO_ALG_ASYNC |
2550 CRYPTO_ALG_ALLOCATES_MEMORY,
2551 },
2552 .ivsize = AES_BLOCK_SIZE,
2553 .maxauthsize = SHA384_DIGEST_SIZE,
2554 },
2555 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2556 DESC_HDR_SEL0_AESU |
2557 DESC_HDR_MODE0_AESU_CBC |
2558 DESC_HDR_SEL1_MDEUB |
2559 DESC_HDR_MODE1_MDEU_INIT |
2560 DESC_HDR_MODE1_MDEU_PAD |
2561 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2562 },
2563 { .type = CRYPTO_ALG_TYPE_AEAD,
2564 .alg.aead = {
2565 .base = {
2566 .cra_name = "authenc(hmac(sha384),"
2567 "cbc(des3_ede))",
2568 .cra_driver_name = "authenc-hmac-sha384-"
2569 "cbc-3des-talitos",
2570 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2571 .cra_flags = CRYPTO_ALG_ASYNC |
2572 CRYPTO_ALG_ALLOCATES_MEMORY,
2573 },
2574 .ivsize = DES3_EDE_BLOCK_SIZE,
2575 .maxauthsize = SHA384_DIGEST_SIZE,
2576 .setkey = aead_des3_setkey,
2577 },
2578 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2579 DESC_HDR_SEL0_DEU |
2580 DESC_HDR_MODE0_DEU_CBC |
2581 DESC_HDR_MODE0_DEU_3DES |
2582 DESC_HDR_SEL1_MDEUB |
2583 DESC_HDR_MODE1_MDEU_INIT |
2584 DESC_HDR_MODE1_MDEU_PAD |
2585 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2586 },
2587 { .type = CRYPTO_ALG_TYPE_AEAD,
2588 .alg.aead = {
2589 .base = {
2590 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2591 .cra_driver_name = "authenc-hmac-sha512-"
2592 "cbc-aes-talitos",
2593 .cra_blocksize = AES_BLOCK_SIZE,
2594 .cra_flags = CRYPTO_ALG_ASYNC |
2595 CRYPTO_ALG_ALLOCATES_MEMORY,
2596 },
2597 .ivsize = AES_BLOCK_SIZE,
2598 .maxauthsize = SHA512_DIGEST_SIZE,
2599 },
2600 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2601 DESC_HDR_SEL0_AESU |
2602 DESC_HDR_MODE0_AESU_CBC |
2603 DESC_HDR_SEL1_MDEUB |
2604 DESC_HDR_MODE1_MDEU_INIT |
2605 DESC_HDR_MODE1_MDEU_PAD |
2606 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2607 },
2608 { .type = CRYPTO_ALG_TYPE_AEAD,
2609 .alg.aead = {
2610 .base = {
2611 .cra_name = "authenc(hmac(sha512),"
2612 "cbc(des3_ede))",
2613 .cra_driver_name = "authenc-hmac-sha512-"
2614 "cbc-3des-talitos",
2615 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2616 .cra_flags = CRYPTO_ALG_ASYNC |
2617 CRYPTO_ALG_ALLOCATES_MEMORY,
2618 },
2619 .ivsize = DES3_EDE_BLOCK_SIZE,
2620 .maxauthsize = SHA512_DIGEST_SIZE,
2621 .setkey = aead_des3_setkey,
2622 },
2623 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2624 DESC_HDR_SEL0_DEU |
2625 DESC_HDR_MODE0_DEU_CBC |
2626 DESC_HDR_MODE0_DEU_3DES |
2627 DESC_HDR_SEL1_MDEUB |
2628 DESC_HDR_MODE1_MDEU_INIT |
2629 DESC_HDR_MODE1_MDEU_PAD |
2630 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2631 },
2632 { .type = CRYPTO_ALG_TYPE_AEAD,
2633 .alg.aead = {
2634 .base = {
2635 .cra_name = "authenc(hmac(md5),cbc(aes))",
2636 .cra_driver_name = "authenc-hmac-md5-"
2637 "cbc-aes-talitos",
2638 .cra_blocksize = AES_BLOCK_SIZE,
2639 .cra_flags = CRYPTO_ALG_ASYNC |
2640 CRYPTO_ALG_ALLOCATES_MEMORY,
2641 },
2642 .ivsize = AES_BLOCK_SIZE,
2643 .maxauthsize = MD5_DIGEST_SIZE,
2644 },
2645 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2646 DESC_HDR_SEL0_AESU |
2647 DESC_HDR_MODE0_AESU_CBC |
2648 DESC_HDR_SEL1_MDEUA |
2649 DESC_HDR_MODE1_MDEU_INIT |
2650 DESC_HDR_MODE1_MDEU_PAD |
2651 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2652 },
2653 { .type = CRYPTO_ALG_TYPE_AEAD,
2654 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2655 .alg.aead = {
2656 .base = {
2657 .cra_name = "authenc(hmac(md5),cbc(aes))",
2658 .cra_driver_name = "authenc-hmac-md5-"
2659 "cbc-aes-talitos-hsna",
2660 .cra_blocksize = AES_BLOCK_SIZE,
2661 .cra_flags = CRYPTO_ALG_ASYNC |
2662 CRYPTO_ALG_ALLOCATES_MEMORY,
2663 },
2664 .ivsize = AES_BLOCK_SIZE,
2665 .maxauthsize = MD5_DIGEST_SIZE,
2666 },
2667 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2668 DESC_HDR_SEL0_AESU |
2669 DESC_HDR_MODE0_AESU_CBC |
2670 DESC_HDR_SEL1_MDEUA |
2671 DESC_HDR_MODE1_MDEU_INIT |
2672 DESC_HDR_MODE1_MDEU_PAD |
2673 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2674 },
2675 { .type = CRYPTO_ALG_TYPE_AEAD,
2676 .alg.aead = {
2677 .base = {
2678 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2679 .cra_driver_name = "authenc-hmac-md5-"
2680 "cbc-3des-talitos",
2681 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2682 .cra_flags = CRYPTO_ALG_ASYNC |
2683 CRYPTO_ALG_ALLOCATES_MEMORY,
2684 },
2685 .ivsize = DES3_EDE_BLOCK_SIZE,
2686 .maxauthsize = MD5_DIGEST_SIZE,
2687 .setkey = aead_des3_setkey,
2688 },
2689 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2690 DESC_HDR_SEL0_DEU |
2691 DESC_HDR_MODE0_DEU_CBC |
2692 DESC_HDR_MODE0_DEU_3DES |
2693 DESC_HDR_SEL1_MDEUA |
2694 DESC_HDR_MODE1_MDEU_INIT |
2695 DESC_HDR_MODE1_MDEU_PAD |
2696 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2697 },
2698 { .type = CRYPTO_ALG_TYPE_AEAD,
2699 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2700 .alg.aead = {
2701 .base = {
2702 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2703 .cra_driver_name = "authenc-hmac-md5-"
2704 "cbc-3des-talitos-hsna",
2705 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2706 .cra_flags = CRYPTO_ALG_ASYNC |
2707 CRYPTO_ALG_ALLOCATES_MEMORY,
2708 },
2709 .ivsize = DES3_EDE_BLOCK_SIZE,
2710 .maxauthsize = MD5_DIGEST_SIZE,
2711 .setkey = aead_des3_setkey,
2712 },
2713 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2714 DESC_HDR_SEL0_DEU |
2715 DESC_HDR_MODE0_DEU_CBC |
2716 DESC_HDR_MODE0_DEU_3DES |
2717 DESC_HDR_SEL1_MDEUA |
2718 DESC_HDR_MODE1_MDEU_INIT |
2719 DESC_HDR_MODE1_MDEU_PAD |
2720 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2721 },
2722 /* SKCIPHER algorithms. */
2723 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2724 .alg.skcipher = {
2725 .base.cra_name = "ecb(aes)",
2726 .base.cra_driver_name = "ecb-aes-talitos",
2727 .base.cra_blocksize = AES_BLOCK_SIZE,
2728 .base.cra_flags = CRYPTO_ALG_ASYNC |
2729 CRYPTO_ALG_ALLOCATES_MEMORY,
2730 .min_keysize = AES_MIN_KEY_SIZE,
2731 .max_keysize = AES_MAX_KEY_SIZE,
2732 .setkey = skcipher_aes_setkey,
2733 },
2734 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2735 DESC_HDR_SEL0_AESU,
2736 },
2737 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2738 .alg.skcipher = {
2739 .base.cra_name = "cbc(aes)",
2740 .base.cra_driver_name = "cbc-aes-talitos",
2741 .base.cra_blocksize = AES_BLOCK_SIZE,
2742 .base.cra_flags = CRYPTO_ALG_ASYNC |
2743 CRYPTO_ALG_ALLOCATES_MEMORY,
2744 .min_keysize = AES_MIN_KEY_SIZE,
2745 .max_keysize = AES_MAX_KEY_SIZE,
2746 .ivsize = AES_BLOCK_SIZE,
2747 .setkey = skcipher_aes_setkey,
2748 },
2749 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2750 DESC_HDR_SEL0_AESU |
2751 DESC_HDR_MODE0_AESU_CBC,
2752 },
2753 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2754 .alg.skcipher = {
2755 .base.cra_name = "ctr(aes)",
2756 .base.cra_driver_name = "ctr-aes-talitos",
2757 .base.cra_blocksize = 1,
2758 .base.cra_flags = CRYPTO_ALG_ASYNC |
2759 CRYPTO_ALG_ALLOCATES_MEMORY,
2760 .min_keysize = AES_MIN_KEY_SIZE,
2761 .max_keysize = AES_MAX_KEY_SIZE,
2762 .ivsize = AES_BLOCK_SIZE,
2763 .setkey = skcipher_aes_setkey,
2764 },
2765 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2766 DESC_HDR_SEL0_AESU |
2767 DESC_HDR_MODE0_AESU_CTR,
2768 },
2769 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2770 .alg.skcipher = {
2771 .base.cra_name = "ctr(aes)",
2772 .base.cra_driver_name = "ctr-aes-talitos",
2773 .base.cra_blocksize = 1,
2774 .base.cra_flags = CRYPTO_ALG_ASYNC |
2775 CRYPTO_ALG_ALLOCATES_MEMORY,
2776 .min_keysize = AES_MIN_KEY_SIZE,
2777 .max_keysize = AES_MAX_KEY_SIZE,
2778 .ivsize = AES_BLOCK_SIZE,
2779 .setkey = skcipher_aes_setkey,
2780 },
2781 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2782 DESC_HDR_SEL0_AESU |
2783 DESC_HDR_MODE0_AESU_CTR,
2784 },
2785 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2786 .alg.skcipher = {
2787 .base.cra_name = "ecb(des)",
2788 .base.cra_driver_name = "ecb-des-talitos",
2789 .base.cra_blocksize = DES_BLOCK_SIZE,
2790 .base.cra_flags = CRYPTO_ALG_ASYNC |
2791 CRYPTO_ALG_ALLOCATES_MEMORY,
2792 .min_keysize = DES_KEY_SIZE,
2793 .max_keysize = DES_KEY_SIZE,
2794 .setkey = skcipher_des_setkey,
2795 },
2796 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2797 DESC_HDR_SEL0_DEU,
2798 },
2799 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2800 .alg.skcipher = {
2801 .base.cra_name = "cbc(des)",
2802 .base.cra_driver_name = "cbc-des-talitos",
2803 .base.cra_blocksize = DES_BLOCK_SIZE,
2804 .base.cra_flags = CRYPTO_ALG_ASYNC |
2805 CRYPTO_ALG_ALLOCATES_MEMORY,
2806 .min_keysize = DES_KEY_SIZE,
2807 .max_keysize = DES_KEY_SIZE,
2808 .ivsize = DES_BLOCK_SIZE,
2809 .setkey = skcipher_des_setkey,
2810 },
2811 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812 DESC_HDR_SEL0_DEU |
2813 DESC_HDR_MODE0_DEU_CBC,
2814 },
2815 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2816 .alg.skcipher = {
2817 .base.cra_name = "ecb(des3_ede)",
2818 .base.cra_driver_name = "ecb-3des-talitos",
2819 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2820 .base.cra_flags = CRYPTO_ALG_ASYNC |
2821 CRYPTO_ALG_ALLOCATES_MEMORY,
2822 .min_keysize = DES3_EDE_KEY_SIZE,
2823 .max_keysize = DES3_EDE_KEY_SIZE,
2824 .setkey = skcipher_des3_setkey,
2825 },
2826 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2827 DESC_HDR_SEL0_DEU |
2828 DESC_HDR_MODE0_DEU_3DES,
2829 },
2830 { .type = CRYPTO_ALG_TYPE_SKCIPHER,
2831 .alg.skcipher = {
2832 .base.cra_name = "cbc(des3_ede)",
2833 .base.cra_driver_name = "cbc-3des-talitos",
2834 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2835 .base.cra_flags = CRYPTO_ALG_ASYNC |
2836 CRYPTO_ALG_ALLOCATES_MEMORY,
2837 .min_keysize = DES3_EDE_KEY_SIZE,
2838 .max_keysize = DES3_EDE_KEY_SIZE,
2839 .ivsize = DES3_EDE_BLOCK_SIZE,
2840 .setkey = skcipher_des3_setkey,
2841 },
2842 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2843 DESC_HDR_SEL0_DEU |
2844 DESC_HDR_MODE0_DEU_CBC |
2845 DESC_HDR_MODE0_DEU_3DES,
2846 },
2847 /* AHASH algorithms. */
2848 { .type = CRYPTO_ALG_TYPE_AHASH,
2849 .alg.hash = {
2850 .halg.digestsize = MD5_DIGEST_SIZE,
2851 .halg.statesize = sizeof(struct talitos_export_state),
2852 .halg.base = {
2853 .cra_name = "md5",
2854 .cra_driver_name = "md5-talitos",
2855 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2856 .cra_flags = CRYPTO_ALG_ASYNC |
2857 CRYPTO_ALG_ALLOCATES_MEMORY,
2858 }
2859 },
2860 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2861 DESC_HDR_SEL0_MDEUA |
2862 DESC_HDR_MODE0_MDEU_MD5,
2863 },
2864 { .type = CRYPTO_ALG_TYPE_AHASH,
2865 .alg.hash = {
2866 .halg.digestsize = SHA1_DIGEST_SIZE,
2867 .halg.statesize = sizeof(struct talitos_export_state),
2868 .halg.base = {
2869 .cra_name = "sha1",
2870 .cra_driver_name = "sha1-talitos",
2871 .cra_blocksize = SHA1_BLOCK_SIZE,
2872 .cra_flags = CRYPTO_ALG_ASYNC |
2873 CRYPTO_ALG_ALLOCATES_MEMORY,
2874 }
2875 },
2876 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2877 DESC_HDR_SEL0_MDEUA |
2878 DESC_HDR_MODE0_MDEU_SHA1,
2879 },
2880 { .type = CRYPTO_ALG_TYPE_AHASH,
2881 .alg.hash = {
2882 .halg.digestsize = SHA224_DIGEST_SIZE,
2883 .halg.statesize = sizeof(struct talitos_export_state),
2884 .halg.base = {
2885 .cra_name = "sha224",
2886 .cra_driver_name = "sha224-talitos",
2887 .cra_blocksize = SHA224_BLOCK_SIZE,
2888 .cra_flags = CRYPTO_ALG_ASYNC |
2889 CRYPTO_ALG_ALLOCATES_MEMORY,
2890 }
2891 },
2892 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2893 DESC_HDR_SEL0_MDEUA |
2894 DESC_HDR_MODE0_MDEU_SHA224,
2895 },
2896 { .type = CRYPTO_ALG_TYPE_AHASH,
2897 .alg.hash = {
2898 .halg.digestsize = SHA256_DIGEST_SIZE,
2899 .halg.statesize = sizeof(struct talitos_export_state),
2900 .halg.base = {
2901 .cra_name = "sha256",
2902 .cra_driver_name = "sha256-talitos",
2903 .cra_blocksize = SHA256_BLOCK_SIZE,
2904 .cra_flags = CRYPTO_ALG_ASYNC |
2905 CRYPTO_ALG_ALLOCATES_MEMORY,
2906 }
2907 },
2908 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2909 DESC_HDR_SEL0_MDEUA |
2910 DESC_HDR_MODE0_MDEU_SHA256,
2911 },
2912 { .type = CRYPTO_ALG_TYPE_AHASH,
2913 .alg.hash = {
2914 .halg.digestsize = SHA384_DIGEST_SIZE,
2915 .halg.statesize = sizeof(struct talitos_export_state),
2916 .halg.base = {
2917 .cra_name = "sha384",
2918 .cra_driver_name = "sha384-talitos",
2919 .cra_blocksize = SHA384_BLOCK_SIZE,
2920 .cra_flags = CRYPTO_ALG_ASYNC |
2921 CRYPTO_ALG_ALLOCATES_MEMORY,
2922 }
2923 },
2924 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2925 DESC_HDR_SEL0_MDEUB |
2926 DESC_HDR_MODE0_MDEUB_SHA384,
2927 },
2928 { .type = CRYPTO_ALG_TYPE_AHASH,
2929 .alg.hash = {
2930 .halg.digestsize = SHA512_DIGEST_SIZE,
2931 .halg.statesize = sizeof(struct talitos_export_state),
2932 .halg.base = {
2933 .cra_name = "sha512",
2934 .cra_driver_name = "sha512-talitos",
2935 .cra_blocksize = SHA512_BLOCK_SIZE,
2936 .cra_flags = CRYPTO_ALG_ASYNC |
2937 CRYPTO_ALG_ALLOCATES_MEMORY,
2938 }
2939 },
2940 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941 DESC_HDR_SEL0_MDEUB |
2942 DESC_HDR_MODE0_MDEUB_SHA512,
2943 },
2944 { .type = CRYPTO_ALG_TYPE_AHASH,
2945 .alg.hash = {
2946 .halg.digestsize = MD5_DIGEST_SIZE,
2947 .halg.statesize = sizeof(struct talitos_export_state),
2948 .halg.base = {
2949 .cra_name = "hmac(md5)",
2950 .cra_driver_name = "hmac-md5-talitos",
2951 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2952 .cra_flags = CRYPTO_ALG_ASYNC |
2953 CRYPTO_ALG_ALLOCATES_MEMORY,
2954 }
2955 },
2956 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2957 DESC_HDR_SEL0_MDEUA |
2958 DESC_HDR_MODE0_MDEU_MD5,
2959 },
2960 { .type = CRYPTO_ALG_TYPE_AHASH,
2961 .alg.hash = {
2962 .halg.digestsize = SHA1_DIGEST_SIZE,
2963 .halg.statesize = sizeof(struct talitos_export_state),
2964 .halg.base = {
2965 .cra_name = "hmac(sha1)",
2966 .cra_driver_name = "hmac-sha1-talitos",
2967 .cra_blocksize = SHA1_BLOCK_SIZE,
2968 .cra_flags = CRYPTO_ALG_ASYNC |
2969 CRYPTO_ALG_ALLOCATES_MEMORY,
2970 }
2971 },
2972 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2973 DESC_HDR_SEL0_MDEUA |
2974 DESC_HDR_MODE0_MDEU_SHA1,
2975 },
2976 { .type = CRYPTO_ALG_TYPE_AHASH,
2977 .alg.hash = {
2978 .halg.digestsize = SHA224_DIGEST_SIZE,
2979 .halg.statesize = sizeof(struct talitos_export_state),
2980 .halg.base = {
2981 .cra_name = "hmac(sha224)",
2982 .cra_driver_name = "hmac-sha224-talitos",
2983 .cra_blocksize = SHA224_BLOCK_SIZE,
2984 .cra_flags = CRYPTO_ALG_ASYNC |
2985 CRYPTO_ALG_ALLOCATES_MEMORY,
2986 }
2987 },
2988 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2989 DESC_HDR_SEL0_MDEUA |
2990 DESC_HDR_MODE0_MDEU_SHA224,
2991 },
2992 { .type = CRYPTO_ALG_TYPE_AHASH,
2993 .alg.hash = {
2994 .halg.digestsize = SHA256_DIGEST_SIZE,
2995 .halg.statesize = sizeof(struct talitos_export_state),
2996 .halg.base = {
2997 .cra_name = "hmac(sha256)",
2998 .cra_driver_name = "hmac-sha256-talitos",
2999 .cra_blocksize = SHA256_BLOCK_SIZE,
3000 .cra_flags = CRYPTO_ALG_ASYNC |
3001 CRYPTO_ALG_ALLOCATES_MEMORY,
3002 }
3003 },
3004 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3005 DESC_HDR_SEL0_MDEUA |
3006 DESC_HDR_MODE0_MDEU_SHA256,
3007 },
3008 { .type = CRYPTO_ALG_TYPE_AHASH,
3009 .alg.hash = {
3010 .halg.digestsize = SHA384_DIGEST_SIZE,
3011 .halg.statesize = sizeof(struct talitos_export_state),
3012 .halg.base = {
3013 .cra_name = "hmac(sha384)",
3014 .cra_driver_name = "hmac-sha384-talitos",
3015 .cra_blocksize = SHA384_BLOCK_SIZE,
3016 .cra_flags = CRYPTO_ALG_ASYNC |
3017 CRYPTO_ALG_ALLOCATES_MEMORY,
3018 }
3019 },
3020 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3021 DESC_HDR_SEL0_MDEUB |
3022 DESC_HDR_MODE0_MDEUB_SHA384,
3023 },
3024 { .type = CRYPTO_ALG_TYPE_AHASH,
3025 .alg.hash = {
3026 .halg.digestsize = SHA512_DIGEST_SIZE,
3027 .halg.statesize = sizeof(struct talitos_export_state),
3028 .halg.base = {
3029 .cra_name = "hmac(sha512)",
3030 .cra_driver_name = "hmac-sha512-talitos",
3031 .cra_blocksize = SHA512_BLOCK_SIZE,
3032 .cra_flags = CRYPTO_ALG_ASYNC |
3033 CRYPTO_ALG_ALLOCATES_MEMORY,
3034 }
3035 },
3036 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3037 DESC_HDR_SEL0_MDEUB |
3038 DESC_HDR_MODE0_MDEUB_SHA512,
3039 }
3040};
3041
3042struct talitos_crypto_alg {
3043 struct list_head entry;
3044 struct device *dev;
3045 struct talitos_alg_template algt;
3046};
3047
3048static int talitos_init_common(struct talitos_ctx *ctx,
3049 struct talitos_crypto_alg *talitos_alg)
3050{
3051 struct talitos_private *priv;
3052
3053 /* update context with ptr to dev */
3054 ctx->dev = talitos_alg->dev;
3055
3056 /* assign SEC channel to tfm in round-robin fashion */
3057 priv = dev_get_drvdata(ctx->dev);
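	/*
	 * num_channels is validated as a power of 2 at probe time, so
	 * the AND below is a cheap modulo.
	 */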
3058 ctx->ch = atomic_inc_return(&priv->last_chan) &
3059 (priv->num_channels - 1);
3060
3061 /* copy descriptor header template value */
3062 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3063
3064 /* select done notification */
3065 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3066
3067 return 0;
3068}
3069
3070static int talitos_cra_init_aead(struct crypto_aead *tfm)
3071{
3072 struct aead_alg *alg = crypto_aead_alg(tfm);
3073 struct talitos_crypto_alg *talitos_alg;
3074 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3075
3076 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3077 algt.alg.aead);
3078
3079 return talitos_init_common(ctx, talitos_alg);
3080}
3081
3082static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3083{
3084 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3085 struct talitos_crypto_alg *talitos_alg;
3086 struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3087
3088 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3089 algt.alg.skcipher);
3090
3091 return talitos_init_common(ctx, talitos_alg);
3092}
3093
3094static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3095{
3096 struct crypto_alg *alg = tfm->__crt_alg;
3097 struct talitos_crypto_alg *talitos_alg;
3098 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3099
3100 talitos_alg = container_of(__crypto_ahash_alg(alg),
3101 struct talitos_crypto_alg,
3102 algt.alg.hash);
3103
3104 ctx->keylen = 0;
3105 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3106 sizeof(struct talitos_ahash_req_ctx));
3107
3108 return talitos_init_common(ctx, talitos_alg);
3109}
3110
3111static void talitos_cra_exit(struct crypto_tfm *tfm)
3112{
3113 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3114 struct device *dev = ctx->dev;
3115
3116 if (ctx->keylen)
3117 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3118}
3119
3120/*
3121 * given the alg's descriptor header template, determine whether descriptor
3122 * type and primary/secondary execution units required match the hw
3123 * capabilities description provided in the device tree node.
3124 */
3125static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3126{
3127 struct talitos_private *priv = dev_get_drvdata(dev);
3128 int ret;
3129
3130 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3131 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3132
3133 if (SECONDARY_EU(desc_hdr_template))
3134 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3135 & priv->exec_units);
3136
3137 return ret;
3138}
3139
3140static void talitos_remove(struct platform_device *ofdev)
3141{
3142 struct device *dev = &ofdev->dev;
3143 struct talitos_private *priv = dev_get_drvdata(dev);
3144 struct talitos_crypto_alg *t_alg, *n;
3145 int i;
3146
3147 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3148 switch (t_alg->algt.type) {
3149 case CRYPTO_ALG_TYPE_SKCIPHER:
3150 crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3151 break;
3152 case CRYPTO_ALG_TYPE_AEAD:
3153 crypto_unregister_aead(&t_alg->algt.alg.aead);
3154 break;
3155 case CRYPTO_ALG_TYPE_AHASH:
3156 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3157 break;
3158 }
3159 list_del(&t_alg->entry);
3160 }
3161
3162 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3163 talitos_unregister_rng(dev);
3164
3165 for (i = 0; i < 2; i++)
3166 if (priv->irq[i]) {
3167 free_irq(priv->irq[i], dev);
3168 irq_dispose_mapping(priv->irq[i]);
3169 }
3170
3171 tasklet_kill(&priv->done_task[0]);
3172 if (priv->irq[1])
3173 tasklet_kill(&priv->done_task[1]);
3174}
3175
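/*
 * Clone a template and wire up the common init/exit and operation
 * callbacks.  Returns ERR_PTR(-ENOTSUPP) for algorithms this SEC
 * revision can't run, which the caller skips silently.
 */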
3176static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3177 struct talitos_alg_template
3178 *template)
3179{
3180 struct talitos_private *priv = dev_get_drvdata(dev);
3181 struct talitos_crypto_alg *t_alg;
3182 struct crypto_alg *alg;
3183
3184 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3185 GFP_KERNEL);
3186 if (!t_alg)
3187 return ERR_PTR(-ENOMEM);
3188
3189 t_alg->algt = *template;
3190
3191 switch (t_alg->algt.type) {
3192 case CRYPTO_ALG_TYPE_SKCIPHER:
3193 alg = &t_alg->algt.alg.skcipher.base;
3194 alg->cra_exit = talitos_cra_exit;
3195 t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3196 t_alg->algt.alg.skcipher.setkey =
3197 t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3198 t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3199 t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3200 if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3201 DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3202 DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3203 devm_kfree(dev, t_alg);
3204 return ERR_PTR(-ENOTSUPP);
3205 }
3206 break;
3207 case CRYPTO_ALG_TYPE_AEAD:
3208 alg = &t_alg->algt.alg.aead.base;
3209 alg->cra_exit = talitos_cra_exit;
3210 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3211 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3212 aead_setkey;
3213 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3214 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3215 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3217 devm_kfree(dev, t_alg);
3218 return ERR_PTR(-ENOTSUPP);
3219 }
3220 break;
3221 case CRYPTO_ALG_TYPE_AHASH:
3222 alg = &t_alg->algt.alg.hash.halg.base;
3223 alg->cra_init = talitos_cra_init_ahash;
3224 alg->cra_exit = talitos_cra_exit;
3225 t_alg->algt.alg.hash.init = ahash_init;
3226 t_alg->algt.alg.hash.update = ahash_update;
3227 t_alg->algt.alg.hash.final = ahash_final;
3228 t_alg->algt.alg.hash.finup = ahash_finup;
3229 t_alg->algt.alg.hash.digest = ahash_digest;
3230 if (!strncmp(alg->cra_name, "hmac", 4))
3231 t_alg->algt.alg.hash.setkey = ahash_setkey;
3232 t_alg->algt.alg.hash.import = ahash_import;
3233 t_alg->algt.alg.hash.export = ahash_export;
3234
3235 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3236 !strncmp(alg->cra_name, "hmac", 4)) {
3237 devm_kfree(dev, t_alg);
3238 return ERR_PTR(-ENOTSUPP);
3239 }
3240 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241 (!strcmp(alg->cra_name, "sha224") ||
3242 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3243 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3244 t_alg->algt.alg.hash.digest =
3245 ahash_digest_sha224_swinit;
3246 t_alg->algt.desc_hdr_template =
3247 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3248 DESC_HDR_SEL0_MDEUA |
3249 DESC_HDR_MODE0_MDEU_SHA256;
3250 }
3251 break;
3252 default:
3253 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3254 devm_kfree(dev, t_alg);
3255 return ERR_PTR(-EINVAL);
3256 }
3257
3258 alg->cra_module = THIS_MODULE;
3259 if (t_alg->algt.priority)
3260 alg->cra_priority = t_alg->algt.priority;
3261 else
3262 alg->cra_priority = TALITOS_CRA_PRIORITY;
3263 if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
3264 alg->cra_alignmask = 3;
3265 else
3266 alg->cra_alignmask = 0;
3267 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3268 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3269
3270 t_alg->dev = dev;
3271
3272 return t_alg;
3273}
3274
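/*
 * SEC1 uses a single interrupt for all channels.  SEC2+ may provide two
 * lines, with channels 0/2 on the primary and 1/3 on the secondary;
 * when only one line is mapped, all four channels share it.
 */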
3275static int talitos_probe_irq(struct platform_device *ofdev)
3276{
3277 struct device *dev = &ofdev->dev;
3278 struct device_node *np = ofdev->dev.of_node;
3279 struct talitos_private *priv = dev_get_drvdata(dev);
3280 int err;
3281 bool is_sec1 = has_ftr_sec1(priv);
3282
3283 priv->irq[0] = irq_of_parse_and_map(np, 0);
3284 if (!priv->irq[0]) {
3285 dev_err(dev, "failed to map irq\n");
3286 return -EINVAL;
3287 }
3288 if (is_sec1) {
3289 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3290 dev_driver_string(dev), dev);
3291 goto primary_out;
3292 }
3293
3294 priv->irq[1] = irq_of_parse_and_map(np, 1);
3295
3296 /* get the primary irq line */
3297 if (!priv->irq[1]) {
3298 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3299 dev_driver_string(dev), dev);
3300 goto primary_out;
3301 }
3302
3303 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3304 dev_driver_string(dev), dev);
3305 if (err)
3306 goto primary_out;
3307
3308 /* get the secondary irq line */
3309 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3310 dev_driver_string(dev), dev);
3311 if (err) {
3312 dev_err(dev, "failed to request secondary irq\n");
3313 irq_dispose_mapping(priv->irq[1]);
3314 priv->irq[1] = 0;
3315 }
3316
3317 return err;
3318
3319primary_out:
3320 if (err) {
3321 dev_err(dev, "failed to request primary irq\n");
3322 irq_dispose_mapping(priv->irq[0]);
3323 priv->irq[0] = 0;
3324 }
3325
3326 return err;
3327}
3328
3329static int talitos_probe(struct platform_device *ofdev)
3330{
3331 struct device *dev = &ofdev->dev;
3332 struct device_node *np = ofdev->dev.of_node;
3333 struct talitos_private *priv;
3334 int i, err;
3335 int stride;
3336 struct resource *res;
3337
3338 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3339 if (!priv)
3340 return -ENOMEM;
3341
3342 INIT_LIST_HEAD(&priv->alg_list);
3343
3344 dev_set_drvdata(dev, priv);
3345
3346 priv->ofdev = ofdev;
3347
3348 spin_lock_init(&priv->reg_lock);
3349
3350 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3351 if (!res)
3352 return -ENXIO;
3353 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3354 if (!priv->reg) {
3355		dev_err(dev, "failed to ioremap\n");
3356 err = -ENOMEM;
3357 goto err_out;
3358 }
3359
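	/*
	 * An illustrative node carrying the properties read below (values
	 * taken from the fsl,sec2.0 binding example, not from any
	 * particular board):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <29 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */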
3360 /* get SEC version capabilities from device tree */
3361 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3362 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3363 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3364 of_property_read_u32(np, "fsl,descriptor-types-mask",
3365 &priv->desc_types);
3366
3367 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3368 !priv->exec_units || !priv->desc_types) {
3369 dev_err(dev, "invalid property data in device tree node\n");
3370 err = -EINVAL;
3371 goto err_out;
3372 }
3373
3374 if (of_device_is_compatible(np, "fsl,sec3.0"))
3375 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3376
3377 if (of_device_is_compatible(np, "fsl,sec2.1"))
3378 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3379 TALITOS_FTR_SHA224_HWINIT |
3380 TALITOS_FTR_HMAC_OK;
3381
3382 if (of_device_is_compatible(np, "fsl,sec1.0"))
3383 priv->features |= TALITOS_FTR_SEC1;
3384
3385 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3386 priv->reg_deu = priv->reg + TALITOS12_DEU;
3387 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3388 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3389 stride = TALITOS1_CH_STRIDE;
3390 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3391 priv->reg_deu = priv->reg + TALITOS10_DEU;
3392 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3393 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3394 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3395 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3396 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3397 stride = TALITOS1_CH_STRIDE;
3398 } else {
3399 priv->reg_deu = priv->reg + TALITOS2_DEU;
3400 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3401 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3402 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3403 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3404 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3405 priv->reg_keu = priv->reg + TALITOS2_KEU;
3406 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3407 stride = TALITOS2_CH_STRIDE;
3408 }
3409
3410 err = talitos_probe_irq(ofdev);
3411 if (err)
3412 goto err_out;
3413
3414 if (has_ftr_sec1(priv)) {
3415 if (priv->num_channels == 1)
3416 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3417 (unsigned long)dev);
3418 else
3419 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3420 (unsigned long)dev);
3421 } else {
3422 if (priv->irq[1]) {
3423 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3424 (unsigned long)dev);
3425 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3426 (unsigned long)dev);
3427 } else if (priv->num_channels == 1) {
3428 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3429 (unsigned long)dev);
3430 } else {
3431 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3432 (unsigned long)dev);
3433 }
3434 }
3435
3436 priv->chan = devm_kcalloc(dev,
3437 priv->num_channels,
3438 sizeof(struct talitos_channel),
3439 GFP_KERNEL);
3440 if (!priv->chan) {
3441 dev_err(dev, "failed to allocate channel management space\n");
3442 err = -ENOMEM;
3443 goto err_out;
3444 }
3445
3446 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3447
3448 for (i = 0; i < priv->num_channels; i++) {
3449 priv->chan[i].reg = priv->reg + stride * (i + 1);
3450 if (!priv->irq[1] || !(i & 1))
3451 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3452
3453 spin_lock_init(&priv->chan[i].head_lock);
3454 spin_lock_init(&priv->chan[i].tail_lock);
3455
3456 priv->chan[i].fifo = devm_kcalloc(dev,
3457 priv->fifo_len,
3458 sizeof(struct talitos_request),
3459 GFP_KERNEL);
3460 if (!priv->chan[i].fifo) {
3461 dev_err(dev, "failed to allocate request fifo %d\n", i);
3462 err = -ENOMEM;
3463 goto err_out;
3464 }
3465
3466 atomic_set(&priv->chan[i].submit_count,
3467 -(priv->chfifo_len - 1));
3468 }
3469
3470 dma_set_mask(dev, DMA_BIT_MASK(36));
3471
3472 /* reset and initialize the h/w */
3473 err = init_device(dev);
3474 if (err) {
3475 dev_err(dev, "failed to initialize device\n");
3476 goto err_out;
3477 }
3478
3479 /* register the RNG, if available */
3480 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3481 err = talitos_register_rng(dev);
3482 if (err) {
3483 dev_err(dev, "failed to register hwrng: %d\n", err);
3484 goto err_out;
3485 } else
3486 dev_info(dev, "hwrng\n");
3487 }
3488
3489 /* register crypto algorithms the device supports */
3490 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3491 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3492 struct talitos_crypto_alg *t_alg;
3493 struct crypto_alg *alg = NULL;
3494
3495 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3496 if (IS_ERR(t_alg)) {
3497 err = PTR_ERR(t_alg);
3498 if (err == -ENOTSUPP)
3499 continue;
3500 goto err_out;
3501 }
3502
3503 switch (t_alg->algt.type) {
3504 case CRYPTO_ALG_TYPE_SKCIPHER:
3505 err = crypto_register_skcipher(
3506 &t_alg->algt.alg.skcipher);
3507 alg = &t_alg->algt.alg.skcipher.base;
3508 break;
3509
3510 case CRYPTO_ALG_TYPE_AEAD:
3511 err = crypto_register_aead(
3512 &t_alg->algt.alg.aead);
3513 alg = &t_alg->algt.alg.aead.base;
3514 break;
3515
3516 case CRYPTO_ALG_TYPE_AHASH:
3517 err = crypto_register_ahash(
3518 &t_alg->algt.alg.hash);
3519 alg = &t_alg->algt.alg.hash.halg.base;
3520 break;
3521 }
3522 if (err) {
3523 dev_err(dev, "%s alg registration failed\n",
3524 alg->cra_driver_name);
3525 devm_kfree(dev, t_alg);
3526 } else
3527 list_add_tail(&t_alg->entry, &priv->alg_list);
3528 }
3529 }
3530 if (!list_empty(&priv->alg_list))
3531 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3532 (char *)of_get_property(np, "compatible", NULL));
3533
3534 return 0;
3535
3536err_out:
3537 talitos_remove(ofdev);
3538
3539 return err;
3540}
3541
3542static const struct of_device_id talitos_match[] = {
3543#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3544 {
3545 .compatible = "fsl,sec1.0",
3546 },
3547#endif
3548#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3549 {
3550 .compatible = "fsl,sec2.0",
3551 },
3552#endif
3553 {},
3554};
3555MODULE_DEVICE_TABLE(of, talitos_match);
3556
3557static struct platform_driver talitos_driver = {
3558 .driver = {
3559 .name = "talitos",
3560 .of_match_table = talitos_match,
3561 },
3562 .probe = talitos_probe,
3563 .remove_new = talitos_remove,
3564};
3565
3566module_platform_driver(talitos_driver);
3567
3568MODULE_LICENSE("GPL");
3569MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3570MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
187 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189 TALITOS_CCCR_LO_IWSE);
190
191 return 0;
192}
193
194static int reset_device(struct device *dev)
195{
196 struct talitos_private *priv = dev_get_drvdata(dev);
197 unsigned int timeout = TALITOS_TIMEOUT;
198 bool is_sec1 = has_ftr_sec1(priv);
199 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
200
201 setbits32(priv->reg + TALITOS_MCR, mcr);
202
203 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
204 && --timeout)
205 cpu_relax();
206
207 if (priv->irq[1]) {
208 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209 setbits32(priv->reg + TALITOS_MCR, mcr);
210 }
211
212 if (timeout == 0) {
213 dev_err(dev, "failed to reset device\n");
214 return -EIO;
215 }
216
217 return 0;
218}
219
220/*
221 * Reset and initialize the device
222 */
223static int init_device(struct device *dev)
224{
225 struct talitos_private *priv = dev_get_drvdata(dev);
226 int ch, err;
227 bool is_sec1 = has_ftr_sec1(priv);
228
229 /*
	 * Master reset.
	 * Per the errata documentation, certain SEC interrupts are not
	 * fully cleared by writing the MCR:SWR bit once, so set the bit
	 * twice to completely reset the device.
234 */
235 err = reset_device(dev);
236 if (err)
237 return err;
238
239 err = reset_device(dev);
240 if (err)
241 return err;
242
243 /* reset channels */
244 for (ch = 0; ch < priv->num_channels; ch++) {
245 err = reset_channel(dev, ch);
246 if (err)
247 return err;
248 }
249
250 /* enable channel done and error interrupts */
251 if (is_sec1) {
252 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (some test vectors trip it) */
255 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
256 } else {
257 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
259 }
260
261 /* disable integrity check error interrupts (use writeback instead) */
262 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264 TALITOS_MDEUICR_LO_ICE);
265
266 return 0;
267}
268
269/**
270 * talitos_submit - submits a descriptor to the device for processing
271 * @dev: the SEC device to be used
272 * @ch: the SEC device channel to be used
273 * @desc: the descriptor to be processed by the device
274 * @callback: whom to call when processing is complete
275 * @context: a handle for use by caller (optional)
276 *
277 * desc must contain valid dma-mapped (bus physical) address pointers.
278 * callback must check err and feedback in descriptor header
279 * for device processing status.
280 */
281int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282 void (*callback)(struct device *dev,
283 struct talitos_desc *desc,
284 void *context, int error),
285 void *context)
286{
287 struct talitos_private *priv = dev_get_drvdata(dev);
288 struct talitos_request *request;
289 unsigned long flags;
290 int head;
291 bool is_sec1 = has_ftr_sec1(priv);
292
293 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
294
295 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296 /* h/w fifo is full */
297 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
298 return -EAGAIN;
299 }
300
301 head = priv->chan[ch].head;
302 request = &priv->chan[ch].fifo[head];
303
304 /* map descriptor and save caller data */
305 if (is_sec1) {
306 desc->hdr1 = desc->hdr;
307 request->dma_desc = dma_map_single(dev, &desc->hdr1,
308 TALITOS_DESC_SIZE,
309 DMA_BIDIRECTIONAL);
310 } else {
311 request->dma_desc = dma_map_single(dev, desc,
312 TALITOS_DESC_SIZE,
313 DMA_BIDIRECTIONAL);
314 }
315 request->callback = callback;
316 request->context = context;
317
	/* increment fifo head; fifo_len is a power of 2, so the mask wraps */
319 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
320
321 smp_wmb();
322 request->desc = desc;
323
324 /* GO! */
325 wmb();
326 out_be32(priv->chan[ch].reg + TALITOS_FF,
327 upper_32_bits(request->dma_desc));
328 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329 lower_32_bits(request->dma_desc));
330
331 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
332
333 return -EINPROGRESS;
334}
335EXPORT_SYMBOL(talitos_submit);
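
/*
 * A minimal submission sketch, mirroring the request paths later in this
 * file ("done_cb" and "req" are placeholder names, not part of the API):
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, done_cb, req);
 *	if (ret != -EINPROGRESS) {
 *		... unmap buffers and kfree(edesc) here; -EAGAIN means
 *		... the channel fifo was full
 *	}
 *
 * On -EINPROGRESS, done_cb() is invoked later from the channel's done
 * (or error) processing.
 */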
336
337/*
 * process completed requests and notify their callbacks; descriptors
 * whose done bit is not set are completed with the given error
339 */
340static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
341{
342 struct talitos_private *priv = dev_get_drvdata(dev);
343 struct talitos_request *request, saved_req;
344 unsigned long flags;
345 int tail, status;
346 bool is_sec1 = has_ftr_sec1(priv);
347
348 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
349
350 tail = priv->chan[ch].tail;
351 while (priv->chan[ch].fifo[tail].desc) {
352 __be32 hdr;
353
354 request = &priv->chan[ch].fifo[tail];
355
356 /* descriptors with their done bits set don't get the error */
357 rmb();
358 if (!is_sec1)
359 hdr = request->desc->hdr;
360 else if (request->desc->next_desc)
361 hdr = (request->desc + 1)->hdr1;
362 else
363 hdr = request->desc->hdr1;
364
365 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
366 status = 0;
367 else
368 if (!error)
369 break;
370 else
371 status = error;
372
373 dma_unmap_single(dev, request->dma_desc,
374 TALITOS_DESC_SIZE,
375 DMA_BIDIRECTIONAL);
376
377 /* copy entries so we can call callback outside lock */
378 saved_req.desc = request->desc;
379 saved_req.callback = request->callback;
380 saved_req.context = request->context;
381
382 /* release request entry in fifo */
383 smp_wmb();
384 request->desc = NULL;
385
386 /* increment fifo tail */
387 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
388
389 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
390
391 atomic_dec(&priv->chan[ch].submit_count);
392
393 saved_req.callback(dev, saved_req.desc, saved_req.context,
394 status);
395 /* channel may resume processing in single desc error case */
396 if (error && !reset_ch && status == error)
397 return;
398 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
399 tail = priv->chan[ch].tail;
400 }
401
402 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
403}
404
405/*
406 * process completed requests for channels that have done status
407 */
408#define DEF_TALITOS1_DONE(name, ch_done_mask) \
409static void talitos1_done_##name(unsigned long data) \
410{ \
411 struct device *dev = (struct device *)data; \
412 struct talitos_private *priv = dev_get_drvdata(dev); \
413 unsigned long flags; \
414 \
415 if (ch_done_mask & 0x10000000) \
416 flush_channel(dev, 0, 0, 0); \
417 if (ch_done_mask & 0x40000000) \
418 flush_channel(dev, 1, 0, 0); \
419 if (ch_done_mask & 0x00010000) \
420 flush_channel(dev, 2, 0, 0); \
421 if (ch_done_mask & 0x00040000) \
422 flush_channel(dev, 3, 0, 0); \
423 \
424 /* At this point, all completed channels have been processed */ \
425 /* Unmask done interrupts for channels completed later on. */ \
426 spin_lock_irqsave(&priv->reg_lock, flags); \
427 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
428 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
429 spin_unlock_irqrestore(&priv->reg_lock, flags); \
430}
431
432DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
433DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
434
435#define DEF_TALITOS2_DONE(name, ch_done_mask) \
436static void talitos2_done_##name(unsigned long data) \
437{ \
438 struct device *dev = (struct device *)data; \
439 struct talitos_private *priv = dev_get_drvdata(dev); \
440 unsigned long flags; \
441 \
442 if (ch_done_mask & 1) \
443 flush_channel(dev, 0, 0, 0); \
444 if (ch_done_mask & (1 << 2)) \
445 flush_channel(dev, 1, 0, 0); \
446 if (ch_done_mask & (1 << 4)) \
447 flush_channel(dev, 2, 0, 0); \
448 if (ch_done_mask & (1 << 6)) \
449 flush_channel(dev, 3, 0, 0); \
450 \
451 /* At this point, all completed channels have been processed */ \
452 /* Unmask done interrupts for channels completed later on. */ \
453 spin_lock_irqsave(&priv->reg_lock, flags); \
454 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
455 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
456 spin_unlock_irqrestore(&priv->reg_lock, flags); \
457}
458
459DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
460DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
461DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
462DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
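
/*
 * For reference, DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) above expands
 * to a tasklet body named talitos2_done_4ch().  Note the mirrored IMR
 * polarity: a set IMR bit masks an interrupt on SEC1 but enables it on
 * SEC2, which is why the SEC1 tasklets unmask with clrbits32() while the
 * SEC2 tasklets unmask with setbits32().
 */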
463
464/*
465 * locate current (offending) descriptor
466 */
467static u32 current_desc_hdr(struct device *dev, int ch)
468{
469 struct talitos_private *priv = dev_get_drvdata(dev);
470 int tail, iter;
471 dma_addr_t cur_desc;
472
473 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
474 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
475
476 if (!cur_desc) {
477 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
478 return 0;
479 }
480
481 tail = priv->chan[ch].tail;
482
483 iter = tail;
484 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
485 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
486 iter = (iter + 1) & (priv->fifo_len - 1);
487 if (iter == tail) {
488 dev_err(dev, "couldn't locate current descriptor\n");
489 return 0;
490 }
491 }
492
493 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
494 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
495
496 return priv->chan[ch].fifo[iter].desc->hdr;
497}
498
499/*
500 * user diagnostics; report root cause of error based on execution unit status
501 */
502static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
503{
504 struct talitos_private *priv = dev_get_drvdata(dev);
505 int i;
506
507 if (!desc_hdr)
508 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
509
510 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
511 case DESC_HDR_SEL0_AFEU:
512 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
513 in_be32(priv->reg_afeu + TALITOS_EUISR),
514 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
515 break;
516 case DESC_HDR_SEL0_DEU:
517 dev_err(dev, "DEUISR 0x%08x_%08x\n",
518 in_be32(priv->reg_deu + TALITOS_EUISR),
519 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
520 break;
521 case DESC_HDR_SEL0_MDEUA:
522 case DESC_HDR_SEL0_MDEUB:
523 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
524 in_be32(priv->reg_mdeu + TALITOS_EUISR),
525 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
526 break;
527 case DESC_HDR_SEL0_RNG:
528 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
529 in_be32(priv->reg_rngu + TALITOS_ISR),
530 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
531 break;
532 case DESC_HDR_SEL0_PKEU:
533 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
534 in_be32(priv->reg_pkeu + TALITOS_EUISR),
535 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
536 break;
537 case DESC_HDR_SEL0_AESU:
538 dev_err(dev, "AESUISR 0x%08x_%08x\n",
539 in_be32(priv->reg_aesu + TALITOS_EUISR),
540 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
541 break;
542 case DESC_HDR_SEL0_CRCU:
543 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
544 in_be32(priv->reg_crcu + TALITOS_EUISR),
545 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
546 break;
547 case DESC_HDR_SEL0_KEU:
548 dev_err(dev, "KEUISR 0x%08x_%08x\n",
549 in_be32(priv->reg_pkeu + TALITOS_EUISR),
550 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
551 break;
552 }
553
554 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
555 case DESC_HDR_SEL1_MDEUA:
556 case DESC_HDR_SEL1_MDEUB:
557 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
558 in_be32(priv->reg_mdeu + TALITOS_EUISR),
559 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
560 break;
561 case DESC_HDR_SEL1_CRCU:
562 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
563 in_be32(priv->reg_crcu + TALITOS_EUISR),
564 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
565 break;
566 }
567
568 for (i = 0; i < 8; i++)
569 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
570 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
571 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
572}
573
574/*
575 * recover from error interrupts
576 */
577static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
578{
579 struct talitos_private *priv = dev_get_drvdata(dev);
580 unsigned int timeout = TALITOS_TIMEOUT;
581 int ch, error, reset_dev = 0;
582 u32 v_lo;
583 bool is_sec1 = has_ftr_sec1(priv);
584 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
585
586 for (ch = 0; ch < priv->num_channels; ch++) {
587 /* skip channels without errors */
588 if (is_sec1) {
589 /* bits 29, 31, 17, 19 */
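			/* i.e. ch 0 -> bit 29, ch 1 -> bit 31,
			 *      ch 2 -> bit 17, ch 3 -> bit 19
			 */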
590 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
591 continue;
592 } else {
593 if (!(isr & (1 << (ch * 2 + 1))))
594 continue;
595 }
596
597 error = -EINVAL;
598
599 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
600
601 if (v_lo & TALITOS_CCPSR_LO_DOF) {
602 dev_err(dev, "double fetch fifo overflow error\n");
603 error = -EAGAIN;
604 reset_ch = 1;
605 }
606 if (v_lo & TALITOS_CCPSR_LO_SOF) {
607 /* h/w dropped descriptor */
608 dev_err(dev, "single fetch fifo overflow error\n");
609 error = -EAGAIN;
610 }
611 if (v_lo & TALITOS_CCPSR_LO_MDTE)
612 dev_err(dev, "master data transfer error\n");
613 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
614 dev_err(dev, is_sec1 ? "pointer not complete error\n"
615 : "s/g data length zero error\n");
616 if (v_lo & TALITOS_CCPSR_LO_FPZ)
617 dev_err(dev, is_sec1 ? "parity error\n"
618 : "fetch pointer zero error\n");
619 if (v_lo & TALITOS_CCPSR_LO_IDH)
620 dev_err(dev, "illegal descriptor header error\n");
621 if (v_lo & TALITOS_CCPSR_LO_IEU)
622 dev_err(dev, is_sec1 ? "static assignment error\n"
623 : "invalid exec unit error\n");
624 if (v_lo & TALITOS_CCPSR_LO_EU)
625 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
626 if (!is_sec1) {
627 if (v_lo & TALITOS_CCPSR_LO_GB)
628 dev_err(dev, "gather boundary error\n");
629 if (v_lo & TALITOS_CCPSR_LO_GRL)
630 dev_err(dev, "gather return/length error\n");
631 if (v_lo & TALITOS_CCPSR_LO_SB)
632 dev_err(dev, "scatter boundary error\n");
633 if (v_lo & TALITOS_CCPSR_LO_SRL)
634 dev_err(dev, "scatter return/length error\n");
635 }
636
637 flush_channel(dev, ch, error, reset_ch);
638
639 if (reset_ch) {
640 reset_channel(dev, ch);
641 } else {
642 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
643 TALITOS2_CCCR_CONT);
644 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
645 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
646 TALITOS2_CCCR_CONT) && --timeout)
647 cpu_relax();
648 if (timeout == 0) {
649 dev_err(dev, "failed to restart channel %d\n",
650 ch);
651 reset_dev = 1;
652 }
653 }
654 }
655 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
656 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
657 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
658 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
659 isr, isr_lo);
660 else
661 dev_err(dev, "done overflow, internal time out, or "
662 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
663
664 /* purge request queues */
665 for (ch = 0; ch < priv->num_channels; ch++)
666 flush_channel(dev, ch, -EIO, 1);
667
668 /* reset and reinitialize the device */
669 init_device(dev);
670 }
671}
672
673#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
674static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
675{ \
676 struct device *dev = data; \
677 struct talitos_private *priv = dev_get_drvdata(dev); \
678 u32 isr, isr_lo; \
679 unsigned long flags; \
680 \
681 spin_lock_irqsave(&priv->reg_lock, flags); \
682 isr = in_be32(priv->reg + TALITOS_ISR); \
683 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
684 /* Acknowledge interrupt */ \
685 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
686 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
687 \
688 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
689 spin_unlock_irqrestore(&priv->reg_lock, flags); \
690 talitos_error(dev, isr & ch_err_mask, isr_lo); \
691 } \
692 else { \
693 if (likely(isr & ch_done_mask)) { \
694 /* mask further done interrupts. */ \
695 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
696 /* done_task will unmask done interrupts at exit */ \
697 tasklet_schedule(&priv->done_task[tlet]); \
698 } \
699 spin_unlock_irqrestore(&priv->reg_lock, flags); \
700 } \
701 \
702 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
703 IRQ_NONE; \
704}
705
706DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
707
708#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
709static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
710{ \
711 struct device *dev = data; \
712 struct talitos_private *priv = dev_get_drvdata(dev); \
713 u32 isr, isr_lo; \
714 unsigned long flags; \
715 \
716 spin_lock_irqsave(&priv->reg_lock, flags); \
717 isr = in_be32(priv->reg + TALITOS_ISR); \
718 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
719 /* Acknowledge interrupt */ \
720 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
721 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
722 \
723 if (unlikely(isr & ch_err_mask || isr_lo)) { \
724 spin_unlock_irqrestore(&priv->reg_lock, flags); \
725 talitos_error(dev, isr & ch_err_mask, isr_lo); \
726 } \
727 else { \
728 if (likely(isr & ch_done_mask)) { \
729 /* mask further done interrupts. */ \
730 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
731 /* done_task will unmask done interrupts at exit */ \
732 tasklet_schedule(&priv->done_task[tlet]); \
733 } \
734 spin_unlock_irqrestore(&priv->reg_lock, flags); \
735 } \
736 \
737 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
738 IRQ_NONE; \
739}
740
741DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
742DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
743 0)
744DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
745 1)
746
747/*
748 * hwrng
749 */
750static int talitos_rng_data_present(struct hwrng *rng, int wait)
751{
752 struct device *dev = (struct device *)rng->priv;
753 struct talitos_private *priv = dev_get_drvdata(dev);
754 u32 ofl;
755 int i;
756
757 for (i = 0; i < 20; i++) {
758 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
759 TALITOS_RNGUSR_LO_OFL;
760 if (ofl || !wait)
761 break;
762 udelay(10);
763 }
764
765 return !!ofl;
766}
767
768static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
769{
770 struct device *dev = (struct device *)rng->priv;
771 struct talitos_private *priv = dev_get_drvdata(dev);
772
773 /* rng fifo requires 64-bit accesses */
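	/* the first (high-word) read below is immediately overwritten;
	 * only the low word is returned to the caller
	 */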
774 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
775 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
776
777 return sizeof(u32);
778}
779
780static int talitos_rng_init(struct hwrng *rng)
781{
782 struct device *dev = (struct device *)rng->priv;
783 struct talitos_private *priv = dev_get_drvdata(dev);
784 unsigned int timeout = TALITOS_TIMEOUT;
785
786 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
787 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
788 & TALITOS_RNGUSR_LO_RD)
789 && --timeout)
790 cpu_relax();
791 if (timeout == 0) {
792 dev_err(dev, "failed to reset rng hw\n");
793 return -ENODEV;
794 }
795
796 /* start generating */
797 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
798
799 return 0;
800}
801
802static int talitos_register_rng(struct device *dev)
803{
804 struct talitos_private *priv = dev_get_drvdata(dev);
805 int err;
806
	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
811 priv->rng.priv = (unsigned long)dev;
812
813 err = hwrng_register(&priv->rng);
814 if (!err)
815 priv->rng_registered = true;
816
817 return err;
818}
819
820static void talitos_unregister_rng(struct device *dev)
821{
822 struct talitos_private *priv = dev_get_drvdata(dev);
823
824 if (!priv->rng_registered)
825 return;
826
827 hwrng_unregister(&priv->rng);
828 priv->rng_registered = false;
829}
830
831/*
832 * crypto alg
833 */
834#define TALITOS_CRA_PRIORITY 3000
835/*
 * Defines a priority for doing AEAD with the descriptor type
837 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
838 */
839#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
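/*
 * worst case key storage: the largest AES key (32 bytes) followed by a
 * full SHA-512 block (128 bytes) of HMAC key material, i.e. 160 bytes
 */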
840#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
841#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
842
843struct talitos_ctx {
844 struct device *dev;
845 int ch;
846 __be32 desc_hdr_template;
847 u8 key[TALITOS_MAX_KEY_SIZE];
848 u8 iv[TALITOS_MAX_IV_LENGTH];
849 dma_addr_t dma_key;
850 unsigned int keylen;
851 unsigned int enckeylen;
852 unsigned int authkeylen;
853};
854
855#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
856#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
857
858struct talitos_ahash_req_ctx {
859 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
860 unsigned int hw_context_size;
861 u8 buf[2][HASH_MAX_BLOCK_SIZE];
862 int buf_idx;
863 unsigned int swinit;
864 unsigned int first;
865 unsigned int last;
866 unsigned int to_hash_later;
867 unsigned int nbuf;
868 struct scatterlist bufsl[2];
869 struct scatterlist *psrc;
870};
871
872struct talitos_export_state {
873 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
874 u8 buf[HASH_MAX_BLOCK_SIZE];
875 unsigned int swinit;
876 unsigned int first;
877 unsigned int last;
878 unsigned int to_hash_later;
879 unsigned int nbuf;
880};
881
882static int aead_setkey(struct crypto_aead *authenc,
883 const u8 *key, unsigned int keylen)
884{
885 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
886 struct device *dev = ctx->dev;
887 struct crypto_authenc_keys keys;
888
889 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
890 goto badkey;
891
892 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
893 goto badkey;
894
895 if (ctx->keylen)
896 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
897
898 memcpy(ctx->key, keys.authkey, keys.authkeylen);
899 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
900
901 ctx->keylen = keys.authkeylen + keys.enckeylen;
902 ctx->enckeylen = keys.enckeylen;
903 ctx->authkeylen = keys.authkeylen;
904 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
905 DMA_TO_DEVICE);
906
907 memzero_explicit(&keys, sizeof(keys));
908 return 0;
909
910badkey:
911 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
912 memzero_explicit(&keys, sizeof(keys));
913 return -EINVAL;
914}
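
/*
 * After a successful aead_setkey(), ctx->key (DMA-mapped at ctx->dma_key)
 * holds both keys back to back:
 *
 *	[ authentication (HMAC) key | encryption key ]
 *	^ dma_key                    ^ dma_key + authkeylen
 *
 * which is how ipsec_esp() below splits it across descriptor pointers.
 */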
915
916/*
917 * talitos_edesc - s/w-extended descriptor
918 * @src_nents: number of segments in input scatterlist
919 * @dst_nents: number of segments in output scatterlist
920 * @icv_ool: whether ICV is out-of-line
921 * @iv_dma: dma address of iv for checking continuity and link table
922 * @dma_len: length of dma mapped link_tbl space
923 * @dma_link_tbl: bus physical address of link_tbl/buf
924 * @desc: h/w descriptor
925 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
927 *
 * if decrypting (with authcheck), or if either src_nents or dst_nents
929 * is greater than 1, an integrity check value is concatenated to the end
930 * of link_tbl data
931 */
932struct talitos_edesc {
933 int src_nents;
934 int dst_nents;
935 bool icv_ool;
936 dma_addr_t iv_dma;
937 int dma_len;
938 dma_addr_t dma_link_tbl;
939 struct talitos_desc desc;
940 union {
941 struct talitos_ptr link_tbl[0];
942 u8 buf[0];
943 };
944};
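
/*
 * Rough edesc memory layout, as set up by talitos_edesc_alloc() below:
 * the bookkeeping fields, then the h/w descriptor, then dma_len bytes of
 * link_tbl[]/buf[].  On SEC2 that trailing area holds the input and output
 * link tables plus two ICVs; on SEC1 it is a bounce buffer for copied
 * src/dst data (and, for an ahash, also carries a second descriptor).
 */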
945
946static void talitos_sg_unmap(struct device *dev,
947 struct talitos_edesc *edesc,
948 struct scatterlist *src,
949 struct scatterlist *dst,
950 unsigned int len, unsigned int offset)
951{
952 struct talitos_private *priv = dev_get_drvdata(dev);
953 bool is_sec1 = has_ftr_sec1(priv);
954 unsigned int src_nents = edesc->src_nents ? : 1;
955 unsigned int dst_nents = edesc->dst_nents ? : 1;
956
957 if (is_sec1 && dst && dst_nents > 1) {
958 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
959 len, DMA_FROM_DEVICE);
960 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
961 offset);
962 }
963 if (src != dst) {
964 if (src_nents == 1 || !is_sec1)
965 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
966
967 if (dst && (dst_nents == 1 || !is_sec1))
968 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
969 } else if (src_nents == 1 || !is_sec1) {
970 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
971 }
972}
973
974static void ipsec_esp_unmap(struct device *dev,
975 struct talitos_edesc *edesc,
976 struct aead_request *areq)
977{
978 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
979 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
980 unsigned int ivsize = crypto_aead_ivsize(aead);
981 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
982 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
983
984 if (is_ipsec_esp)
985 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
986 DMA_FROM_DEVICE);
987 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
988
989 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
990 areq->assoclen);
991
992 if (edesc->dma_len)
993 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
994 DMA_BIDIRECTIONAL);
995
996 if (!is_ipsec_esp) {
997 unsigned int dst_nents = edesc->dst_nents ? : 1;
998
999 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1000 areq->assoclen + areq->cryptlen - ivsize);
1001 }
1002}
1003
1004/*
1005 * ipsec_esp descriptor callbacks
1006 */
1007static void ipsec_esp_encrypt_done(struct device *dev,
1008 struct talitos_desc *desc, void *context,
1009 int err)
1010{
1011 struct talitos_private *priv = dev_get_drvdata(dev);
1012 bool is_sec1 = has_ftr_sec1(priv);
1013 struct aead_request *areq = context;
1014 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1015 unsigned int authsize = crypto_aead_authsize(authenc);
1016 unsigned int ivsize = crypto_aead_ivsize(authenc);
1017 struct talitos_edesc *edesc;
1018 struct scatterlist *sg;
1019 void *icvdata;
1020
1021 edesc = container_of(desc, struct talitos_edesc, desc);
1022
1023 ipsec_esp_unmap(dev, edesc, areq);
1024
1025 /* copy the generated ICV to dst */
1026 if (edesc->icv_ool) {
1027 if (is_sec1)
1028 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1029 else
1030 icvdata = &edesc->link_tbl[edesc->src_nents +
1031 edesc->dst_nents + 2];
1032 sg = sg_last(areq->dst, edesc->dst_nents);
1033 memcpy((char *)sg_virt(sg) + sg->length - authsize,
1034 icvdata, authsize);
1035 }
1036
1037 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1038
1039 kfree(edesc);
1040
1041 aead_request_complete(areq, err);
1042}
1043
1044static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1045 struct talitos_desc *desc,
1046 void *context, int err)
1047{
1048 struct aead_request *req = context;
1049 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1050 unsigned int authsize = crypto_aead_authsize(authenc);
1051 struct talitos_edesc *edesc;
1052 struct scatterlist *sg;
1053 char *oicv, *icv;
1054 struct talitos_private *priv = dev_get_drvdata(dev);
1055 bool is_sec1 = has_ftr_sec1(priv);
1056
1057 edesc = container_of(desc, struct talitos_edesc, desc);
1058
1059 ipsec_esp_unmap(dev, edesc, req);
1060
1061 if (!err) {
1062 /* auth check */
1063 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1064 icv = (char *)sg_virt(sg) + sg->length - authsize;
1065
1066 if (edesc->dma_len) {
1067 if (is_sec1)
				oicv = (char *)edesc->buf + req->assoclen +
				       req->cryptlen;
1070 else
1071 oicv = (char *)
1072 &edesc->link_tbl[edesc->src_nents +
1073 edesc->dst_nents + 2];
1074 if (edesc->icv_ool)
1075 icv = oicv + authsize;
1076 } else
1077 oicv = (char *)&edesc->link_tbl[0];
1078
1079 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1080 }
1081
1082 kfree(edesc);
1083
1084 aead_request_complete(req, err);
1085}
1086
1087static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1088 struct talitos_desc *desc,
1089 void *context, int err)
1090{
1091 struct aead_request *req = context;
1092 struct talitos_edesc *edesc;
1093
1094 edesc = container_of(desc, struct talitos_edesc, desc);
1095
1096 ipsec_esp_unmap(dev, edesc, req);
1097
1098 /* check ICV auth status */
1099 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1100 DESC_HDR_LO_ICCR1_PASS))
1101 err = -EBADMSG;
1102
1103 kfree(edesc);
1104
1105 aead_request_complete(req, err);
1106}
1107
1108/*
1109 * convert scatterlist to SEC h/w link table format
1110 * stop at cryptlen bytes
1111 */
1112static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1113 unsigned int offset, int cryptlen,
1114 struct talitos_ptr *link_tbl_ptr)
1115{
1116 int n_sg = sg_count;
1117 int count = 0;
1118
1119 while (cryptlen && sg && n_sg--) {
1120 unsigned int len = sg_dma_len(sg);
1121
1122 if (offset >= len) {
1123 offset -= len;
1124 goto next;
1125 }
1126
1127 len -= offset;
1128
1129 if (len > cryptlen)
1130 len = cryptlen;
1131
1132 to_talitos_ptr(link_tbl_ptr + count,
1133 sg_dma_address(sg) + offset, len, 0);
1134 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1135 count++;
1136 cryptlen -= len;
1137 offset = 0;
1138
1139next:
1140 sg = sg_next(sg);
1141 }
1142
1143 /* tag end of link table */
1144 if (count > 0)
1145 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1146 DESC_PTR_LNKTBL_RETURN, 0);
1147
1148 return count;
1149}
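
/*
 * Worked example: for three 16-byte scatterlist entries with offset 8 and
 * cryptlen 32, the loop above emits three link table entries of 8, 16 and
 * 8 bytes (the first 8 bytes being skipped by the offset), and tags the
 * last entry with DESC_PTR_LNKTBL_RETURN.
 */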
1150
1151static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1152 unsigned int len, struct talitos_edesc *edesc,
1153 struct talitos_ptr *ptr, int sg_count,
1154 unsigned int offset, int tbl_off, int elen)
1155{
1156 struct talitos_private *priv = dev_get_drvdata(dev);
1157 bool is_sec1 = has_ftr_sec1(priv);
1158
1159 if (!src) {
1160 to_talitos_ptr(ptr, 0, 0, is_sec1);
1161 return 1;
1162 }
1163 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1164 if (sg_count == 1) {
1165 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1166 return sg_count;
1167 }
1168 if (is_sec1) {
1169 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1170 return sg_count;
1171 }
1172 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1173 &edesc->link_tbl[tbl_off]);
1174 if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
1176 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1177 return sg_count;
1178 }
1179 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1180 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1181 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1182
1183 return sg_count;
1184}
1185
1186static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1187 unsigned int len, struct talitos_edesc *edesc,
1188 struct talitos_ptr *ptr, int sg_count,
1189 unsigned int offset, int tbl_off)
1190{
1191 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1192 tbl_off, 0);
1193}
1194
1195/*
1196 * fill in and submit ipsec_esp descriptor
1197 */
1198static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1199 void (*callback)(struct device *dev,
1200 struct talitos_desc *desc,
1201 void *context, int error))
1202{
1203 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1204 unsigned int authsize = crypto_aead_authsize(aead);
1205 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1206 struct device *dev = ctx->dev;
1207 struct talitos_desc *desc = &edesc->desc;
1208 unsigned int cryptlen = areq->cryptlen;
1209 unsigned int ivsize = crypto_aead_ivsize(aead);
1210 int tbl_off = 0;
1211 int sg_count, ret;
1212 int elen = 0;
1213 bool sync_needed = false;
1214 struct talitos_private *priv = dev_get_drvdata(dev);
1215 bool is_sec1 = has_ftr_sec1(priv);
1216 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1217 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1218 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1219
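	/*
	 * descriptor pointer usage for the IPSEC_ESP type (the HSNA type
	 * swaps ptr[2]/ptr[3] and receives the ICV, not the IV, in ptr[6]):
	 * ptr[0] HMAC key, ptr[1] HMAC (assoc) data, ptr[2] cipher IV,
	 * ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out,
	 * ptr[6] IV out
	 */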
1220 /* hmac key */
1221 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1222
1223 sg_count = edesc->src_nents ?: 1;
1224 if (is_sec1 && sg_count > 1)
1225 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1226 areq->assoclen + cryptlen);
1227 else
1228 sg_count = dma_map_sg(dev, areq->src, sg_count,
1229 (areq->src == areq->dst) ?
1230 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1231
1232 /* hmac data */
1233 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1234 &desc->ptr[1], sg_count, 0, tbl_off);
1235
1236 if (ret > 1) {
1237 tbl_off += ret;
1238 sync_needed = true;
1239 }
1240
1241 /* cipher iv */
1242 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1243
1244 /* cipher key */
1245 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1246 ctx->enckeylen, is_sec1);
1247
1248 /*
1249 * cipher in
1250 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC appended to the ciphertext,
1252 * typically 12 for ipsec
1253 */
1254 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1255 elen = authsize;
1256
1257 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1258 sg_count, areq->assoclen, tbl_off, elen);
1259
1260 if (ret > 1) {
1261 tbl_off += ret;
1262 sync_needed = true;
1263 }
1264
1265 /* cipher out */
1266 if (areq->src != areq->dst) {
1267 sg_count = edesc->dst_nents ? : 1;
1268 if (!is_sec1 || sg_count == 1)
1269 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1270 }
1271
1272 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1273 sg_count, areq->assoclen, tbl_off);
1274
1275 if (is_ipsec_esp)
1276 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1277
1278 /* ICV data */
1279 if (ret > 1) {
1280 tbl_off += ret;
1281 edesc->icv_ool = true;
1282 sync_needed = true;
1283
1284 if (is_ipsec_esp) {
1285 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1286 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1287 sizeof(struct talitos_ptr) + authsize;
1288
1289 /* Add an entry to the link table for ICV data */
1290 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1291 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1292 is_sec1);
1293
1294 /* icv data follows link tables */
1295 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1296 authsize, is_sec1);
1297 } else {
1298 dma_addr_t addr = edesc->dma_link_tbl;
1299
1300 if (is_sec1)
1301 addr += areq->assoclen + cryptlen;
1302 else
1303 addr += sizeof(struct talitos_ptr) * tbl_off;
1304
1305 to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1306 }
1307 } else if (!is_ipsec_esp) {
1308 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1309 &desc->ptr[6], sg_count, areq->assoclen +
1310 cryptlen,
1311 tbl_off);
1312 if (ret > 1) {
1313 tbl_off += ret;
1314 edesc->icv_ool = true;
1315 sync_needed = true;
1316 } else {
1317 edesc->icv_ool = false;
1318 }
1319 } else {
1320 edesc->icv_ool = false;
1321 }
1322
1323 /* iv out */
1324 if (is_ipsec_esp)
1325 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1326 DMA_FROM_DEVICE);
1327
1328 if (sync_needed)
1329 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1330 edesc->dma_len,
1331 DMA_BIDIRECTIONAL);
1332
1333 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1334 if (ret != -EINPROGRESS) {
1335 ipsec_esp_unmap(dev, edesc, areq);
1336 kfree(edesc);
1337 }
1338 return ret;
1339}
1340
1341/*
1342 * allocate and map the extended descriptor
1343 */
1344static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1345 struct scatterlist *src,
1346 struct scatterlist *dst,
1347 u8 *iv,
1348 unsigned int assoclen,
1349 unsigned int cryptlen,
1350 unsigned int authsize,
1351 unsigned int ivsize,
1352 int icv_stashing,
1353 u32 cryptoflags,
1354 bool encrypt)
1355{
1356 struct talitos_edesc *edesc;
1357 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1358 dma_addr_t iv_dma = 0;
1359 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1360 GFP_ATOMIC;
1361 struct talitos_private *priv = dev_get_drvdata(dev);
1362 bool is_sec1 = has_ftr_sec1(priv);
1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364 void *err;
1365
1366 if (cryptlen + authsize > max_len) {
1367 dev_err(dev, "length exceeds h/w max limit\n");
1368 return ERR_PTR(-EINVAL);
1369 }
1370
1371 if (ivsize)
1372 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1373
1374 if (!dst || dst == src) {
1375 src_len = assoclen + cryptlen + authsize;
1376 src_nents = sg_nents_for_len(src, src_len);
1377 if (src_nents < 0) {
1378 dev_err(dev, "Invalid number of src SG.\n");
1379 err = ERR_PTR(-EINVAL);
1380 goto error_sg;
1381 }
1382 src_nents = (src_nents == 1) ? 0 : src_nents;
1383 dst_nents = dst ? src_nents : 0;
1384 dst_len = 0;
	} else { /* dst && dst != src */
1386 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1387 src_nents = sg_nents_for_len(src, src_len);
1388 if (src_nents < 0) {
1389 dev_err(dev, "Invalid number of src SG.\n");
1390 err = ERR_PTR(-EINVAL);
1391 goto error_sg;
1392 }
1393 src_nents = (src_nents == 1) ? 0 : src_nents;
1394 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1395 dst_nents = sg_nents_for_len(dst, dst_len);
1396 if (dst_nents < 0) {
1397 dev_err(dev, "Invalid number of dst SG.\n");
1398 err = ERR_PTR(-EINVAL);
1399 goto error_sg;
1400 }
1401 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1402 }
1403
1404 /*
1405 * allocate space for base edesc plus the link tables,
1406 * allowing for two separate entries for AD and generated ICV (+ 2),
1407 * and space for two sets of ICVs (stashed and generated)
1408 */
1409 alloc_len = sizeof(struct talitos_edesc);
1410 if (src_nents || dst_nents) {
1411 if (is_sec1)
1412 dma_len = (src_nents ? src_len : 0) +
1413 (dst_nents ? dst_len : 0);
1414 else
1415 dma_len = (src_nents + dst_nents + 2) *
1416 sizeof(struct talitos_ptr) + authsize * 2;
1417 alloc_len += dma_len;
1418 } else {
1419 dma_len = 0;
1420 alloc_len += icv_stashing ? authsize : 0;
1421 }
1422
	/* if it's an ahash, add space for a second desc next to the first one */
1424 if (is_sec1 && !dst)
1425 alloc_len += sizeof(struct talitos_desc);
1426
1427 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1428 if (!edesc) {
1429 err = ERR_PTR(-ENOMEM);
1430 goto error_sg;
1431 }
1432 memset(&edesc->desc, 0, sizeof(edesc->desc));
1433
1434 edesc->src_nents = src_nents;
1435 edesc->dst_nents = dst_nents;
1436 edesc->iv_dma = iv_dma;
1437 edesc->dma_len = dma_len;
1438 if (dma_len) {
1439 void *addr = &edesc->link_tbl[0];
1440
1441 if (is_sec1 && !dst)
1442 addr += sizeof(struct talitos_desc);
1443 edesc->dma_link_tbl = dma_map_single(dev, addr,
1444 edesc->dma_len,
1445 DMA_BIDIRECTIONAL);
1446 }
1447 return edesc;
1448error_sg:
1449 if (iv_dma)
1450 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1451 return err;
1452}
1453
1454static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1455 int icv_stashing, bool encrypt)
1456{
1457 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1458 unsigned int authsize = crypto_aead_authsize(authenc);
1459 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1460 unsigned int ivsize = crypto_aead_ivsize(authenc);
1461
1462 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1463 iv, areq->assoclen, areq->cryptlen,
1464 authsize, ivsize, icv_stashing,
1465 areq->base.flags, encrypt);
1466}
1467
1468static int aead_encrypt(struct aead_request *req)
1469{
1470 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1471 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1472 struct talitos_edesc *edesc;
1473
1474 /* allocate extended descriptor */
1475 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1476 if (IS_ERR(edesc))
1477 return PTR_ERR(edesc);
1478
1479 /* set encrypt */
1480 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1481
1482 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1483}
1484
1485static int aead_decrypt(struct aead_request *req)
1486{
1487 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1488 unsigned int authsize = crypto_aead_authsize(authenc);
1489 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1490 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1491 struct talitos_edesc *edesc;
1492 struct scatterlist *sg;
1493 void *icvdata;
1494
1495 req->cryptlen -= authsize;
1496
1497 /* allocate extended descriptor */
1498 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1499 if (IS_ERR(edesc))
1500 return PTR_ERR(edesc);
1501
1502 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1503 ((!edesc->src_nents && !edesc->dst_nents) ||
1504 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1505
1506 /* decrypt and check the ICV */
1507 edesc->desc.hdr = ctx->desc_hdr_template |
1508 DESC_HDR_DIR_INBOUND |
1509 DESC_HDR_MODE1_MDEU_CICV;
1510
1511 /* reset integrity check result bits */
1512
1513 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1514 }
1515
1516 /* Have to check the ICV with software */
1517 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1518
1519 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1520 if (edesc->dma_len)
1521 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1522 edesc->dst_nents + 2];
1523 else
1524 icvdata = &edesc->link_tbl[0];
1525
1526 sg = sg_last(req->src, edesc->src_nents ? : 1);
1527
1528 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1529
1530 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1531}
1532
1533static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1534 const u8 *key, unsigned int keylen)
1535{
1536 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1537 struct device *dev = ctx->dev;
1538 u32 tmp[DES_EXPKEY_WORDS];
1539
1540 if (keylen > TALITOS_MAX_KEY_SIZE) {
1541 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1542 return -EINVAL;
1543 }
1544
1545 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1546 CRYPTO_TFM_REQ_WEAK_KEY) &&
1547 !des_ekey(tmp, key)) {
1548 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1549 return -EINVAL;
1550 }
1551
1552 if (ctx->keylen)
1553 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1554
1555 memcpy(&ctx->key, key, keylen);
1556 ctx->keylen = keylen;
1557
1558 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1559
1560 return 0;
1561}
1562
1563static void common_nonsnoop_unmap(struct device *dev,
1564 struct talitos_edesc *edesc,
1565 struct ablkcipher_request *areq)
1566{
1567 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1568
1569 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1570 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1571
1572 if (edesc->dma_len)
1573 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1574 DMA_BIDIRECTIONAL);
1575}
1576
1577static void ablkcipher_done(struct device *dev,
1578 struct talitos_desc *desc, void *context,
1579 int err)
1580{
1581 struct ablkcipher_request *areq = context;
1582 struct talitos_edesc *edesc;
1583
1584 edesc = container_of(desc, struct talitos_edesc, desc);
1585
1586 common_nonsnoop_unmap(dev, edesc, areq);
1587
1588 kfree(edesc);
1589
1590 areq->base.complete(&areq->base, err);
1591}
1592
1593static int common_nonsnoop(struct talitos_edesc *edesc,
1594 struct ablkcipher_request *areq,
1595 void (*callback) (struct device *dev,
1596 struct talitos_desc *desc,
1597 void *context, int error))
1598{
1599 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1600 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1601 struct device *dev = ctx->dev;
1602 struct talitos_desc *desc = &edesc->desc;
1603 unsigned int cryptlen = areq->nbytes;
1604 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1605 int sg_count, ret;
1606 bool sync_needed = false;
1607 struct talitos_private *priv = dev_get_drvdata(dev);
1608 bool is_sec1 = has_ftr_sec1(priv);
1609
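	/*
	 * descriptor pointer usage for this descriptor type:
	 * ptr[0] empty, ptr[1] IV in, ptr[2] cipher key, ptr[3] cipher in,
	 * ptr[4] cipher out, ptr[5] IV out, ptr[6] empty
	 */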
1610 /* first DWORD empty */
1611
1612 /* cipher iv */
1613 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1614
1615 /* cipher key */
1616 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1617
1618 sg_count = edesc->src_nents ?: 1;
1619 if (is_sec1 && sg_count > 1)
1620 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1621 cryptlen);
1622 else
1623 sg_count = dma_map_sg(dev, areq->src, sg_count,
1624 (areq->src == areq->dst) ?
1625 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1626 /*
1627 * cipher in
1628 */
1629 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1630 &desc->ptr[3], sg_count, 0, 0);
1631 if (sg_count > 1)
1632 sync_needed = true;
1633
1634 /* cipher out */
1635 if (areq->src != areq->dst) {
1636 sg_count = edesc->dst_nents ? : 1;
1637 if (!is_sec1 || sg_count == 1)
1638 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1639 }
1640
1641 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1642 sg_count, 0, (edesc->src_nents + 1));
1643 if (ret > 1)
1644 sync_needed = true;
1645
1646 /* iv out */
1647 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1648 DMA_FROM_DEVICE);
1649
1650 /* last DWORD empty */
1651
1652 if (sync_needed)
1653 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1654 edesc->dma_len, DMA_BIDIRECTIONAL);
1655
1656 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1657 if (ret != -EINPROGRESS) {
1658 common_nonsnoop_unmap(dev, edesc, areq);
1659 kfree(edesc);
1660 }
1661 return ret;
1662}
1663
1664static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1665 areq, bool encrypt)
1666{
1667 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1668 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1669 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1670
1671 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1672 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1673 areq->base.flags, encrypt);
1674}
1675
1676static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1677{
1678 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1679 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1680 struct talitos_edesc *edesc;
1681
1682 /* allocate extended descriptor */
1683 edesc = ablkcipher_edesc_alloc(areq, true);
1684 if (IS_ERR(edesc))
1685 return PTR_ERR(edesc);
1686
1687 /* set encrypt */
1688 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1689
1690 return common_nonsnoop(edesc, areq, ablkcipher_done);
1691}
1692
1693static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1694{
1695 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1696 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1697 struct talitos_edesc *edesc;
1698
1699 /* allocate extended descriptor */
1700 edesc = ablkcipher_edesc_alloc(areq, false);
1701 if (IS_ERR(edesc))
1702 return PTR_ERR(edesc);
1703
1704 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1705
1706 return common_nonsnoop(edesc, areq, ablkcipher_done);
1707}
1708
1709static void common_nonsnoop_hash_unmap(struct device *dev,
1710 struct talitos_edesc *edesc,
1711 struct ahash_request *areq)
1712{
1713 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1714 struct talitos_private *priv = dev_get_drvdata(dev);
1715 bool is_sec1 = has_ftr_sec1(priv);
1716 struct talitos_desc *desc = &edesc->desc;
1717 struct talitos_desc *desc2 = desc + 1;
1718
1719 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1720 if (desc->next_desc &&
1721 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1722 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1723
1724 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1725
1726 /* When using hashctx-in, must unmap it. */
1727 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1728 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1729 DMA_TO_DEVICE);
1730 else if (desc->next_desc)
1731 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1732 DMA_TO_DEVICE);
1733
1734 if (is_sec1 && req_ctx->nbuf)
1735 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1736 DMA_TO_DEVICE);
1737
1738 if (edesc->dma_len)
1739 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1740 DMA_BIDIRECTIONAL);
1741
1742 if (edesc->desc.next_desc)
1743 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1744 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1745}
1746
1747static void ahash_done(struct device *dev,
1748 struct talitos_desc *desc, void *context,
1749 int err)
1750{
1751 struct ahash_request *areq = context;
1752 struct talitos_edesc *edesc =
1753 container_of(desc, struct talitos_edesc, desc);
1754 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1755
1756 if (!req_ctx->last && req_ctx->to_hash_later) {
1757 /* Position any partial block for next update/final/finup */
1758 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1759 req_ctx->nbuf = req_ctx->to_hash_later;
1760 }
1761 common_nonsnoop_hash_unmap(dev, edesc, areq);
1762
1763 kfree(edesc);
1764
1765 areq->base.complete(&areq->base, err);
1766}
1767
1768/*
 * SEC1 misbehaves when hashing a zero-length message, so we do the
 * padding ourselves and submit a padded block
1771 */
1772static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1773 struct talitos_edesc *edesc,
1774 struct talitos_ptr *ptr)
1775{
1776 static u8 padded_hash[64] = {
1777 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1780 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1781 };
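
	/*
	 * That is one hash block of standard Merkle-Damgard padding for an
	 * empty message: a 1 bit (the 0x80 byte) followed by zeroes,
	 * including the (zero) 64-bit message-length field.
	 */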
1782
1783 pr_err_once("Bug in SEC1, padding ourself\n");
1784 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1785 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1786 (char *)padded_hash, DMA_TO_DEVICE);
1787}
1788
1789static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1790 struct ahash_request *areq, unsigned int length,
1791 unsigned int offset,
1792 void (*callback) (struct device *dev,
1793 struct talitos_desc *desc,
1794 void *context, int error))
1795{
1796 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1797 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1798 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1799 struct device *dev = ctx->dev;
1800 struct talitos_desc *desc = &edesc->desc;
1801 int ret;
1802 bool sync_needed = false;
1803 struct talitos_private *priv = dev_get_drvdata(dev);
1804 bool is_sec1 = has_ftr_sec1(priv);
1805 int sg_count;
1806
1807 /* first DWORD empty */
1808
1809 /* hash context in */
1810 if (!req_ctx->first || req_ctx->swinit) {
1811 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1812 req_ctx->hw_context_size,
1813 req_ctx->hw_context,
1814 DMA_TO_DEVICE);
1815 req_ctx->swinit = 0;
1816 }
1817 /* Indicate next op is not the first. */
1818 req_ctx->first = 0;
1819
1820 /* HMAC key */
1821 if (ctx->keylen)
1822 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1823 is_sec1);
1824
1825 if (is_sec1 && req_ctx->nbuf)
1826 length -= req_ctx->nbuf;
1827
1828 sg_count = edesc->src_nents ?: 1;
1829 if (is_sec1 && sg_count > 1)
1830 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1831 edesc->buf + sizeof(struct talitos_desc),
1832 length, req_ctx->nbuf);
1833 else if (length)
1834 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1835 DMA_TO_DEVICE);
1836 /*
1837 * data in
1838 */
1839 if (is_sec1 && req_ctx->nbuf) {
1840 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1841 req_ctx->buf[req_ctx->buf_idx],
1842 DMA_TO_DEVICE);
1843 } else {
1844 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1845 &desc->ptr[3], sg_count, offset, 0);
1846 if (sg_count > 1)
1847 sync_needed = true;
1848 }
1849
1850 /* fifth DWORD empty */
1851
1852 /* hash/HMAC out -or- hash context out */
1853 if (req_ctx->last)
1854 map_single_talitos_ptr(dev, &desc->ptr[5],
1855 crypto_ahash_digestsize(tfm),
1856 areq->result, DMA_FROM_DEVICE);
1857 else
1858 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1859 req_ctx->hw_context_size,
1860 req_ctx->hw_context,
1861 DMA_FROM_DEVICE);
1862
1863 /* last DWORD empty */
1864
1865 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1866 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1867
1868 if (is_sec1 && req_ctx->nbuf && length) {
1869 struct talitos_desc *desc2 = desc + 1;
1870 dma_addr_t next_desc;
1871
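		/*
		 * Chain a second descriptor: the first one (continuation
		 * mode, no padding, no done notification) hashes the nbuf
		 * bytes buffered from the previous update, while desc2
		 * hashes the new data and, on the final request, performs
		 * the padding.
		 */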
1872 memset(desc2, 0, sizeof(*desc2));
1873 desc2->hdr = desc->hdr;
1874 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1875 desc2->hdr1 = desc2->hdr;
1876 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1877 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1878 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1879
1880 if (desc->ptr[1].ptr)
1881 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1882 is_sec1);
1883 else
1884 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1885 req_ctx->hw_context_size,
1886 req_ctx->hw_context,
1887 DMA_TO_DEVICE);
1888 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1889 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1890 &desc2->ptr[3], sg_count, offset, 0);
1891 if (sg_count > 1)
1892 sync_needed = true;
1893 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1894 if (req_ctx->last)
1895 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1896 req_ctx->hw_context_size,
1897 req_ctx->hw_context,
1898 DMA_FROM_DEVICE);
1899
1900 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1901 DMA_BIDIRECTIONAL);
1902 desc->next_desc = cpu_to_be32(next_desc);
1903 }
1904
1905 if (sync_needed)
1906 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1907 edesc->dma_len, DMA_BIDIRECTIONAL);
1908
1909 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1910 if (ret != -EINPROGRESS) {
1911 common_nonsnoop_hash_unmap(dev, edesc, areq);
1912 kfree(edesc);
1913 }
1914 return ret;
1915}
1916
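/*
 * allocate an extended descriptor for a hash request; on SEC1 the bytes
 * already held in the request context buffer are hashed from that buffer,
 * so they are not part of the scatterlist byte count
 */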
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

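/*
 * reset the request context; the dummy DMA map/unmap of hw_context
 * flushes it from the CPU caches so that the later _nosync mappings of
 * the same buffer are safe
 */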
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

1959/*
1960 * on h/w without explicit sha224 support, we initialize h/w context
1961 * manually with sha224 constants, and tell it to run sha256.
1962 */
1963static int ahash_init_sha224_swinit(struct ahash_request *areq)
1964{
1965 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1966
1967 req_ctx->hw_context[0] = SHA224_H0;
1968 req_ctx->hw_context[1] = SHA224_H1;
1969 req_ctx->hw_context[2] = SHA224_H2;
1970 req_ctx->hw_context[3] = SHA224_H3;
1971 req_ctx->hw_context[4] = SHA224_H4;
1972 req_ctx->hw_context[5] = SHA224_H5;
1973 req_ctx->hw_context[6] = SHA224_H6;
1974 req_ctx->hw_context[7] = SHA224_H7;
1975
1976 /* init 64-bit count */
1977 req_ctx->hw_context[8] = 0;
1978 req_ctx->hw_context[9] = 0;
1979
1980 ahash_init(areq);
1981 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1982
1983 return 0;
1984}
1985
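/*
 * ahash_process_req - buffer or hash the data of one update/final request
 *
 * Data is hashed in whole blocks only; any remainder is kept in the
 * request context buffer for a later call, and (except on the last
 * request) a non-empty tail is always held back - one full block when the
 * total is an exact multiple of the block size. For example, with a
 * 64-byte block size, 10 bytes already buffered and a 100-byte update,
 * 64 bytes are hashed now and 46 are carried over in buf[].
 */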
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int offset = 0;
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = areq->src;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
				    ahash_done);
}

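/*
 * ahash entry points: update feeds more data; final, finup and digest set
 * req_ctx->last so the h/w pads the stream and writes out the digest
 */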
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

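/*
 * export/import the partial hash state; the dummy DMA map/unmap around
 * hw_context hands its cache lines back and forth between device and CPU
 * so the snapshot stays coherent
 */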
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}

static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

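/*
 * synchronously hash a key that is longer than the block size down to one
 * digest, using this same tfm; ctx->keylen stays 0 meanwhile, so the
 * operation is a plain hash rather than an HMAC
 */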
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}

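/*
 * HMAC setkey: per the HMAC definition, keys longer than the block size
 * are first hashed down to the digest size before being stored and DMA
 * mapped for use in descriptor pointer 2
 */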
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret)
			return -EINVAL;

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}


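/*
 * registration template: pairs one crypto_alg/ahash_alg/aead_alg
 * definition with the descriptor header template that selects the
 * execution unit(s) and mode used to run it
 */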
struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = 1,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

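/*
 * one-time tfm setup shared by all algorithm types: bind the tfm to the
 * device, assign a SEC channel round-robin and seed the per-tfm
 * descriptor header template
 */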
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

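/* recover the owning talitos_crypto_alg from the generic crypto_alg */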
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}

3090/*
3091 * given the alg's descriptor header template, determine whether descriptor
3092 * type and primary/secondary execution units required match the hw
3093 * capabilities description provided in the device tree node.
3094 */
3095static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3096{
3097 struct talitos_private *priv = dev_get_drvdata(dev);
3098 int ret;
3099
3100 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3101 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3102
3103 if (SECONDARY_EU(desc_hdr_template))
3104 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3105 & priv->exec_units);
3106
3107 return ret;
3108}
3109
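/*
 * device teardown: unregister every algorithm and the RNG, then release
 * the IRQ line(s) and kill the done tasklets
 */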
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	return 0;
}

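/*
 * instantiate a talitos_crypto_alg from a template: wire up the
 * type-specific entry points, reject algorithms the h/w feature set
 * cannot run, and apply the sha224 software-init fixup where needed
 */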
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

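/*
 * map and request the interrupt line(s): SEC1 and single-IRQ SEC2 parts
 * use one handler for all four channels, while dual-IRQ SEC2 parts split
 * channels 0/2 and 1/3 across the two lines
 */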
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

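/*
 * probe: map the register space, read the SEC capabilities from the
 * device tree, request IRQs and set up tasklets, allocate the per-channel
 * request fifos, reset the h/w, then register the RNG and every
 * algorithm the execution units support
 */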
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev, priv->num_channels,
				  sizeof(struct talitos_channel), GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev, priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");