// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};

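/* A version 3 device generates a fresh job ID for every command it issues;
 * later devices do not make use of the field, so 0 suffices.
 */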
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_sg_head = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);
	unsigned int sg_combined_len = 0;

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
		/* Advance to the next DMA scatterlist entry */
		wa->dma_sg = sg_next(wa->dma_sg);

		/* In the case that the DMA mapped scatterlist has entries
		 * that have been merged, the non-DMA mapped scatterlist
		 * must be advanced multiple times for each merged entry.
		 * This ensures that the current non-DMA mapped entry
		 * corresponds to the current DMA mapped entry.
		 */
		do {
			sg_combined_len += wa->sg->length;
			wa->sg = sg_next(wa->sg);
		} while (wa->sg_used > sg_combined_len);

		wa->sg_used = 0;
	}
}

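/* Release a DMA work area: small buffers come from the per-queue DMA pool,
 * while larger buffers were kzalloc'd and mapped with dma_map_single().
 */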
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}

static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL,
					      &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address)) {
			kfree(wa->address);
			wa->address = NULL;
			return -ENOMEM;
		}

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}

static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

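	/* Reverse the copied bytes in place, swapping from both ends toward
	 * the middle with XOR swaps (no temporary variable): the caller's
	 * big-endian data becomes the little-endian layout the CCP expects.
	 */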
	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}

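/* Stage data between the scatterlist and the bounce (DM) buffer: from == 0
 * fills the bounce buffer from the scatterlist, from != 0 empties it back
 * out. Returns the number of bytes staged.
 */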
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}

static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

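/* Complete an operation: clear the init flag for subsequent passes and, if
 * the output went through the bounce buffer, drain it to the destination
 * scatterlist; otherwise just advance the destination work area.
 */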
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}

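/* Move a work area to or from a storage block (SB) slot by issuing a
 * passthru operation, applying the requested byte swap along the way.
 */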
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

static noinline_for_stack int
ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int authsize;
	unsigned int jobid;
	unsigned int ilen;
	bool in_place = true; /* Default value */
	__be64 *final;
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* Zero defaults to 16 bytes, the maximum size */
	authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
	switch (authsize) {
	case 16:
	case 15:
	case 14:
	case 13:
	case 12:
	case 8:
	case 4:
		break;
	default:
		return -EINVAL;
	}

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - authsize;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
			op.init = 0;
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = ilen % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
			op.init = 0;
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
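
	/* The final GHASH block is len(AAD) || len(plaintext), each a 64-bit
	 * big-endian bit count, as specified for GCM.
	 */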
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;
	op.u.aes.size = 0;
	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_final_wa;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_final_wa;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
		if (ret) {
			ccp_dm_free(&tag);
			goto e_final_wa;
		}

		ret = crypto_memneq(tag.address, final_wa.address,
				    authsize) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_final_wa:
	ccp_dm_free(&final_wa);

e_dst:
	if (ilen > 0 && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (ilen > 0)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

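/* Illustrative sketch (assumed caller usage, not code from this file): a
 * client fills a struct ccp_cmd and submits it with ccp_enqueue_cmd()
 * rather than calling these routines directly, e.g.:
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_CBC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = key_sg;			// big-endian key material
 *	cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.iv = iv_sg;			// AES_BLOCK_SIZE bytes
 *	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = src_sg;
 *	cmd.u.aes.dst = dst_sg;
 *	cmd.u.aes.src_len = src_len;		// block multiple for CBC
 *	cmd.callback = my_callback;		// completion hook
 *	ret = ccp_enqueue_cmd(&cmd);		// may return -EINPROGRESS
 *
 * key_sg/iv_sg/src_sg/dst_sg and my_callback are hypothetical names used
 * only for this sketch.
 */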
static noinline_for_stack int
ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
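	/* CFB (CFB128 only) and CTR operate on a full 128-bit unit; the
	 * hardware size field encodes this as a bit count minus one (127),
	 * matching the (nbytes * 8) - 1 encoding used for the final GCTR
	 * block in the GCM path above. Other modes use 0.
	 */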
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static noinline_for_stack int
ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static noinline_for_stack int
ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}
e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}

static noinline_for_stack int
ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
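		/* hmac_buf is opad || inner digest; hashing it yields
		 * H(opad || H(ipad || msg)), i.e. the HMAC value.
		 */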
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}

static noinline_for_stack int
ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;
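	/* e.g. a 2048-bit key gives o_len = 32 * 8 = 256 bytes (exactly
	 * 2048 bits) and i_len = 512 bytes; a 2049-bit key would round up
	 * to o_len = 288 bytes.
	 */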
1864
1865 sb_count = 0;
1866 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1867 /* sb_count is the number of storage block slots required
1868 * for the modulus.
1869 */
1870 sb_count = o_len / CCP_SB_BYTES;
1871 op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
1872 sb_count);
1873 if (!op.sb_key)
1874 return -EIO;
1875 } else {
1876 /* A version 5 device allows a modulus size that will not fit
1877 * in the LSB, so the command will transfer it from memory.
1878 * Set the sb key to the default, even though it's not used.
1879 */
1880 op.sb_key = cmd_q->sb_key;
1881 }
1882
1883 /* The RSA exponent must be in little endian format. Reverse its
1884 * byte order.
1885 */
1886 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1887 if (ret)
1888 goto e_sb;
1889
1890 ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
1891 if (ret)
1892 goto e_exp;
1893
1894 if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
1895 /* Copy the exponent to the local storage block, using
1896 * as many 32-byte blocks as were allocated above. It's
1897 * already little endian, so no further change is required.
1898 */
1899 ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
1900 CCP_PASSTHRU_BYTESWAP_NOOP);
1901 if (ret) {
1902 cmd->engine_error = cmd_q->cmd_error;
1903 goto e_exp;
1904 }
1905 } else {
1906 /* The exponent can be retrieved from memory via DMA. */
1907 op.exp.u.dma.address = exp.dma.address;
1908 op.exp.u.dma.offset = 0;
1909 }
1910
1911 /* Concatenate the modulus and the message. Both the modulus and
1912 * the operands must be in little endian format. Since the input
1913 * is in big endian format it must be converted.
1914 */
1915 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1916 if (ret)
1917 goto e_exp;
1918
1919 ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
1920 if (ret)
1921 goto e_src;
1922 ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
1923 if (ret)
1924 goto e_src;
1925
1926 /* Prepare the output area for the operation */
1927 ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
1928 if (ret)
1929 goto e_src;
1930
1931 op.soc = 1;
1932 op.src.u.dma.address = src.dma.address;
1933 op.src.u.dma.offset = 0;
1934 op.src.u.dma.length = i_len;
1935 op.dst.u.dma.address = dst.dma.address;
1936 op.dst.u.dma.offset = 0;
1937 op.dst.u.dma.length = o_len;
1938
1939 op.u.rsa.mod_size = rsa->key_size;
1940 op.u.rsa.input_len = i_len;
1941
1942 ret = cmd_q->ccp->vdata->perform->rsa(&op);
1943 if (ret) {
1944 cmd->engine_error = cmd_q->cmd_error;
1945 goto e_dst;
1946 }
1947
1948 ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
1949
1950e_dst:
1951 ccp_dm_free(&dst);
1952
1953e_src:
1954 ccp_dm_free(&src);
1955
1956e_exp:
1957 ccp_dm_free(&exp);
1958
1959e_sb:
1960 if (sb_count)
1961 cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
1962
1963 return ret;
1964}
1965
1966static noinline_for_stack int
1967ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1968{
1969 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1970 struct ccp_dm_workarea mask;
1971 struct ccp_data src, dst;
1972 struct ccp_op op;
1973 bool in_place = false;
1974 unsigned int i;
1975 int ret = 0;
1976
1977 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1978 return -EINVAL;
1979
1980 if (!pt->src || !pt->dst)
1981 return -EINVAL;
1982
1983 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1984 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1985 return -EINVAL;
1986 if (!pt->mask)
1987 return -EINVAL;
1988 }
1989
1990 BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
1991
1992 memset(&op, 0, sizeof(op));
1993 op.cmd_q = cmd_q;
1994 op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1995
1996 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1997 /* Load the mask */
1998 op.sb_key = cmd_q->sb_key;
1999
2000 ret = ccp_init_dm_workarea(&mask, cmd_q,
2001 CCP_PASSTHRU_SB_COUNT *
2002 CCP_SB_BYTES,
2003 DMA_TO_DEVICE);
2004 if (ret)
2005 return ret;
2006
2007 ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
2008 if (ret)
2009 goto e_mask;
2010 ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
2011 CCP_PASSTHRU_BYTESWAP_NOOP);
2012 if (ret) {
2013 cmd->engine_error = cmd_q->cmd_error;
2014 goto e_mask;
2015 }
2016 }
2017
2018 /* Prepare the input and output data workareas. For in-place
2019 * operations we need to set the dma direction to BIDIRECTIONAL
2020 * and copy the src workarea to the dst workarea.
2021 */
2022 if (sg_virt(pt->src) == sg_virt(pt->dst))
2023 in_place = true;
2024
2025 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
2026 CCP_PASSTHRU_MASKSIZE,
2027 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
2028 if (ret)
2029 goto e_mask;
2030
2031 if (in_place) {
2032 dst = src;
2033 } else {
2034 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
2035 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
2036 if (ret)
2037 goto e_src;
2038 }
2039
2040 /* Send data to the CCP Passthru engine
2041 * Because the CCP engine works on a single source and destination
2042 * dma address at a time, each entry in the source scatterlist
2043 * (after the dma_map_sg call) must be less than or equal to the
2044 * (remaining) length in the destination scatterlist entry and the
2045 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
2046 */
2047 dst.sg_wa.sg_used = 0;
2048 for (i = 1; i <= src.sg_wa.dma_count; i++) {
2049 if (!dst.sg_wa.sg ||
2050 (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
2051 ret = -EINVAL;
2052 goto e_dst;
2053 }
2054
		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
		if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

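		/* The mask buffer was DMA mapped by the caller, so describe
		 * it in the workarea directly instead of allocating and
		 * copying it.
		 */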
		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation.
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

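	/* Stop on completion since the result is read back immediately */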
	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

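	/* The engine reports its status as a little-endian word at a fixed
	 * offset in the output buffer; check it before using the result.
	 */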
	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation.
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

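	/* As above, check the engine status word before using the result */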
	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result.
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

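	/* Dispatch the command to the engine-specific handler */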
	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
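		/* Callers that pre-mapped their own DMA buffers use the
		 * no-map variant.
		 */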
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
1/*
2 * AMD Cryptographic Coprocessor (CCP) driver
3 *
4 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
5 *
6 * Author: Tom Lendacky <thomas.lendacky@amd.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/interrupt.h>
17#include <crypto/scatterwalk.h>
18#include <linux/ccp.h>
19
20#include "ccp-dev.h"
21
22/* SHA initial context values */
23static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
24 cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
25 cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
26 cpu_to_be32(SHA1_H4), 0, 0, 0,
27};
28
29static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
30 cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
31 cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
32 cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
33 cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
34};
35
36static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
37 cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
38 cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
39 cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
40 cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
41};
42
43static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
44{
45 int start;
46
47 for (;;) {
48 mutex_lock(&ccp->ksb_mutex);
49
50 start = (u32)bitmap_find_next_zero_area(ccp->ksb,
51 ccp->ksb_count,
52 ccp->ksb_start,
53 count, 0);
54 if (start <= ccp->ksb_count) {
55 bitmap_set(ccp->ksb, start, count);
56
57 mutex_unlock(&ccp->ksb_mutex);
58 break;
59 }
60
61 ccp->ksb_avail = 0;
62
63 mutex_unlock(&ccp->ksb_mutex);
64
65 /* Wait for KSB entries to become available */
66 if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
67 return 0;
68 }
69
70 return KSB_START + start;
71}
72
73static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
74 unsigned int count)
75{
76 if (!start)
77 return;
78
79 mutex_lock(&ccp->ksb_mutex);
80
81 bitmap_clear(ccp->ksb, start - KSB_START, count);
82
83 ccp->ksb_avail = 1;
84
85 mutex_unlock(&ccp->ksb_mutex);
86
87 wake_up_interruptible_all(&ccp->ksb_queue);
88}
89
90static u32 ccp_gen_jobid(struct ccp_device *ccp)
91{
92 return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
93}
94
95static void ccp_sg_free(struct ccp_sg_workarea *wa)
96{
97 if (wa->dma_count)
98 dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
99
100 wa->dma_count = 0;
101}
102
103static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
104 struct scatterlist *sg, u64 len,
105 enum dma_data_direction dma_dir)
106{
107 memset(wa, 0, sizeof(*wa));
108
109 wa->sg = sg;
110 if (!sg)
111 return 0;
112
113 wa->nents = sg_nents_for_len(sg, len);
114 if (wa->nents < 0)
115 return wa->nents;
116
117 wa->bytes_left = len;
118 wa->sg_used = 0;
119
120 if (len == 0)
121 return 0;
122
123 if (dma_dir == DMA_NONE)
124 return 0;
125
126 wa->dma_sg = sg;
127 wa->dma_dev = dev;
128 wa->dma_dir = dma_dir;
129 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
130 if (!wa->dma_count)
131 return -ENOMEM;
132
133 return 0;
134}
135
136static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
137{
138 unsigned int nbytes = min_t(u64, len, wa->bytes_left);
139
140 if (!wa->sg)
141 return;
142
143 wa->sg_used += nbytes;
144 wa->bytes_left -= nbytes;
145 if (wa->sg_used == wa->sg->length) {
146 wa->sg = sg_next(wa->sg);
147 wa->sg_used = 0;
148 }
149}
150
151static void ccp_dm_free(struct ccp_dm_workarea *wa)
152{
153 if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
154 if (wa->address)
155 dma_pool_free(wa->dma_pool, wa->address,
156 wa->dma.address);
157 } else {
158 if (wa->dma.address)
159 dma_unmap_single(wa->dev, wa->dma.address, wa->length,
160 wa->dma.dir);
161 kfree(wa->address);
162 }
163
164 wa->address = NULL;
165 wa->dma.address = 0;
166}
167
168static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
169 struct ccp_cmd_queue *cmd_q,
170 unsigned int len,
171 enum dma_data_direction dir)
172{
173 memset(wa, 0, sizeof(*wa));
174
175 if (!len)
176 return 0;
177
178 wa->dev = cmd_q->ccp->dev;
179 wa->length = len;
180
181 if (len <= CCP_DMAPOOL_MAX_SIZE) {
182 wa->dma_pool = cmd_q->dma_pool;
183
184 wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
185 &wa->dma.address);
186 if (!wa->address)
187 return -ENOMEM;
188
189 wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
190
191 memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
192 } else {
193 wa->address = kzalloc(len, GFP_KERNEL);
194 if (!wa->address)
195 return -ENOMEM;
196
197 wa->dma.address = dma_map_single(wa->dev, wa->address, len,
198 dir);
199 if (!wa->dma.address)
200 return -ENOMEM;
201
202 wa->dma.length = len;
203 }
204 wa->dma.dir = dir;
205
206 return 0;
207}
208
209static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
210 struct scatterlist *sg, unsigned int sg_offset,
211 unsigned int len)
212{
213 WARN_ON(!wa->address);
214
215 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
216 0);
217}
218
219static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
220 struct scatterlist *sg, unsigned int sg_offset,
221 unsigned int len)
222{
223 WARN_ON(!wa->address);
224
225 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
226 1);
227}
228
229static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
230 struct scatterlist *sg,
231 unsigned int len, unsigned int se_len,
232 bool sign_extend)
233{
234 unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
235 u8 buffer[CCP_REVERSE_BUF_SIZE];
236
237 if (WARN_ON(se_len > sizeof(buffer)))
238 return -EINVAL;
239
240 sg_offset = len;
241 dm_offset = 0;
242 nbytes = len;
243 while (nbytes) {
244 ksb_len = min_t(unsigned int, nbytes, se_len);
245 sg_offset -= ksb_len;
246
247 scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
248 for (i = 0; i < ksb_len; i++)
249 wa->address[dm_offset + i] = buffer[ksb_len - i - 1];
250
251 dm_offset += ksb_len;
252 nbytes -= ksb_len;
253
254 if ((ksb_len != se_len) && sign_extend) {
255 /* Must sign-extend to nearest sign-extend length */
256 if (wa->address[dm_offset - 1] & 0x80)
257 memset(wa->address + dm_offset, 0xff,
258 se_len - ksb_len);
259 }
260 }
261
262 return 0;
263}
264
265static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
266 struct scatterlist *sg,
267 unsigned int len)
268{
269 unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
270 u8 buffer[CCP_REVERSE_BUF_SIZE];
271
272 sg_offset = 0;
273 dm_offset = len;
274 nbytes = len;
275 while (nbytes) {
276 ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
277 dm_offset -= ksb_len;
278
279 for (i = 0; i < ksb_len; i++)
280 buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
281 scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);
282
283 sg_offset += ksb_len;
284 nbytes -= ksb_len;
285 }
286}
287
288static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
289{
290 ccp_dm_free(&data->dm_wa);
291 ccp_sg_free(&data->sg_wa);
292}
293
294static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
295 struct scatterlist *sg, u64 sg_len,
296 unsigned int dm_len,
297 enum dma_data_direction dir)
298{
299 int ret;
300
301 memset(data, 0, sizeof(*data));
302
303 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
304 dir);
305 if (ret)
306 goto e_err;
307
308 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
309 if (ret)
310 goto e_err;
311
312 return 0;
313
314e_err:
315 ccp_free_data(data, cmd_q);
316
317 return ret;
318}
319
320static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
321{
322 struct ccp_sg_workarea *sg_wa = &data->sg_wa;
323 struct ccp_dm_workarea *dm_wa = &data->dm_wa;
324 unsigned int buf_count, nbytes;
325
326 /* Clear the buffer if setting it */
327 if (!from)
328 memset(dm_wa->address, 0, dm_wa->length);
329
330 if (!sg_wa->sg)
331 return 0;
332
333 /* Perform the copy operation
334 * nbytes will always be <= UINT_MAX because dm_wa->length is
335 * an unsigned int
336 */
337 nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
338 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
339 nbytes, from);
340
341 /* Update the structures and generate the count */
342 buf_count = 0;
343 while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
344 nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
345 dm_wa->length - buf_count);
346 nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
347
348 buf_count += nbytes;
349 ccp_update_sg_workarea(sg_wa, nbytes);
350 }
351
352 return buf_count;
353}
354
355static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
356{
357 return ccp_queue_buf(data, 0);
358}
359
360static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
361{
362 return ccp_queue_buf(data, 1);
363}
364
365static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
366 struct ccp_op *op, unsigned int block_size,
367 bool blocksize_op)
368{
369 unsigned int sg_src_len, sg_dst_len, op_len;
370
371 /* The CCP can only DMA from/to one address each per operation. This
372 * requires that we find the smallest DMA area between the source
373 * and destination. The resulting len values will always be <= UINT_MAX
374 * because the dma length is an unsigned int.
375 */
376 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
377 sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
378
379 if (dst) {
380 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
381 sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
382 op_len = min(sg_src_len, sg_dst_len);
383 } else {
384 op_len = sg_src_len;
385 }
386
387 /* The data operation length will be at least block_size in length
388 * or the smaller of available sg room remaining for the source or
389 * the destination
390 */
391 op_len = max(op_len, block_size);
392
393 /* Unless we have to buffer data, there's no reason to wait */
394 op->soc = 0;
395
396 if (sg_src_len < block_size) {
397 /* Not enough data in the sg element, so it
398 * needs to be buffered into a blocksize chunk
399 */
400 int cp_len = ccp_fill_queue_buf(src);
401
402 op->soc = 1;
403 op->src.u.dma.address = src->dm_wa.dma.address;
404 op->src.u.dma.offset = 0;
405 op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
406 } else {
407 /* Enough data in the sg element, but we need to
408 * adjust for any previously copied data
409 */
410 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
411 op->src.u.dma.offset = src->sg_wa.sg_used;
412 op->src.u.dma.length = op_len & ~(block_size - 1);
413
414 ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
415 }
416
417 if (dst) {
418 if (sg_dst_len < block_size) {
419 /* Not enough room in the sg element or we're on the
420 * last piece of data (when using padding), so the
421 * output needs to be buffered into a blocksize chunk
422 */
423 op->soc = 1;
424 op->dst.u.dma.address = dst->dm_wa.dma.address;
425 op->dst.u.dma.offset = 0;
426 op->dst.u.dma.length = op->src.u.dma.length;
427 } else {
428 /* Enough room in the sg element, but we need to
429 * adjust for any previously used area
430 */
431 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
432 op->dst.u.dma.offset = dst->sg_wa.sg_used;
433 op->dst.u.dma.length = op->src.u.dma.length;
434 }
435 }
436}
437
438static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
439 struct ccp_op *op)
440{
441 op->init = 0;
442
443 if (dst) {
444 if (op->dst.u.dma.address == dst->dm_wa.dma.address)
445 ccp_empty_queue_buf(dst);
446 else
447 ccp_update_sg_workarea(&dst->sg_wa,
448 op->dst.u.dma.length);
449 }
450}
451
452static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
453 struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
454 u32 byte_swap, bool from)
455{
456 struct ccp_op op;
457
458 memset(&op, 0, sizeof(op));
459
460 op.cmd_q = cmd_q;
461 op.jobid = jobid;
462 op.eom = 1;
463
464 if (from) {
465 op.soc = 1;
466 op.src.type = CCP_MEMTYPE_KSB;
467 op.src.u.ksb = ksb;
468 op.dst.type = CCP_MEMTYPE_SYSTEM;
469 op.dst.u.dma.address = wa->dma.address;
470 op.dst.u.dma.length = wa->length;
471 } else {
472 op.src.type = CCP_MEMTYPE_SYSTEM;
473 op.src.u.dma.address = wa->dma.address;
474 op.src.u.dma.length = wa->length;
475 op.dst.type = CCP_MEMTYPE_KSB;
476 op.dst.u.ksb = ksb;
477 }
478
479 op.u.passthru.byte_swap = byte_swap;
480
481 return cmd_q->ccp->vdata->perform->perform_passthru(&op);
482}
483
484static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
485 struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
486 u32 byte_swap)
487{
488 return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
489}
490
491static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
492 struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
493 u32 byte_swap)
494{
495 return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
496}
497
498static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
499 struct ccp_cmd *cmd)
500{
501 struct ccp_aes_engine *aes = &cmd->u.aes;
502 struct ccp_dm_workarea key, ctx;
503 struct ccp_data src;
504 struct ccp_op op;
505 unsigned int dm_offset;
506 int ret;
507
508 if (!((aes->key_len == AES_KEYSIZE_128) ||
509 (aes->key_len == AES_KEYSIZE_192) ||
510 (aes->key_len == AES_KEYSIZE_256)))
511 return -EINVAL;
512
513 if (aes->src_len & (AES_BLOCK_SIZE - 1))
514 return -EINVAL;
515
516 if (aes->iv_len != AES_BLOCK_SIZE)
517 return -EINVAL;
518
519 if (!aes->key || !aes->iv || !aes->src)
520 return -EINVAL;
521
522 if (aes->cmac_final) {
523 if (aes->cmac_key_len != AES_BLOCK_SIZE)
524 return -EINVAL;
525
526 if (!aes->cmac_key)
527 return -EINVAL;
528 }
529
530 BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
531 BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
532
533 ret = -EIO;
534 memset(&op, 0, sizeof(op));
535 op.cmd_q = cmd_q;
536 op.jobid = ccp_gen_jobid(cmd_q->ccp);
537 op.ksb_key = cmd_q->ksb_key;
538 op.ksb_ctx = cmd_q->ksb_ctx;
539 op.init = 1;
540 op.u.aes.type = aes->type;
541 op.u.aes.mode = aes->mode;
542 op.u.aes.action = aes->action;
543
544 /* All supported key sizes fit in a single (32-byte) KSB entry
545 * and must be in little endian format. Use the 256-bit byte
546 * swap passthru option to convert from big endian to little
547 * endian.
548 */
549 ret = ccp_init_dm_workarea(&key, cmd_q,
550 CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
551 DMA_TO_DEVICE);
552 if (ret)
553 return ret;
554
555 dm_offset = CCP_KSB_BYTES - aes->key_len;
556 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
557 ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
558 CCP_PASSTHRU_BYTESWAP_256BIT);
559 if (ret) {
560 cmd->engine_error = cmd_q->cmd_error;
561 goto e_key;
562 }
563
564 /* The AES context fits in a single (32-byte) KSB entry and
565 * must be in little endian format. Use the 256-bit byte swap
566 * passthru option to convert from big endian to little endian.
567 */
568 ret = ccp_init_dm_workarea(&ctx, cmd_q,
569 CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
570 DMA_BIDIRECTIONAL);
571 if (ret)
572 goto e_key;
573
574 dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
575 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
576 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
577 CCP_PASSTHRU_BYTESWAP_256BIT);
578 if (ret) {
579 cmd->engine_error = cmd_q->cmd_error;
580 goto e_ctx;
581 }
582
583 /* Send data to the CCP AES engine */
584 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
585 AES_BLOCK_SIZE, DMA_TO_DEVICE);
586 if (ret)
587 goto e_ctx;
588
589 while (src.sg_wa.bytes_left) {
590 ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
591 if (aes->cmac_final && !src.sg_wa.bytes_left) {
592 op.eom = 1;
593
594 /* Push the K1/K2 key to the CCP now */
595 ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
596 op.ksb_ctx,
597 CCP_PASSTHRU_BYTESWAP_256BIT);
598 if (ret) {
599 cmd->engine_error = cmd_q->cmd_error;
600 goto e_src;
601 }
602
603 ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
604 aes->cmac_key_len);
605 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
606 CCP_PASSTHRU_BYTESWAP_256BIT);
607 if (ret) {
608 cmd->engine_error = cmd_q->cmd_error;
609 goto e_src;
610 }
611 }
612
613 ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
614 if (ret) {
615 cmd->engine_error = cmd_q->cmd_error;
616 goto e_src;
617 }
618
619 ccp_process_data(&src, NULL, &op);
620 }
621
622 /* Retrieve the AES context - convert from LE to BE using
623 * 32-byte (256-bit) byteswapping
624 */
625 ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
626 CCP_PASSTHRU_BYTESWAP_256BIT);
627 if (ret) {
628 cmd->engine_error = cmd_q->cmd_error;
629 goto e_src;
630 }
631
632 /* ...but we only need AES_BLOCK_SIZE bytes */
633 dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
634 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
635
636e_src:
637 ccp_free_data(&src, cmd_q);
638
639e_ctx:
640 ccp_dm_free(&ctx);
641
642e_key:
643 ccp_dm_free(&key);
644
645 return ret;
646}
647
648static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
649{
650 struct ccp_aes_engine *aes = &cmd->u.aes;
651 struct ccp_dm_workarea key, ctx;
652 struct ccp_data src, dst;
653 struct ccp_op op;
654 unsigned int dm_offset;
655 bool in_place = false;
656 int ret;
657
658 if (aes->mode == CCP_AES_MODE_CMAC)
659 return ccp_run_aes_cmac_cmd(cmd_q, cmd);
660
661 if (!((aes->key_len == AES_KEYSIZE_128) ||
662 (aes->key_len == AES_KEYSIZE_192) ||
663 (aes->key_len == AES_KEYSIZE_256)))
664 return -EINVAL;
665
666 if (((aes->mode == CCP_AES_MODE_ECB) ||
667 (aes->mode == CCP_AES_MODE_CBC) ||
668 (aes->mode == CCP_AES_MODE_CFB)) &&
669 (aes->src_len & (AES_BLOCK_SIZE - 1)))
670 return -EINVAL;
671
672 if (!aes->key || !aes->src || !aes->dst)
673 return -EINVAL;
674
675 if (aes->mode != CCP_AES_MODE_ECB) {
676 if (aes->iv_len != AES_BLOCK_SIZE)
677 return -EINVAL;
678
679 if (!aes->iv)
680 return -EINVAL;
681 }
682
683 BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
684 BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);
685
686 ret = -EIO;
687 memset(&op, 0, sizeof(op));
688 op.cmd_q = cmd_q;
689 op.jobid = ccp_gen_jobid(cmd_q->ccp);
690 op.ksb_key = cmd_q->ksb_key;
691 op.ksb_ctx = cmd_q->ksb_ctx;
692 op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
693 op.u.aes.type = aes->type;
694 op.u.aes.mode = aes->mode;
695 op.u.aes.action = aes->action;
696
697 /* All supported key sizes fit in a single (32-byte) KSB entry
698 * and must be in little endian format. Use the 256-bit byte
699 * swap passthru option to convert from big endian to little
700 * endian.
701 */
702 ret = ccp_init_dm_workarea(&key, cmd_q,
703 CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
704 DMA_TO_DEVICE);
705 if (ret)
706 return ret;
707
708 dm_offset = CCP_KSB_BYTES - aes->key_len;
709 ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
710 ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
711 CCP_PASSTHRU_BYTESWAP_256BIT);
712 if (ret) {
713 cmd->engine_error = cmd_q->cmd_error;
714 goto e_key;
715 }
716
717 /* The AES context fits in a single (32-byte) KSB entry and
718 * must be in little endian format. Use the 256-bit byte swap
719 * passthru option to convert from big endian to little endian.
720 */
721 ret = ccp_init_dm_workarea(&ctx, cmd_q,
722 CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
723 DMA_BIDIRECTIONAL);
724 if (ret)
725 goto e_key;
726
727 if (aes->mode != CCP_AES_MODE_ECB) {
728 /* Load the AES context - conver to LE */
729 dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
730 ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
731 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
732 CCP_PASSTHRU_BYTESWAP_256BIT);
733 if (ret) {
734 cmd->engine_error = cmd_q->cmd_error;
735 goto e_ctx;
736 }
737 }
738
739 /* Prepare the input and output data workareas. For in-place
740 * operations we need to set the dma direction to BIDIRECTIONAL
741 * and copy the src workarea to the dst workarea.
742 */
743 if (sg_virt(aes->src) == sg_virt(aes->dst))
744 in_place = true;
745
746 ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
747 AES_BLOCK_SIZE,
748 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
749 if (ret)
750 goto e_ctx;
751
752 if (in_place) {
753 dst = src;
754 } else {
755 ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
756 AES_BLOCK_SIZE, DMA_FROM_DEVICE);
757 if (ret)
758 goto e_src;
759 }
760
761 /* Send data to the CCP AES engine */
762 while (src.sg_wa.bytes_left) {
763 ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
764 if (!src.sg_wa.bytes_left) {
765 op.eom = 1;
766
767 /* Since we don't retrieve the AES context in ECB
768 * mode we have to wait for the operation to complete
769 * on the last piece of data
770 */
771 if (aes->mode == CCP_AES_MODE_ECB)
772 op.soc = 1;
773 }
774
775 ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
776 if (ret) {
777 cmd->engine_error = cmd_q->cmd_error;
778 goto e_dst;
779 }
780
781 ccp_process_data(&src, &dst, &op);
782 }
783
784 if (aes->mode != CCP_AES_MODE_ECB) {
785 /* Retrieve the AES context - convert from LE to BE using
786 * 32-byte (256-bit) byteswapping
787 */
788 ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
789 CCP_PASSTHRU_BYTESWAP_256BIT);
790 if (ret) {
791 cmd->engine_error = cmd_q->cmd_error;
792 goto e_dst;
793 }
794
795 /* ...but we only need AES_BLOCK_SIZE bytes */
796 dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
797 ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
798 }
799
800e_dst:
801 if (!in_place)
802 ccp_free_data(&dst, cmd_q);
803
804e_src:
805 ccp_free_data(&src, cmd_q);
806
807e_ctx:
808 ccp_dm_free(&ctx);
809
810e_key:
811 ccp_dm_free(&key);
812
813 return ret;
814}
815
816static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
817 struct ccp_cmd *cmd)
818{
819 struct ccp_xts_aes_engine *xts = &cmd->u.xts;
820 struct ccp_dm_workarea key, ctx;
821 struct ccp_data src, dst;
822 struct ccp_op op;
823 unsigned int unit_size, dm_offset;
824 bool in_place = false;
825 int ret;
826
827 switch (xts->unit_size) {
828 case CCP_XTS_AES_UNIT_SIZE_16:
829 unit_size = 16;
830 break;
831 case CCP_XTS_AES_UNIT_SIZE_512:
832 unit_size = 512;
833 break;
834 case CCP_XTS_AES_UNIT_SIZE_1024:
835 unit_size = 1024;
836 break;
837 case CCP_XTS_AES_UNIT_SIZE_2048:
838 unit_size = 2048;
839 break;
840 case CCP_XTS_AES_UNIT_SIZE_4096:
841 unit_size = 4096;
842 break;
843
844 default:
845 return -EINVAL;
846 }
847
848 if (xts->key_len != AES_KEYSIZE_128)
849 return -EINVAL;
850
851 if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
852 return -EINVAL;
853
854 if (xts->iv_len != AES_BLOCK_SIZE)
855 return -EINVAL;
856
857 if (!xts->key || !xts->iv || !xts->src || !xts->dst)
858 return -EINVAL;
859
860 BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
861 BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);
862
863 ret = -EIO;
864 memset(&op, 0, sizeof(op));
865 op.cmd_q = cmd_q;
866 op.jobid = ccp_gen_jobid(cmd_q->ccp);
867 op.ksb_key = cmd_q->ksb_key;
868 op.ksb_ctx = cmd_q->ksb_ctx;
869 op.init = 1;
870 op.u.xts.action = xts->action;
871 op.u.xts.unit_size = xts->unit_size;
872
873 /* All supported key sizes fit in a single (32-byte) KSB entry
874 * and must be in little endian format. Use the 256-bit byte
875 * swap passthru option to convert from big endian to little
876 * endian.
877 */
878 ret = ccp_init_dm_workarea(&key, cmd_q,
879 CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
880 DMA_TO_DEVICE);
881 if (ret)
882 return ret;
883
884 dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
885 ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
886 ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
887 ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
888 CCP_PASSTHRU_BYTESWAP_256BIT);
889 if (ret) {
890 cmd->engine_error = cmd_q->cmd_error;
891 goto e_key;
892 }
893
894 /* The AES context fits in a single (32-byte) KSB entry and
895 * for XTS is already in little endian format so no byte swapping
896 * is needed.
897 */
898 ret = ccp_init_dm_workarea(&ctx, cmd_q,
899 CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
900 DMA_BIDIRECTIONAL);
901 if (ret)
902 goto e_key;
903
904 ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
905 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
906 CCP_PASSTHRU_BYTESWAP_NOOP);
907 if (ret) {
908 cmd->engine_error = cmd_q->cmd_error;
909 goto e_ctx;
910 }
911
912 /* Prepare the input and output data workareas. For in-place
913 * operations we need to set the dma direction to BIDIRECTIONAL
914 * and copy the src workarea to the dst workarea.
915 */
916 if (sg_virt(xts->src) == sg_virt(xts->dst))
917 in_place = true;
918
919 ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
920 unit_size,
921 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
922 if (ret)
923 goto e_ctx;
924
925 if (in_place) {
926 dst = src;
927 } else {
928 ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
929 unit_size, DMA_FROM_DEVICE);
930 if (ret)
931 goto e_src;
932 }
933
934 /* Send data to the CCP AES engine */
935 while (src.sg_wa.bytes_left) {
936 ccp_prepare_data(&src, &dst, &op, unit_size, true);
937 if (!src.sg_wa.bytes_left)
938 op.eom = 1;
939
940 ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
941 if (ret) {
942 cmd->engine_error = cmd_q->cmd_error;
943 goto e_dst;
944 }
945
946 ccp_process_data(&src, &dst, &op);
947 }
948
949 /* Retrieve the AES context - convert from LE to BE using
950 * 32-byte (256-bit) byteswapping
951 */
952 ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
953 CCP_PASSTHRU_BYTESWAP_256BIT);
954 if (ret) {
955 cmd->engine_error = cmd_q->cmd_error;
956 goto e_dst;
957 }
958
959 /* ...but we only need AES_BLOCK_SIZE bytes */
960 dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
961 ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);
962
963e_dst:
964 if (!in_place)
965 ccp_free_data(&dst, cmd_q);
966
967e_src:
968 ccp_free_data(&src, cmd_q);
969
970e_ctx:
971 ccp_dm_free(&ctx);
972
973e_key:
974 ccp_dm_free(&key);
975
976 return ret;
977}
978
979static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
980{
981 struct ccp_sha_engine *sha = &cmd->u.sha;
982 struct ccp_dm_workarea ctx;
983 struct ccp_data src;
984 struct ccp_op op;
985 int ret;
986
987 if (sha->ctx_len != CCP_SHA_CTXSIZE)
988 return -EINVAL;
989
990 if (!sha->ctx)
991 return -EINVAL;
992
993 if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
994 return -EINVAL;
995
996 if (!sha->src_len) {
997 const u8 *sha_zero;
998
999 /* Not final, just return */
1000 if (!sha->final)
1001 return 0;
1002
1003 /* CCP can't do a zero length sha operation so the caller
1004 * must buffer the data.
1005 */
1006 if (sha->msg_bits)
1007 return -EINVAL;
1008
1009 /* The CCP cannot perform zero-length sha operations so the
1010 * caller is required to buffer data for the final operation.
1011 * However, a sha operation for a message with a total length
1012 * of zero is valid so known values are required to supply
1013 * the result.
1014 */
1015 switch (sha->type) {
1016 case CCP_SHA_TYPE_1:
1017 sha_zero = sha1_zero_message_hash;
1018 break;
1019 case CCP_SHA_TYPE_224:
1020 sha_zero = sha224_zero_message_hash;
1021 break;
1022 case CCP_SHA_TYPE_256:
1023 sha_zero = sha256_zero_message_hash;
1024 break;
1025 default:
1026 return -EINVAL;
1027 }
1028
1029 scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
1030 sha->ctx_len, 1);
1031
1032 return 0;
1033 }
1034
1035 if (!sha->src)
1036 return -EINVAL;
1037
1038 BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);
1039
1040 memset(&op, 0, sizeof(op));
1041 op.cmd_q = cmd_q;
1042 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1043 op.ksb_ctx = cmd_q->ksb_ctx;
1044 op.u.sha.type = sha->type;
1045 op.u.sha.msg_bits = sha->msg_bits;
1046
1047 /* The SHA context fits in a single (32-byte) KSB entry and
1048 * must be in little endian format. Use the 256-bit byte swap
1049 * passthru option to convert from big endian to little endian.
1050 */
1051 ret = ccp_init_dm_workarea(&ctx, cmd_q,
1052 CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
1053 DMA_BIDIRECTIONAL);
1054 if (ret)
1055 return ret;
1056
1057 if (sha->first) {
1058 const __be32 *init;
1059
1060 switch (sha->type) {
1061 case CCP_SHA_TYPE_1:
1062 init = ccp_sha1_init;
1063 break;
1064 case CCP_SHA_TYPE_224:
1065 init = ccp_sha224_init;
1066 break;
1067 case CCP_SHA_TYPE_256:
1068 init = ccp_sha256_init;
1069 break;
1070 default:
1071 ret = -EINVAL;
1072 goto e_ctx;
1073 }
1074 memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
1075 } else {
1076 ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
1077 }
1078
1079 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
1080 CCP_PASSTHRU_BYTESWAP_256BIT);
1081 if (ret) {
1082 cmd->engine_error = cmd_q->cmd_error;
1083 goto e_ctx;
1084 }
1085
1086 /* Send data to the CCP SHA engine */
1087 ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
1088 CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
1089 if (ret)
1090 goto e_ctx;
1091
1092 while (src.sg_wa.bytes_left) {
1093 ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
1094 if (sha->final && !src.sg_wa.bytes_left)
1095 op.eom = 1;
1096
1097 ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
1098 if (ret) {
1099 cmd->engine_error = cmd_q->cmd_error;
1100 goto e_data;
1101 }
1102
1103 ccp_process_data(&src, NULL, &op);
1104 }
1105
1106 /* Retrieve the SHA context - convert from LE to BE using
1107 * 32-byte (256-bit) byteswapping to BE
1108 */
1109 ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
1110 CCP_PASSTHRU_BYTESWAP_256BIT);
1111 if (ret) {
1112 cmd->engine_error = cmd_q->cmd_error;
1113 goto e_data;
1114 }
1115
1116 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
1117
1118 if (sha->final && sha->opad) {
1119 /* HMAC operation, recursively perform final SHA */
1120 struct ccp_cmd hmac_cmd;
1121 struct scatterlist sg;
1122 u64 block_size, digest_size;
1123 u8 *hmac_buf;
1124
1125 switch (sha->type) {
1126 case CCP_SHA_TYPE_1:
1127 block_size = SHA1_BLOCK_SIZE;
1128 digest_size = SHA1_DIGEST_SIZE;
1129 break;
1130 case CCP_SHA_TYPE_224:
1131 block_size = SHA224_BLOCK_SIZE;
1132 digest_size = SHA224_DIGEST_SIZE;
1133 break;
1134 case CCP_SHA_TYPE_256:
1135 block_size = SHA256_BLOCK_SIZE;
1136 digest_size = SHA256_DIGEST_SIZE;
1137 break;
1138 default:
1139 ret = -EINVAL;
1140 goto e_data;
1141 }
1142
1143 if (sha->opad_len != block_size) {
1144 ret = -EINVAL;
1145 goto e_data;
1146 }
1147
1148 hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
1149 if (!hmac_buf) {
1150 ret = -ENOMEM;
1151 goto e_data;
1152 }
1153 sg_init_one(&sg, hmac_buf, block_size + digest_size);
1154
1155 scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
1156 memcpy(hmac_buf + block_size, ctx.address, digest_size);
1157
1158 memset(&hmac_cmd, 0, sizeof(hmac_cmd));
1159 hmac_cmd.engine = CCP_ENGINE_SHA;
1160 hmac_cmd.u.sha.type = sha->type;
1161 hmac_cmd.u.sha.ctx = sha->ctx;
1162 hmac_cmd.u.sha.ctx_len = sha->ctx_len;
1163 hmac_cmd.u.sha.src = &sg;
1164 hmac_cmd.u.sha.src_len = block_size + digest_size;
1165 hmac_cmd.u.sha.opad = NULL;
1166 hmac_cmd.u.sha.opad_len = 0;
1167 hmac_cmd.u.sha.first = 1;
1168 hmac_cmd.u.sha.final = 1;
1169 hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;
1170
1171 ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
1172 if (ret)
1173 cmd->engine_error = hmac_cmd.engine_error;
1174
1175 kfree(hmac_buf);
1176 }
1177
1178e_data:
1179 ccp_free_data(&src, cmd_q);
1180
1181e_ctx:
1182 ccp_dm_free(&ctx);
1183
1184 return ret;
1185}
1186
1187static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1188{
1189 struct ccp_rsa_engine *rsa = &cmd->u.rsa;
1190 struct ccp_dm_workarea exp, src;
1191 struct ccp_data dst;
1192 struct ccp_op op;
1193 unsigned int ksb_count, i_len, o_len;
1194 int ret;
1195
1196 if (rsa->key_size > CCP_RSA_MAX_WIDTH)
1197 return -EINVAL;
1198
1199 if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
1200 return -EINVAL;
1201
1202 /* The RSA modulus must precede the message being acted upon, so
1203 * it must be copied to a DMA area where the message and the
1204 * modulus can be concatenated. Therefore the input buffer
1205 * length required is twice the output buffer length (which
1206 * must be a multiple of 256-bits).
1207 */
1208 o_len = ((rsa->key_size + 255) / 256) * 32;
1209 i_len = o_len * 2;
1210
1211 ksb_count = o_len / CCP_KSB_BYTES;
1212
1213 memset(&op, 0, sizeof(op));
1214 op.cmd_q = cmd_q;
1215 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1216 op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
1217 if (!op.ksb_key)
1218 return -EIO;
1219
1220 /* The RSA exponent may span multiple (32-byte) KSB entries and must
1221 * be in little endian format. Reverse copy each 32-byte chunk
1222 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
1223 * and each byte within that chunk and do not perform any byte swap
1224 * operations on the passthru operation.
1225 */
1226 ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
1227 if (ret)
1228 goto e_ksb;
1229
1230 ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
1231 CCP_KSB_BYTES, false);
1232 if (ret)
1233 goto e_exp;
1234 ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
1235 CCP_PASSTHRU_BYTESWAP_NOOP);
1236 if (ret) {
1237 cmd->engine_error = cmd_q->cmd_error;
1238 goto e_exp;
1239 }
1240
1241 /* Concatenate the modulus and the message. Both the modulus and
1242 * the operands must be in little endian format. Since the input
1243 * is in big endian format it must be converted.
1244 */
1245 ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
1246 if (ret)
1247 goto e_exp;
1248
1249 ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
1250 CCP_KSB_BYTES, false);
1251 if (ret)
1252 goto e_src;
1253 src.address += o_len; /* Adjust the address for the copy operation */
1254 ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
1255 CCP_KSB_BYTES, false);
1256 if (ret)
1257 goto e_src;
1258 src.address -= o_len; /* Reset the address to original value */
1259
1260 /* Prepare the output area for the operation */
1261 ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
1262 o_len, DMA_FROM_DEVICE);
1263 if (ret)
1264 goto e_src;
1265
1266 op.soc = 1;
1267 op.src.u.dma.address = src.dma.address;
1268 op.src.u.dma.offset = 0;
1269 op.src.u.dma.length = i_len;
1270 op.dst.u.dma.address = dst.dm_wa.dma.address;
1271 op.dst.u.dma.offset = 0;
1272 op.dst.u.dma.length = o_len;
1273
1274 op.u.rsa.mod_size = rsa->key_size;
1275 op.u.rsa.input_len = i_len;
1276
1277 ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
1278 if (ret) {
1279 cmd->engine_error = cmd_q->cmd_error;
1280 goto e_dst;
1281 }
1282
1283 ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);
1284
1285e_dst:
1286 ccp_free_data(&dst, cmd_q);
1287
1288e_src:
1289 ccp_dm_free(&src);
1290
1291e_exp:
1292 ccp_dm_free(&exp);
1293
1294e_ksb:
1295 ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);
1296
1297 return ret;
1298}
1299
1300static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
1301 struct ccp_cmd *cmd)
1302{
1303 struct ccp_passthru_engine *pt = &cmd->u.passthru;
1304 struct ccp_dm_workarea mask;
1305 struct ccp_data src, dst;
1306 struct ccp_op op;
1307 bool in_place = false;
1308 unsigned int i;
1309 int ret;
1310
1311 if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1312 return -EINVAL;
1313
1314 if (!pt->src || !pt->dst)
1315 return -EINVAL;
1316
1317 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1318 if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1319 return -EINVAL;
1320 if (!pt->mask)
1321 return -EINVAL;
1322 }
1323
1324 BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
1325
1326 memset(&op, 0, sizeof(op));
1327 op.cmd_q = cmd_q;
1328 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1329
1330 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1331 /* Load the mask */
1332 op.ksb_key = cmd_q->ksb_key;
1333
1334 ret = ccp_init_dm_workarea(&mask, cmd_q,
1335 CCP_PASSTHRU_KSB_COUNT *
1336 CCP_KSB_BYTES,
1337 DMA_TO_DEVICE);
1338 if (ret)
1339 return ret;
1340
1341 ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
1342 ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
1343 CCP_PASSTHRU_BYTESWAP_NOOP);
1344 if (ret) {
1345 cmd->engine_error = cmd_q->cmd_error;
1346 goto e_mask;
1347 }
1348 }
1349
1350 /* Prepare the input and output data workareas. For in-place
1351 * operations we need to set the dma direction to BIDIRECTIONAL
1352 * and copy the src workarea to the dst workarea.
1353 */
1354 if (sg_virt(pt->src) == sg_virt(pt->dst))
1355 in_place = true;
1356
1357 ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
1358 CCP_PASSTHRU_MASKSIZE,
1359 in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1360 if (ret)
1361 goto e_mask;
1362
1363 if (in_place) {
1364 dst = src;
1365 } else {
1366 ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
1367 CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
1368 if (ret)
1369 goto e_src;
1370 }
1371
1372 /* Send data to the CCP Passthru engine
1373 * Because the CCP engine works on a single source and destination
1374 * dma address at a time, each entry in the source scatterlist
1375 * (after the dma_map_sg call) must be less than or equal to the
1376 * (remaining) length in the destination scatterlist entry and the
1377 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
1378 */
1379 dst.sg_wa.sg_used = 0;
1380 for (i = 1; i <= src.sg_wa.dma_count; i++) {
1381 if (!dst.sg_wa.sg ||
1382 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
1383 ret = -EINVAL;
1384 goto e_dst;
1385 }
1386
1387 if (i == src.sg_wa.dma_count) {
1388 op.eom = 1;
1389 op.soc = 1;
1390 }
1391
1392 op.src.type = CCP_MEMTYPE_SYSTEM;
1393 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
1394 op.src.u.dma.offset = 0;
1395 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);
1396
1397 op.dst.type = CCP_MEMTYPE_SYSTEM;
1398 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
1399 op.dst.u.dma.offset = dst.sg_wa.sg_used;
1400 op.dst.u.dma.length = op.src.u.dma.length;
1401
1402 ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
1403 if (ret) {
1404 cmd->engine_error = cmd_q->cmd_error;
1405 goto e_dst;
1406 }
1407
1408 dst.sg_wa.sg_used += src.sg_wa.sg->length;
1409 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
1410 dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
1411 dst.sg_wa.sg_used = 0;
1412 }
1413 src.sg_wa.sg = sg_next(src.sg_wa.sg);
1414 }
1415
1416e_dst:
1417 if (!in_place)
1418 ccp_free_data(&dst, cmd_q);
1419
1420e_src:
1421 ccp_free_data(&src, cmd_q);
1422
1423e_mask:
1424 if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
1425 ccp_dm_free(&mask);
1426
1427 return ret;
1428}
1429
1430static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1431{
1432 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1433 struct ccp_dm_workarea src, dst;
1434 struct ccp_op op;
1435 int ret;
1436 u8 *save;
1437
1438 if (!ecc->u.mm.operand_1 ||
1439 (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
1440 return -EINVAL;
1441
1442 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
1443 if (!ecc->u.mm.operand_2 ||
1444 (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
1445 return -EINVAL;
1446
1447 if (!ecc->u.mm.result ||
1448 (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
1449 return -EINVAL;
1450
1451 memset(&op, 0, sizeof(op));
1452 op.cmd_q = cmd_q;
1453 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1454
1455 /* Concatenate the modulus and the operands. Both the modulus and
1456 * the operands must be in little endian format. Since the input
1457 * is in big endian format it must be converted and placed in a
1458 * fixed length buffer.
1459 */
1460 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1461 DMA_TO_DEVICE);
1462 if (ret)
1463 return ret;
1464
1465 /* Save the workarea address since it is updated in order to perform
1466 * the concatenation
1467 */
1468 save = src.address;
1469
1470 /* Copy the ECC modulus */
1471 ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1472 CCP_ECC_OPERAND_SIZE, false);
1473 if (ret)
1474 goto e_src;
1475 src.address += CCP_ECC_OPERAND_SIZE;
1476
1477 /* Copy the first operand */
1478 ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
1479 ecc->u.mm.operand_1_len,
1480 CCP_ECC_OPERAND_SIZE, false);
1481 if (ret)
1482 goto e_src;
1483 src.address += CCP_ECC_OPERAND_SIZE;
1484
1485 if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
1486 /* Copy the second operand */
1487 ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
1488 ecc->u.mm.operand_2_len,
1489 CCP_ECC_OPERAND_SIZE, false);
1490 if (ret)
1491 goto e_src;
1492 src.address += CCP_ECC_OPERAND_SIZE;
1493 }
1494
1495 /* Restore the workarea address */
1496 src.address = save;
1497
1498 /* Prepare the output area for the operation */
1499 ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
1500 DMA_FROM_DEVICE);
1501 if (ret)
1502 goto e_src;
1503
1504 op.soc = 1;
1505 op.src.u.dma.address = src.dma.address;
1506 op.src.u.dma.offset = 0;
1507 op.src.u.dma.length = src.length;
1508 op.dst.u.dma.address = dst.dma.address;
1509 op.dst.u.dma.offset = 0;
1510 op.dst.u.dma.length = dst.length;
1511
1512 op.u.ecc.function = cmd->u.ecc.function;
1513
1514 ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
1515 if (ret) {
1516 cmd->engine_error = cmd_q->cmd_error;
1517 goto e_dst;
1518 }
1519
1520 ecc->ecc_result = le16_to_cpup(
1521 (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
1522 if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
1523 ret = -EIO;
1524 goto e_dst;
1525 }
1526
1527 /* Save the ECC result */
1528 ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);
1529
1530e_dst:
1531 ccp_dm_free(&dst);
1532
1533e_src:
1534 ccp_dm_free(&src);
1535
1536 return ret;
1537}
1538
1539static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1540{
1541 struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1542 struct ccp_dm_workarea src, dst;
1543 struct ccp_op op;
1544 int ret;
1545 u8 *save;
1546
1547 if (!ecc->u.pm.point_1.x ||
1548 (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
1549 !ecc->u.pm.point_1.y ||
1550 (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
1551 return -EINVAL;
1552
1553 if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1554 if (!ecc->u.pm.point_2.x ||
1555 (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
1556 !ecc->u.pm.point_2.y ||
1557 (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
1558 return -EINVAL;
1559 } else {
1560 if (!ecc->u.pm.domain_a ||
1561 (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
1562 return -EINVAL;
1563
1564 if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
1565 if (!ecc->u.pm.scalar ||
1566 (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
1567 return -EINVAL;
1568 }
1569
1570 if (!ecc->u.pm.result.x ||
1571 (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
1572 !ecc->u.pm.result.y ||
1573 (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
1574 return -EINVAL;
1575
1576 memset(&op, 0, sizeof(op));
1577 op.cmd_q = cmd_q;
1578 op.jobid = ccp_gen_jobid(cmd_q->ccp);
1579
1580 /* Concatenate the modulus and the operands. Both the modulus and
1581 * the operands must be in little endian format. Since the input
1582 * is in big endian format it must be converted and placed in a
1583 * fixed length buffer.
1584 */
1585 ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1586 DMA_TO_DEVICE);
1587 if (ret)
1588 return ret;
1589
1590 /* Save the workarea address since it is updated in order to perform
1591 * the concatenation
1592 */
1593 save = src.address;
1594
1595 /* Copy the ECC modulus */
1596 ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1597 CCP_ECC_OPERAND_SIZE, false);
1598 if (ret)
1599 goto e_src;
1600 src.address += CCP_ECC_OPERAND_SIZE;
1601
1602 /* Copy the first point X and Y coordinate */
1603 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
1604 ecc->u.pm.point_1.x_len,
1605 CCP_ECC_OPERAND_SIZE, false);
1606 if (ret)
1607 goto e_src;
1608 src.address += CCP_ECC_OPERAND_SIZE;
1609 ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
1610 ecc->u.pm.point_1.y_len,
1611 CCP_ECC_OPERAND_SIZE, false);
1612 if (ret)
1613 goto e_src;
1614 src.address += CCP_ECC_OPERAND_SIZE;
1615
1616 /* Set the first point Z coordianate to 1 */
1617 *src.address = 0x01;
1618 src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinates */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * it to copy out the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
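/*
 * Illustrative only, compiled out: the corresponding field setup for a
 * point multiply (result = scalar * point_1).  The sg_* scatterlists are
 * assumed to have been initialized with sg_init_one() over 48-byte
 * big-endian buffers as in the modular-math sketch above; submission and
 * completion handling would follow the same pattern.  For PADD, point_2
 * is filled in instead of domain_a and scalar.
 */
#if 0
	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_ECC;
	cmd.flags = CCP_CMD_MAY_BACKLOG;
	cmd.u.ecc.function = CCP_ECC_FUNCTION_PMUL_384BIT;
	cmd.u.ecc.mod = &sg_mod;		/* curve prime */
	cmd.u.ecc.mod_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.point_1.x = &sg_x;
	cmd.u.ecc.u.pm.point_1.x_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.point_1.y = &sg_y;
	cmd.u.ecc.u.pm.point_1.y_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.domain_a = &sg_a;	/* curve "a" parameter */
	cmd.u.ecc.u.pm.domain_a_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.scalar = &sg_k;
	cmd.u.ecc.u.pm.scalar_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.result.x = &sg_rx;	/* buffers >= 48 bytes */
	cmd.u.ecc.u.pm.result.x_len = CCP_ECC_MODULUS_BYTES;
	cmd.u.ecc.u.pm.result.y = &sg_ry;
	cmd.u.ecc.u.pm.result.y_len = CCP_ECC_MODULUS_BYTES;
#endif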

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

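/*
 * Dispatch a queued command to its engine-specific handler.  This runs in
 * the command queue's processing context: per-command error state is reset
 * and the queue's current free slot count is re-read from the status
 * register before the engine handler executes.
 */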
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}