// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

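/*
 * Set up a DMA iterator for @req: one iterator over the whole cryptlen plus
 * one scatterlist iterator per direction (src mapped to the device, dst
 * mapped from it).
 */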
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

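/*
 * Undo the DMA mappings created for a DMA-backed request: in-place requests
 * were mapped bidirectionally with a single map call, out-of-place requests
 * have separate src/dst mappings. The TDMA descriptor chain is freed last.
 */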
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

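/*
 * CPU-driven ("standard") mode: copy the operation descriptor and up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of input data into the engine SRAM, then
 * start accelerator 0 and rely on the completion interrupt. After the first
 * chunk only the descriptor part is rewritten (skip_ctx), so the context
 * (e.g. the chained IV) left in SRAM by the previous chunk is preserved.
 */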
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

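/*
 * Completion handler for standard mode: copy the processed chunk out of the
 * engine SRAM into the destination scatterlist. Returns -EINPROGRESS while
 * data remains, so the core steps the request again for the next chunk.
 */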
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

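/*
 * Final completion: drop this request's contribution to the engine load and
 * copy the output IV back into the request, either from the last TDMA
 * operation context (DMA mode) or straight from the engine SRAM (standard
 * mode), so that chained CBC requests see the expected IV.
 */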
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

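/*
 * Expand the AES key with the generic helper, then fix up the decryption
 * schedule for the hardware: the engine derives its decryption key from the
 * last round keys, and for 192/256-bit keys the trailing words of the
 * encryption schedule have to be appended after key_dec[0..3] (for AES-128
 * "remaining" is zero and the loop is a no-op).
 */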
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

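/*
 * Build the TDMA descriptor chain for a request: map src/dst (a single
 * bidirectional mapping for in-place operation), then for each SRAM-sized
 * chunk emit the operation descriptor (context omitted after the first
 * chunk), the input transfers, a dummy descriptor that launches the crypto
 * engine, and the output transfers. A final "result" descriptor fetches the
 * updated IV back from SRAM, and the last descriptor is tagged
 * end-of-request.
 */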
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

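/*
 * Common request validation/setup: reject lengths that are not a multiple
 * of the cipher block size, count the scatterlist entries actually covered
 * by cryptlen, force the "crypt only" operation, and pick the TDMA-backed
 * or CPU-driven backend depending on the engine capabilities.
 */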
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

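/*
 * Entry point shared by all encrypt/decrypt callbacks: initialize the
 * request, pick an engine based on the current load and cryptlen, prepare
 * the request for that engine and hand it to the CESA queue. On synchronous
 * failure the resources acquired during initialization are released here.
 */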
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

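/*
 * Select the DES crypto mode and stash the key in the operation template
 * before queueing; the ECB/CBC and encrypt/decrypt configuration bits have
 * already been set by the callers below.
 */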
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

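/*
 * Pick the encryption or decryption key schedule depending on the requested
 * direction, copy it into the operation template in the little-endian layout
 * the engine expects, and set the AES key-length configuration bits (128-bit
 * is the default, so only 192/256 need extra flags).
 */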
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

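/*
 * Usage sketch (illustrative only, not part of this driver): callers reach
 * these algorithms through the generic skcipher API, e.g. for the cbc(aes)
 * implementation registered below. Key/buffer setup and error handling are
 * elided.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, sg, sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */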
struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};