/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

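/*
 * Iterator used by the DMA path: it tracks the overall request progress
 * (base) together with the current positions in the source and
 * destination scatterlists, so that the request can be split into
 * SRAM-sized operations.
 */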
struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

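/*
 * Standard (non-DMA) processing path: the CPU copies the operation
 * descriptor and one chunk of input data (at most
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes) into the engine SRAM, then starts
 * the accelerator and waits for the ACCEL0_DONE interrupt.
 */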
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

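/*
 * Called once a chunk has been processed: copy the result out of the
 * engine SRAM into the destination scatterlist and return -EINPROGRESS
 * until the whole request has been handled.
 */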
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_ablkcipher_std_process(ablkreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

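/*
 * Final completion handler: update the engine load accounting and copy
 * the output IV back into the request, either from the last TDMA op
 * descriptor (DMA path) or from the engine SRAM (standard path).
 */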
static void
mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(ablkreq->nbytes, &engine->load);
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(ablkreq->info,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
	.complete = mv_cesa_ablkcipher_complete,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

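/*
 * Expand the AES key with the generic helper, then fix up the tail of
 * the decryption schedule: the last (key_length - 16) / 4 words,
 * starting at key_dec[4], are taken from the upper words of the
 * expanded encryption schedule (for AES-128 the loop is a no-op).
 */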
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

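/*
 * DES setkey: validate the key length and run the generic key expansion
 * to detect weak keys (des_ekey() returns 0 for a weak key), rejecting
 * them when the transform was flagged with CRYPTO_TFM_REQ_WEAK_KEY. The
 * raw key is kept in the context and copied into the op descriptor at
 * request time.
 */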
static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

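/*
 * Build the TDMA descriptor chain for a cipher request: map the
 * scatterlists, then, for each SRAM-sized chunk, emit an op descriptor
 * (the context is only copied once, hence skip_ctx), the input
 * transfers, a dummy descriptor launching the crypto operation, and the
 * output transfers. A final result descriptor fetches the output IV.
 */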
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ablkcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;
	unsigned int ivsize;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

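/*
 * Common request initialization: reject lengths that are not a multiple
 * of the cipher block size, count the source/destination SG entries,
 * force the "crypt only" operation in the template, and pick the DMA or
 * standard backend depending on whether the engine has a TDMA unit.
 */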
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

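/*
 * Common submission path: initialize the request, hand req->nbytes to
 * mv_cesa_select_engine() so an engine can be picked based on load
 * accounting, prepare the request for that engine and queue it, undoing
 * the initialization if the request cannot be queued.
 */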
static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
					struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ablkcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

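/*
 * The helpers below only fill in the op template (algorithm, chaining
 * mode, direction, key and IV) before handing the request to the common
 * queueing code; one pair of encrypt/decrypt entry points is provided
 * per algorithm/mode combination exposed by the driver.
 */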
static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};

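/*
 * AES: select the encryption or decryption key schedule depending on
 * the direction, copy it into the op context in little-endian form, and
 * encode the key length (128/192/256 bits) in the op configuration.
 */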
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};
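
/*
 * Example usage (illustrative only, not part of this driver): a minimal
 * sketch of how a kernel client of this era could reach one of the
 * algorithms registered above, e.g. "cbc(aes)", through the legacy
 * ablkcipher API. my_complete_cb, my_data and the key buffer are
 * placeholders. Since these algorithms are CRYPTO_ALG_ASYNC, encrypt
 * may return -EINPROGRESS, in which case the caller must wait for the
 * completion callback before touching the buffers.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	struct scatterlist sg;
 *	u8 iv[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE];
 *	int ret;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	ret = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	if (ret)
 *		goto out_free_tfm;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	ablkcipher_request_set_callback(req, 0, my_complete_cb, my_data);
 *	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
 *	ret = crypto_ablkcipher_encrypt(req);
 */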