v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->in_sg = NULL;
	dd->out_sg = NULL;

	crypto_finalize_aead_request(dd->engine, req, ret);

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);
}

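/*
 * Post-DMA completion: sync and unmap the DMA scatterlists, copy the
 * computed tag into the destination buffer on encryption, and on
 * decryption verify that the combined tag bytes are all zero (any
 * non-zero byte means authentication failed, hence -EBADMSG).
 */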
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i])
				ret = -EBADMSG;
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
}

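/*
 * Stage the request for DMA: copy the associated data and payload into
 * AES-block-aligned scatterlists (zero-padding the tails), then set up
 * an aligned output list, forcing a bounce copy when source and
 * destination overlap.
 */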
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		if (nsg)
			sg_unmark_end(dd->in_sgl);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	if (cryptlen) {
		ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
					   AES_BLOCK_SIZE, &dd->out_sgl,
					   flags,
					   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

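/*
 * Compute E_K(J0): encrypt the IV-plus-counter block with the
 * software-expanded key (ctx->actx). The result seeds rctx->auth_tag
 * and is folded into the hardware tag output in
 * omap_aes_gcm_dma_out_callback().
 */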
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));

	aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
	return 0;
}

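/*
 * DMA completion callback: XOR the hardware tag registers into the
 * precomputed E_K(J0). On decryption the tag carried in the source
 * buffer is XORed in as well, so a valid message leaves auth_tag all
 * zeroes for omap_aes_gcm_done_task() to check.
 */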
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

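/*
 * Queueing is delegated to the crypto engine; the engine core calls
 * back into omap_aes_gcm_crypt_req() to run each request.
 */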
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	if (req)
		return crypto_transfer_aead_request_to_engine(dd->engine, req);

	return 0;
}

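/*
 * Called from the engine worker via omap_aes_gcm_crypt_req(): latch the
 * request, fold the requested mode into dd->flags, stage the DMA
 * buffers and program the control registers.
 */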
static int omap_aes_gcm_prepare_req(struct aead_request *req,
				    struct omap_aes_dev *dd)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int err;

	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	dd->ctx = &ctx->octx;

	return omap_aes_write_ctrl(dd);
}

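/*
 * Common entry point for all four GCM variants: build J0 (the IV
 * followed by a 32-bit counter of 1), precompute E_K(J0), and handle
 * the empty-message case inline, where the tag is E_K(J0) itself.
 */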
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

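/*
 * RFC 4106 (GCM for IPsec ESP) variants: the 4-byte nonce stored at
 * setkey time supplies the leading IV bytes, the 8-byte per-request IV
 * fills the rest, and the last 8 bytes of req->assoclen are excluded
 * from the authenticated data (see FLAGS_RFC4106_GCM above).
 */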
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->octx.nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->octx.nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	ret = aes_expandkey(&ctx->actx, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->octx.key, key, keylen);
	ctx->octx.keylen = keylen;

	return 0;
}

int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	if (keylen < 4)
		return -EINVAL;
	keylen -= 4;

	ret = aes_expandkey(&ctx->actx, key, keylen);
	if (ret)
		return ret;

	memcpy(ctx->octx.key, key, keylen);
	memcpy(ctx->octx.nonce, key + keylen, 4);
	ctx->octx.keylen = keylen;

	return 0;
}

int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
				 unsigned int authsize)
{
	return crypto_rfc4106_check_authsize(authsize);
}

int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct omap_aes_dev *dd = rctx->dd;
	int ret;

	if (!dd)
		return -ENODEV;

	ret = omap_aes_gcm_prepare_req(req, dd);
	if (ret)
		return ret;

	if (dd->in_sg_len)
		ret = omap_aes_crypt_dma_start(dd);
	else
		omap_aes_gcm_dma_out_callback(dd);

	return ret;
}

int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

	return 0;
}

v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

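/*
 * In v5.4 the driver manages its own request queue: completion clears
 * FLAGS_BUSY and signals the caller directly through base.complete(),
 * instead of going through the crypto engine as in later kernels.
 */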
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}

static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}

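/*
 * Note that in this version the return value of omap_crypto_align_sg()
 * is not checked for the assoc-data and payload copies; the v6.13.7
 * listing above bails out on error at both call sites.
 */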
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

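/*
 * v5.4 computes E_K(J0) by round-tripping one block through a fallback
 * skcipher (ctx->ctr) and waiting for its completion; v6.13.7 replaces
 * all of this with a single aes_encrypt() library call.
 */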
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}

void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

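/*
 * Software queue handling, later replaced by the crypto engine:
 * enqueue under dd->lock, back off while the hardware is busy, then
 * dequeue the next request and drive it through to DMA start.
 */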
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}

static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

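/*
 * The v5.4 setkey paths validate the key length inline and store the
 * raw key only; compare the v6.13.7 listing above, which also expands
 * the key in software via aes_expandkey().
 */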
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}