// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
                                     struct aead_request *req);

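/*
 * Complete the current AEAD request on the crypto engine and drop the
 * runtime PM reference held for the transfer (taken when the hardware
 * was set up for this request, outside this file).
 */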
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
        struct aead_request *req = dd->aead_req;

        dd->in_sg = NULL;
        dd->out_sg = NULL;

        crypto_finalize_aead_request(dd->engine, req, ret);

        pm_runtime_mark_last_busy(dd->dev);
        pm_runtime_put_autosuspend(dd->dev);
}

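/*
 * DMA completion path: sync and unmap the DMA buffers, undo any alignment
 * copies, then finish the request. For encryption the computed tag is
 * copied out to the tail of the destination buffer; for decryption the
 * residual tag (already XORed with the expected tag in the DMA-out
 * callback) must be all-zero, otherwise the message failed authentication.
 */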
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
        u8 *tag;
        int alen, clen, i, ret = 0, nsg;
        struct omap_aes_reqctx *rctx;

        alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
        clen = ALIGN(dd->total, AES_BLOCK_SIZE);
        rctx = aead_request_ctx(dd->aead_req);

        nsg = !!(dd->assoc_len && dd->total);

        dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
                               DMA_FROM_DEVICE);
        dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
        dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
        omap_aes_crypt_dma_stop(dd);

        omap_crypto_cleanup(dd->out_sg, dd->orig_out,
                            dd->aead_req->assoclen, dd->total,
                            FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

        if (dd->flags & FLAGS_ENCRYPT)
                scatterwalk_map_and_copy(rctx->auth_tag,
                                         dd->aead_req->dst,
                                         dd->total + dd->aead_req->assoclen,
                                         dd->authsize, 1);

        omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
                            FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

        omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
                            FLAGS_IN_DATA_ST_SHIFT, dd->flags);

        if (!(dd->flags & FLAGS_ENCRYPT)) {
                tag = (u8 *)rctx->auth_tag;
                for (i = 0; i < dd->authsize; i++) {
                        if (tag[i])
                                ret = -EBADMSG;
                }
        }

        omap_aes_gcm_finish_req(dd, ret);
}

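/*
 * Lay out the request for DMA. The AES engine works on block-aligned data,
 * so the associated data and the payload are each copied into zero-padded,
 * single-entry scatterlists (dd->in_sgl[]), and the output scatterlist is
 * realigned or copied as needed. For RFC 4106 the trailing 8 bytes of the
 * associated data carry the per-packet IV (per the crypto API convention)
 * and are excluded from the hashed length.
 */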
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
                                     struct aead_request *req)
{
        int alen, clen, cryptlen, assoclen, ret;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authlen = crypto_aead_authsize(aead);
        struct scatterlist *tmp, sg_arr[2];
        int nsg;
        u16 flags;

        assoclen = req->assoclen;
        cryptlen = req->cryptlen;

        if (dd->flags & FLAGS_RFC4106_GCM)
                assoclen -= 8;

        if (!(dd->flags & FLAGS_ENCRYPT))
                cryptlen -= authlen;

        alen = ALIGN(assoclen, AES_BLOCK_SIZE);
        clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

        nsg = !!(assoclen && cryptlen);

        omap_aes_clear_copy_flags(dd);

        sg_init_table(dd->in_sgl, nsg + 1);
        if (assoclen) {
                tmp = req->src;
                ret = omap_crypto_align_sg(&tmp, assoclen,
                                           AES_BLOCK_SIZE, dd->in_sgl,
                                           OMAP_CRYPTO_COPY_DATA |
                                           OMAP_CRYPTO_ZERO_BUF |
                                           OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
                                           FLAGS_ASSOC_DATA_ST_SHIFT,
                                           &dd->flags);
                if (ret)
                        return ret;
        }

        if (cryptlen) {
                tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

                if (nsg)
                        sg_unmark_end(dd->in_sgl);

                ret = omap_crypto_align_sg(&tmp, cryptlen,
                                           AES_BLOCK_SIZE, &dd->in_sgl[nsg],
                                           OMAP_CRYPTO_COPY_DATA |
                                           OMAP_CRYPTO_ZERO_BUF |
                                           OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
                                           FLAGS_IN_DATA_ST_SHIFT,
                                           &dd->flags);
                if (ret)
                        return ret;
        }

        dd->in_sg = dd->in_sgl;
        dd->total = cryptlen;
        dd->assoc_len = assoclen;
        dd->authsize = authlen;

        dd->out_sg = req->dst;
        dd->orig_out = req->dst;

        dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);

        flags = 0;
        if (req->src == req->dst || dd->out_sg == sg_arr)
                flags |= OMAP_CRYPTO_FORCE_COPY;

        if (cryptlen) {
                ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
                                           AES_BLOCK_SIZE, &dd->out_sgl,
                                           flags,
                                           FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
                if (ret)
                        return ret;
        }

        dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
        dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

        return 0;
}

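/*
 * Compute E(K, J0): encrypt the 16-byte pre-counter block (96-bit IV
 * followed by a 32-bit counter of 1) with the AES library implementation.
 * GCM XORs this value into the GHASH output to form the final tag; the
 * caller stashes the result in rctx->auth_tag before DMA starts.
 */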
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));

        aes_encrypt(&ctx->actx, (u8 *)tag, (u8 *)iv);
        return 0;
}

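/*
 * DMA-out completion callback. The AES_REG_TAG_N registers hold the GHASH
 * value; the final E(K, J0) masking is applied here in software by XORing
 * it with the block precomputed in do_encrypt_iv(). For decryption the tag
 * carried in the source buffer is XORed in as well, so rctx->auth_tag ends
 * up all-zero exactly when the tags match, which is what
 * omap_aes_gcm_done_task() then checks.
 */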
void omap_aes_gcm_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;
        struct omap_aes_reqctx *rctx;
        int i, val;
        u32 *auth_tag, tag[4];

        if (!(dd->flags & FLAGS_ENCRYPT))
                scatterwalk_map_and_copy(tag, dd->aead_req->src,
                                         dd->total + dd->aead_req->assoclen,
                                         dd->authsize, 0);

        rctx = aead_request_ctx(dd->aead_req);
        auth_tag = (u32 *)rctx->auth_tag;
        for (i = 0; i < 4; i++) {
                val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
                auth_tag[i] = val ^ auth_tag[i];
                if (!(dd->flags & FLAGS_ENCRYPT))
                        auth_tag[i] = auth_tag[i] ^ tag[i];
        }

        omap_aes_gcm_done_task(dd);
}

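/*
 * Queueing is delegated to the crypto engine: requests handed off here come
 * back via omap_aes_gcm_crypt_req() when the engine schedules them.
 */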
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
                                     struct aead_request *req)
{
        if (req)
                return crypto_transfer_aead_request_to_engine(dd->engine, req);

        return 0;
}

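/*
 * Per-request hardware setup: record the request and its cipher mode in
 * the device state, stage the aligned DMA buffers, and program the control
 * registers through omap_aes_write_ctrl().
 */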
static int omap_aes_gcm_prepare_req(struct aead_request *req,
                                    struct omap_aes_dev *dd)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        int err;

        dd->aead_req = req;

        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        err = omap_aes_gcm_copy_buffers(dd, req);
        if (err)
                return err;

        dd->ctx = &ctx->octx;

        return omap_aes_write_ctrl(dd);
}

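/*
 * Common entry point for all four GCM variants. The J0 block is built from
 * the IV plus a big-endian counter of 1 and encrypted up front. A request
 * with no AAD and no payload never touches the hardware: its tag is just
 * E(K, J0), which is already sitting in rctx->auth_tag and is copied
 * straight to the destination.
 */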
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authlen = crypto_aead_authsize(aead);
        struct omap_aes_dev *dd;
        __be32 counter = cpu_to_be32(1);
        int err, assoclen;

        memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
        memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

        err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
        if (err)
                return err;

        if (mode & FLAGS_RFC4106_GCM)
                assoclen = req->assoclen - 8;
        else
                assoclen = req->assoclen;
        if (assoclen + req->cryptlen == 0) {
                scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
                                         1);
                return 0;
        }

        dd = omap_aes_find_dev(rctx);
        if (!dd)
                return -ENODEV;
        rctx->mode = mode;

        return omap_aes_gcm_handle_queue(dd, req);
}

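/* Plain GCM: the caller supplies the full 12-byte IV in req->iv. */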
int omap_aes_gcm_encrypt(struct aead_request *req)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
        return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
        return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

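/*
 * RFC 4106 (GCM for IPsec ESP): the 12-byte nonce is the 4-byte salt from
 * the key material followed by the 8-byte per-packet IV, and only the
 * assoclen values valid for ESP are accepted.
 */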
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, ctx->octx.nonce, 4);
        memcpy(rctx->iv + 4, req->iv, 8);
        return crypto_ipsec_check_assoclen(req->assoclen) ?:
               omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
                                  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);

        memcpy(rctx->iv, ctx->octx.nonce, 4);
        memcpy(rctx->iv + 4, req->iv, 8);
        return crypto_ipsec_check_assoclen(req->assoclen) ?:
               omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

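/*
 * The key is expanded once for the software AES used by do_encrypt_iv()
 * and kept verbatim for the hardware; aes_expandkey() also rejects invalid
 * key lengths.
 */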
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;

        ret = aes_expandkey(&ctx->actx, key, keylen);
        if (ret)
                return ret;

        memcpy(ctx->octx.key, key, keylen);
        ctx->octx.keylen = keylen;

        return 0;
}

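/*
 * RFC 4106 keys carry a 4-byte salt after the AES key proper; split it off
 * and store it as the nonce prefix.
 */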
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
        int ret;

        if (keylen < 4)
                return -EINVAL;
        keylen -= 4;

        ret = aes_expandkey(&ctx->actx, key, keylen);
        if (ret)
                return ret;

        memcpy(ctx->octx.key, key, keylen);
        memcpy(ctx->octx.nonce, key + keylen, 4);
        ctx->octx.keylen = keylen;

        return 0;
}

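/* Tag-length validation is delegated to the generic GCM/RFC 4106 helpers. */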
int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        return crypto_gcm_check_authsize(authsize);
}

int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
                                 unsigned int authsize)
{
        return crypto_rfc4106_check_authsize(authsize);
}

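/*
 * Crypto engine do_one_request() hook: set up the hardware for the request
 * and kick off DMA. If no input scatterlist entries were mapped, skip DMA
 * and go straight to the tag-computation callback.
 */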
int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
        struct aead_request *req = container_of(areq, struct aead_request,
                                                base);
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;
        int ret;

        if (!dd)
                return -ENODEV;

        ret = omap_aes_gcm_prepare_req(req, dd);
        if (ret)
                return ret;

        if (dd->in_sg_len)
                ret = omap_aes_crypt_dma_start(dd);
        else
                omap_aes_gcm_dma_out_callback(dd);

        return ret;
}

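/* Reserve room for the per-request context used throughout this file. */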
int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));

        return 0;
}