// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

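/*
 * DMA completion callback: tear down the DMA channel and SG mappings for a
 * finished request, save the updated IV from the result dump buffer, and
 * signal completion to the qce core.
 */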
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

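/*
 * Prepare and kick off one request on the crypto engine: build a destination
 * scatterlist with the result dump buffer appended, DMA-map source and
 * destination, then program the hardware via qce_start().
 */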
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		return rctx->dst_nents;
	}

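	/* one extra entry for the result dump buffer appended below */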
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	/* dma_map_sg() returns 0 on failure, never a negative error code */
	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

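/*
 * AES setkey: remember the key for the hardware and mirror it into the
 * software fallback, which serves the key sizes and request shapes the
 * hardware path does not take.
 */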
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

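	/*
	 * Only 128- and 256-bit keys are programmed into the hardware;
	 * other sizes (e.g. AES-192) are handled by the fallback alone.
	 */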
	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

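/*
 * DES setkey: the common helper validates the key length and rejects weak
 * keys when the transform forbids them, then the key is stored for hardware.
 */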
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

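/* 3DES setkey: same pattern, using the triple-DES key verification helper */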
static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

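/*
 * Common encrypt/decrypt entry point: decide whether to queue the request on
 * the hardware engine or hand it to the software fallback.
 */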
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/*
	 * qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
	 * is not a multiple of it; pass such requests to the fallback
	 */
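	/*
	 * Also fall back for key sizes the hardware path does not program
	 * (anything but AES-128/256) and for short requests, where software
	 * is typically faster (see aes_sw_max_len).
	 */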
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

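/*
 * AES transforms allocate a software fallback and reserve extra request
 * space for the fallback's own skcipher_request at the end of the reqctx.
 */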
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

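/* static description of one algorithm variant, expanded at registration */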
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0, /* ECB takes no IV */
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};

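/*
 * Allocate a template for one entry of skcipher_def[], fill in the
 * skcipher_alg callbacks, and register it with the crypto API.
 */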
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				     struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize = def->blocksize;
	alg->chunksize = def->chunksize;
	alg->ivsize = def->ivsize;
	alg->min_keysize = def->min_keysize;
	alg->max_keysize = def->max_keysize;
	alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
		      IS_DES(def->flags) ? qce_des_setkey :
		      qce_skcipher_setkey;
	alg->encrypt = qce_skcipher_encrypt;
	alg->decrypt = qce_skcipher_decrypt;

	alg->base.cra_priority = 300;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
			      CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask = 0;
	alg->base.cra_module = THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init = qce_skcipher_init_fallback;
		alg->exit = qce_skcipher_exit;
	} else {
		alg->init = qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		/* log before kfree(): alg points into tmpl */
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

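/* ops table exported to the qce core, which drives registration and requests */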
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};