1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/skbuff.h>
48#include <linux/rtnetlink.h>
49#include <linux/highmem.h>
50#include <linux/scatterlist.h>
51
52#include <crypto/aes.h>
53#include <crypto/algapi.h>
54#include <crypto/hash.h>
55#include <crypto/gcm.h>
56#include <crypto/sha.h>
57#include <crypto/authenc.h>
58#include <crypto/ctr.h>
59#include <crypto/gf128mul.h>
60#include <crypto/internal/aead.h>
61#include <crypto/null.h>
62#include <crypto/internal/skcipher.h>
63#include <crypto/aead.h>
64#include <crypto/scatterwalk.h>
65#include <crypto/internal/hash.h>
66
67#include "t4fw_api.h"
68#include "t4_msg.h"
69#include "chcr_core.h"
70#include "chcr_algo.h"
71#include "chcr_crypto.h"
72
73#define IV AES_BLOCK_SIZE
74
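/* Lookup tables used by chcr_sg_ent_in_wr() and chcr_hash_ent_in_wr() to
 * estimate the work-request space consumed by a given number of source
 * (sgl_ent_len) and destination (dsgl_ent_len) scatter entries.
 */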
75static unsigned int sgl_ent_len[] = {
76 0, 0, 16, 24, 40, 48, 64, 72, 88,
77 96, 112, 120, 136, 144, 160, 168, 184,
78 192, 208, 216, 232, 240, 256, 264, 280,
79 288, 304, 312, 328, 336, 352, 360, 376
80};
81
82static unsigned int dsgl_ent_len[] = {
83 0, 32, 32, 48, 48, 64, 64, 80, 80,
84 112, 112, 128, 128, 144, 144, 160, 160,
85 192, 192, 208, 208, 224, 224, 240, 240,
86 272, 272, 288, 288, 304, 304, 320, 320
87};
88
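/* AES key-schedule round constants (Rcon), stored in the high byte of each
 * word; used by get_aes_decrypt_key() below.
 */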
89static u32 round_constant[11] = {
90 0x01000000, 0x02000000, 0x04000000, 0x08000000,
91 0x10000000, 0x20000000, 0x40000000, 0x80000000,
92 0x1B000000, 0x36000000, 0x6C000000
93};
94
95static int chcr_handle_cipher_resp(struct skcipher_request *req,
96 unsigned char *input, int err);
97
98static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
99{
100 return ctx->crypto_ctx->aeadctx;
101}
102
103static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
104{
105 return ctx->crypto_ctx->ablkctx;
106}
107
108static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
109{
110 return ctx->crypto_ctx->hmacctx;
111}
112
113static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
114{
115 return gctx->ctx->gcm;
116}
117
118static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
119{
120 return gctx->ctx->authenc;
121}
122
123static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
124{
125 return container_of(ctx->dev, struct uld_ctx, dev);
126}
127
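/* True when the skb fits within one work request (SGE_MAX_WR_LEN) and can
 * therefore be sent as immediate data.
 */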
128static inline int is_ofld_imm(const struct sk_buff *skb)
129{
130 return (skb->len <= SGE_MAX_WR_LEN);
131}
132
133static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
134{
135 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
136}
137
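/* Count the scatter entries needed to cover @reqlen bytes of @sg, skipping
 * the first @skip bytes and splitting each DMA segment into chunks of at
 * most @entlen bytes.
 */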
138static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
139 unsigned int entlen,
140 unsigned int skip)
141{
142 int nents = 0;
143 unsigned int less;
144 unsigned int skip_len = 0;
145
146 while (sg && skip) {
147 if (sg_dma_len(sg) <= skip) {
148 skip -= sg_dma_len(sg);
149 skip_len = 0;
150 sg = sg_next(sg);
151 } else {
152 skip_len = skip;
153 skip = 0;
154 }
155 }
156
157 while (sg && reqlen) {
158 less = min(reqlen, sg_dma_len(sg) - skip_len);
159 nents += DIV_ROUND_UP(less, entlen);
160 reqlen -= less;
161 skip_len = 0;
162 sg = sg_next(sg);
163 }
164 return nents;
165}
166
167static inline int get_aead_subtype(struct crypto_aead *aead)
168{
169 struct aead_alg *alg = crypto_aead_alg(aead);
170 struct chcr_alg_template *chcr_crypto_alg =
171 container_of(alg, struct chcr_alg_template, alg.aead);
172 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
173}
174
175void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
176{
177 u8 temp[SHA512_DIGEST_SIZE];
178 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
179 int authsize = crypto_aead_authsize(tfm);
180 struct cpl_fw6_pld *fw6_pld;
181 int cmp = 0;
182
183 fw6_pld = (struct cpl_fw6_pld *)input;
184 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
185 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
186 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
187 } else {
188
189 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
190 authsize, req->assoclen +
191 req->cryptlen - authsize);
192 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
193 }
194 if (cmp)
195 *err = -EBADMSG;
196 else
197 *err = 0;
198}
199
200static int chcr_inc_wrcount(struct chcr_dev *dev)
201{
202 if (dev->state == CHCR_DETACH)
203 return 1;
204 atomic_inc(&dev->inflight);
205 return 0;
206}
207
208static inline void chcr_dec_wrcount(struct chcr_dev *dev)
209{
210 atomic_dec(&dev->inflight);
211}
212
213static inline int chcr_handle_aead_resp(struct aead_request *req,
214 unsigned char *input,
215 int err)
216{
217 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
218 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
219 struct chcr_dev *dev = a_ctx(tfm)->dev;
220
221 chcr_aead_common_exit(req);
222 if (reqctx->verify == VERIFY_SW) {
223 chcr_verify_tag(req, input, &err);
224 reqctx->verify = VERIFY_HW;
225 }
226 chcr_dec_wrcount(dev);
227 req->base.complete(&req->base, err);
228
229 return err;
230}
231
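/* Derive the "reverse round" decryption key: run the AES key expansion for
 * the given key length and copy the last Nk round-key words out in reverse
 * order. The result is consumed via generate_copy_rrkey() when building
 * decrypt key contexts.
 */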
232static void get_aes_decrypt_key(unsigned char *dec_key,
233 const unsigned char *key,
234 unsigned int keylength)
235{
236 u32 temp;
237 u32 w_ring[MAX_NK];
238 int i, j, k;
239 u8 nr, nk;
240
241 switch (keylength) {
242 case AES_KEYLENGTH_128BIT:
243 nk = KEYLENGTH_4BYTES;
244 nr = NUMBER_OF_ROUNDS_10;
245 break;
246 case AES_KEYLENGTH_192BIT:
247 nk = KEYLENGTH_6BYTES;
248 nr = NUMBER_OF_ROUNDS_12;
249 break;
250 case AES_KEYLENGTH_256BIT:
251 nk = KEYLENGTH_8BYTES;
252 nr = NUMBER_OF_ROUNDS_14;
253 break;
254 default:
255 return;
256 }
257 for (i = 0; i < nk; i++)
258 w_ring[i] = get_unaligned_be32(&key[i * 4]);
259
260 i = 0;
261 temp = w_ring[nk - 1];
262 while (i + nk < (nr + 1) * 4) {
263 if (!(i % nk)) {
264 /* RotWord(temp) */
265 temp = (temp << 8) | (temp >> 24);
266 temp = aes_ks_subword(temp);
267 temp ^= round_constant[i / nk];
268 } else if (nk == 8 && (i % 4 == 0)) {
269 temp = aes_ks_subword(temp);
270 }
271 w_ring[i % nk] ^= temp;
272 temp = w_ring[i % nk];
273 i++;
274 }
275 i--;
276 for (k = 0, j = i % nk; k < nk; k++) {
277 put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
278 j--;
279 if (j < 0)
280 j += nk;
281 }
282}
283
284static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
285{
286 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
287
288 switch (ds) {
289 case SHA1_DIGEST_SIZE:
290 base_hash = crypto_alloc_shash("sha1", 0, 0);
291 break;
292 case SHA224_DIGEST_SIZE:
293 base_hash = crypto_alloc_shash("sha224", 0, 0);
294 break;
295 case SHA256_DIGEST_SIZE:
296 base_hash = crypto_alloc_shash("sha256", 0, 0);
297 break;
298 case SHA384_DIGEST_SIZE:
299 base_hash = crypto_alloc_shash("sha384", 0, 0);
300 break;
301 case SHA512_DIGEST_SIZE:
302 base_hash = crypto_alloc_shash("sha512", 0, 0);
303 break;
304 }
305
306 return base_hash;
307}
308
309static int chcr_compute_partial_hash(struct shash_desc *desc,
310 char *iopad, char *result_hash,
311 int digest_size)
312{
313 struct sha1_state sha1_st;
314 struct sha256_state sha256_st;
315 struct sha512_state sha512_st;
316 int error;
317
318 if (digest_size == SHA1_DIGEST_SIZE) {
319 error = crypto_shash_init(desc) ?:
320 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
321 crypto_shash_export(desc, (void *)&sha1_st);
322 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
323 } else if (digest_size == SHA224_DIGEST_SIZE) {
324 error = crypto_shash_init(desc) ?:
325 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
326 crypto_shash_export(desc, (void *)&sha256_st);
327 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
328
329 } else if (digest_size == SHA256_DIGEST_SIZE) {
330 error = crypto_shash_init(desc) ?:
331 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 crypto_shash_export(desc, (void *)&sha256_st);
333 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334
335 } else if (digest_size == SHA384_DIGEST_SIZE) {
336 error = crypto_shash_init(desc) ?:
337 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
338 crypto_shash_export(desc, (void *)&sha512_st);
339 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
340
341 } else if (digest_size == SHA512_DIGEST_SIZE) {
342 error = crypto_shash_init(desc) ?:
343 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 crypto_shash_export(desc, (void *)&sha512_st);
345 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 } else {
347 error = -EINVAL;
348 pr_err("Unknown digest size %d\n", digest_size);
349 }
350 return error;
351}
352
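/* Byte-swap the partial hash state to big-endian: 64-bit words for a
 * SHA-512 sized digest, 32-bit words otherwise.
 */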
353static void chcr_change_order(char *buf, int ds)
354{
355 int i;
356
357 if (ds == SHA512_DIGEST_SIZE) {
358 for (i = 0; i < (ds / sizeof(u64)); i++)
359 *((__be64 *)buf + i) =
360 cpu_to_be64(*((u64 *)buf + i));
361 } else {
362 for (i = 0; i < (ds / sizeof(u32)); i++)
363 *((__be32 *)buf + i) =
364 cpu_to_be32(*((u32 *)buf + i));
365 }
366}
367
368static inline int is_hmac(struct crypto_tfm *tfm)
369{
370 struct crypto_alg *alg = tfm->__crt_alg;
371 struct chcr_alg_template *chcr_crypto_alg =
372 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
373 alg.hash);
374 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
375 return 1;
376 return 0;
377}
378
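/* Walk helpers: dsgl_walk_* fill the destination CPL_RX_PHYS_DSGL entries
 * and ulptx_walk_* fill the source ULPTX SGL entries, either from a single
 * page/buffer or from a DMA-mapped scatterlist with an optional byte skip.
 */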
379static inline void dsgl_walk_init(struct dsgl_walk *walk,
380 struct cpl_rx_phys_dsgl *dsgl)
381{
382 walk->dsgl = dsgl;
383 walk->nents = 0;
384 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
385}
386
387static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
388 int pci_chan_id)
389{
390 struct cpl_rx_phys_dsgl *phys_cpl;
391
392 phys_cpl = walk->dsgl;
393
394 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
395 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
396 phys_cpl->pcirlxorder_to_noofsgentr =
397 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
398 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
399 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
400 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
401 CPL_RX_PHYS_DSGL_DCAID_V(0) |
402 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
403 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
404 phys_cpl->rss_hdr_int.qid = htons(qid);
405 phys_cpl->rss_hdr_int.hash_val = 0;
406 phys_cpl->rss_hdr_int.channel = pci_chan_id;
407}
408
409static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
410 size_t size,
411 dma_addr_t addr)
412{
413 int j;
414
415 if (!size)
416 return;
417 j = walk->nents;
418 walk->to->len[j % 8] = htons(size);
419 walk->to->addr[j % 8] = cpu_to_be64(addr);
420 j++;
421 if ((j % 8) == 0)
422 walk->to++;
423 walk->nents = j;
424}
425
426static void dsgl_walk_add_sg(struct dsgl_walk *walk,
427 struct scatterlist *sg,
428 unsigned int slen,
429 unsigned int skip)
430{
431 int skip_len = 0;
432 unsigned int left_size = slen, len = 0;
433 unsigned int j = walk->nents;
434 int offset, ent_len;
435
436 if (!slen)
437 return;
438 while (sg && skip) {
439 if (sg_dma_len(sg) <= skip) {
440 skip -= sg_dma_len(sg);
441 skip_len = 0;
442 sg = sg_next(sg);
443 } else {
444 skip_len = skip;
445 skip = 0;
446 }
447 }
448
449 while (left_size && sg) {
450 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
451 offset = 0;
452 while (len) {
453 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
454 walk->to->len[j % 8] = htons(ent_len);
455 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
456 offset + skip_len);
457 offset += ent_len;
458 len -= ent_len;
459 j++;
460 if ((j % 8) == 0)
461 walk->to++;
462 }
463 walk->last_sg = sg;
464 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
465 skip_len) + skip_len;
466 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
467 skip_len = 0;
468 sg = sg_next(sg);
469 }
470 walk->nents = j;
471}
472
473static inline void ulptx_walk_init(struct ulptx_walk *walk,
474 struct ulptx_sgl *ulp)
475{
476 walk->sgl = ulp;
477 walk->nents = 0;
478 walk->pair_idx = 0;
479 walk->pair = ulp->sge;
480 walk->last_sg = NULL;
481 walk->last_sg_len = 0;
482}
483
484static inline void ulptx_walk_end(struct ulptx_walk *walk)
485{
486 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
487 ULPTX_NSGE_V(walk->nents));
488}
489
490
491static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
492 size_t size,
493 dma_addr_t addr)
494{
495 if (!size)
496 return;
497
498 if (walk->nents == 0) {
499 walk->sgl->len0 = cpu_to_be32(size);
500 walk->sgl->addr0 = cpu_to_be64(addr);
501 } else {
502 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
503 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
504 walk->pair_idx = !walk->pair_idx;
505 if (!walk->pair_idx)
506 walk->pair++;
507 }
508 walk->nents++;
509}
510
511static void ulptx_walk_add_sg(struct ulptx_walk *walk,
512 struct scatterlist *sg,
513 unsigned int len,
514 unsigned int skip)
515{
516 int small;
517 int skip_len = 0;
518 unsigned int sgmin;
519
520 if (!len)
521 return;
522 while (sg && skip) {
523 if (sg_dma_len(sg) <= skip) {
524 skip -= sg_dma_len(sg);
525 skip_len = 0;
526 sg = sg_next(sg);
527 } else {
528 skip_len = skip;
529 skip = 0;
530 }
531 }
532 WARN(!sg, "SG should not be null here\n");
533 if (sg && (walk->nents == 0)) {
534 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
535 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
536 walk->sgl->len0 = cpu_to_be32(sgmin);
537 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
538 walk->nents++;
539 len -= sgmin;
540 walk->last_sg = sg;
541 walk->last_sg_len = sgmin + skip_len;
542 skip_len += sgmin;
543 if (sg_dma_len(sg) == skip_len) {
544 sg = sg_next(sg);
545 skip_len = 0;
546 }
547 }
548
549 while (sg && len) {
550 small = min(sg_dma_len(sg) - skip_len, len);
551 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
552 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
553 walk->pair->addr[walk->pair_idx] =
554 cpu_to_be64(sg_dma_address(sg) + skip_len);
555 walk->pair_idx = !walk->pair_idx;
556 walk->nents++;
557 if (!walk->pair_idx)
558 walk->pair++;
559 len -= sgmin;
560 skip_len += sgmin;
561 walk->last_sg = sg;
562 walk->last_sg_len = skip_len;
563 if (sg_dma_len(sg) == skip_len) {
564 sg = sg_next(sg);
565 skip_len = 0;
566 }
567 }
568}
569
570static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
571{
572 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
573 struct chcr_alg_template *chcr_crypto_alg =
574 container_of(alg, struct chcr_alg_template, alg.skcipher);
575
576 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
577}
578
579static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
580{
581 struct adapter *adap = netdev2adap(dev);
582 struct sge_uld_txq_info *txq_info =
583 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
584 struct sge_uld_txq *txq;
585 int ret = 0;
586
587 local_bh_disable();
588 txq = &txq_info->uldtxq[idx];
589 spin_lock(&txq->sendq.lock);
590 if (txq->full)
591 ret = -1;
592 spin_unlock(&txq->sendq.lock);
593 local_bh_enable();
594 return ret;
595}
596
597static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
598 struct _key_ctx *key_ctx)
599{
600 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
601 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
602 } else {
603 memcpy(key_ctx->key,
604 ablkctx->key + (ablkctx->enckey_len >> 1),
605 ablkctx->enckey_len >> 1);
606 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
607 ablkctx->rrkey, ablkctx->enckey_len >> 1);
608 }
609 return 0;
610}
611
612static int chcr_hash_ent_in_wr(struct scatterlist *src,
613 unsigned int minsg,
614 unsigned int space,
615 unsigned int srcskip)
616{
617 int srclen = 0;
618 int srcsg = minsg;
619 int soffset = 0, sless;
620
621 if (sg_dma_len(src) == srcskip) {
622 src = sg_next(src);
623 srcskip = 0;
624 }
625 while (src && space > (sgl_ent_len[srcsg + 1])) {
626 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
627 CHCR_SRC_SG_SIZE);
628 srclen += sless;
629 soffset += sless;
630 srcsg++;
631 if (sg_dma_len(src) == (soffset + srcskip)) {
632 src = sg_next(src);
633 soffset = 0;
634 srcskip = 0;
635 }
636 }
637 return srclen;
638}
639
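/* Return how many payload bytes fit in a work request with @space bytes of
 * room, walking @src and @dst together and charging sgl_ent_len[] and
 * dsgl_ent_len[] for every source/destination entry consumed; the result is
 * bounded by whichever side fills up first.
 */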
640static int chcr_sg_ent_in_wr(struct scatterlist *src,
641 struct scatterlist *dst,
642 unsigned int minsg,
643 unsigned int space,
644 unsigned int srcskip,
645 unsigned int dstskip)
646{
647 int srclen = 0, dstlen = 0;
648 int srcsg = minsg, dstsg = minsg;
649 int offset = 0, soffset = 0, less, sless = 0;
650
651 if (sg_dma_len(src) == srcskip) {
652 src = sg_next(src);
653 srcskip = 0;
654 }
655 if (sg_dma_len(dst) == dstskip) {
656 dst = sg_next(dst);
657 dstskip = 0;
658 }
659
660 while (src && dst &&
661 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
662 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
663 CHCR_SRC_SG_SIZE);
664 srclen += sless;
665 srcsg++;
666 offset = 0;
667 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
668 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
669 if (srclen <= dstlen)
670 break;
671 less = min_t(unsigned int, sg_dma_len(dst) - offset -
672 dstskip, CHCR_DST_SG_SIZE);
673 dstlen += less;
674 offset += less;
675 if ((offset + dstskip) == sg_dma_len(dst)) {
676 dst = sg_next(dst);
677 offset = 0;
678 }
679 dstsg++;
680 dstskip = 0;
681 }
682 soffset += sless;
683 if ((soffset + srcskip) == sg_dma_len(src)) {
684 src = sg_next(src);
685 srcskip = 0;
686 soffset = 0;
687 }
688
689 }
690 return min(srclen, dstlen);
691}
692
693static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
694 struct skcipher_request *req,
695 u8 *iv,
696 unsigned short op_type)
697{
698 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
699 int err;
700
701 skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
702 skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
703 req->base.complete, req->base.data);
704 skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
705 req->cryptlen, iv);
706
707 err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
708 crypto_skcipher_encrypt(&reqctx->fallback_req);
709
710 return err;
711
712}
713
714static inline int get_qidxs(struct crypto_async_request *req,
715 unsigned int *txqidx, unsigned int *rxqidx)
716{
717 struct crypto_tfm *tfm = req->tfm;
718 int ret = 0;
719
720 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
721 case CRYPTO_ALG_TYPE_AEAD:
722 {
723 struct aead_request *aead_req =
724 container_of(req, struct aead_request, base);
725 struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
726 *txqidx = reqctx->txqidx;
727 *rxqidx = reqctx->rxqidx;
728 break;
729 }
730 case CRYPTO_ALG_TYPE_SKCIPHER:
731 {
732 struct skcipher_request *sk_req =
733 container_of(req, struct skcipher_request, base);
734 struct chcr_skcipher_req_ctx *reqctx =
735 skcipher_request_ctx(sk_req);
736 *txqidx = reqctx->txqidx;
737 *rxqidx = reqctx->rxqidx;
738 break;
739 }
740 case CRYPTO_ALG_TYPE_AHASH:
741 {
742 struct ahash_request *ahash_req =
743 container_of(req, struct ahash_request, base);
744 struct chcr_ahash_req_ctx *reqctx =
745 ahash_request_ctx(ahash_req);
746 *txqidx = reqctx->txqidx;
747 *rxqidx = reqctx->rxqidx;
748 break;
749 }
750 default:
751 ret = -EINVAL;
752 /* should never get here */
753 BUG();
754 break;
755 }
756 return ret;
757}
758
759static inline void create_wreq(struct chcr_context *ctx,
760 struct chcr_wr *chcr_req,
761 struct crypto_async_request *req,
762 unsigned int imm,
763 int hash_sz,
764 unsigned int len16,
765 unsigned int sc_len,
766 unsigned int lcb)
767{
768 struct uld_ctx *u_ctx = ULD_CTX(ctx);
769 unsigned int tx_channel_id, rx_channel_id;
770 unsigned int txqidx = 0, rxqidx = 0;
771 unsigned int qid, fid;
772
773 get_qidxs(req, &txqidx, &rxqidx);
774 qid = u_ctx->lldi.rxq_ids[rxqidx];
775 fid = u_ctx->lldi.rxq_ids[0];
776 tx_channel_id = txqidx / ctx->txq_perchan;
777 rx_channel_id = rxqidx / ctx->rxq_perchan;
778
779
780 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
781 chcr_req->wreq.pld_size_hash_size =
782 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
783 chcr_req->wreq.len16_pkd =
784 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
785 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
786 chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
787 !!lcb, txqidx);
788
789 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
790 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
791 ((sizeof(chcr_req->wreq)) >> 4)));
792 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
793 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
794 sizeof(chcr_req->key_ctx) + sc_len);
795}
796
/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: cipher request, number of bytes to process and the ingress
 *	     queue id on which the response for this WR should be received
 */
804static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
805{
806 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
807 struct chcr_context *ctx = c_ctx(tfm);
808 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
809 struct sk_buff *skb = NULL;
810 struct chcr_wr *chcr_req;
811 struct cpl_rx_phys_dsgl *phys_cpl;
812 struct ulptx_sgl *ulptx;
813 struct chcr_skcipher_req_ctx *reqctx =
814 skcipher_request_ctx(wrparam->req);
815 unsigned int temp = 0, transhdr_len, dst_size;
816 int error;
817 int nents;
818 unsigned int kctx_len;
819 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
820 GFP_KERNEL : GFP_ATOMIC;
821 struct adapter *adap = padap(ctx->dev);
822 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
823
824 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
825 reqctx->dst_ofst);
826 dst_size = get_space_for_phys_dsgl(nents);
827 kctx_len = roundup(ablkctx->enckey_len, 16);
828 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
829 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
830 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
831 temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
832 (sgl_len(nents) * 8);
833 transhdr_len += temp;
834 transhdr_len = roundup(transhdr_len, 16);
835 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
836 if (!skb) {
837 error = -ENOMEM;
838 goto err;
839 }
840 chcr_req = __skb_put_zero(skb, transhdr_len);
841 chcr_req->sec_cpl.op_ivinsrtofst =
842 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
843
844 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
845 chcr_req->sec_cpl.aadstart_cipherstop_hi =
846 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
847
848 chcr_req->sec_cpl.cipherstop_lo_authinsert =
849 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
850 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
851 ablkctx->ciph_mode,
852 0, 0, IV >> 1);
853 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
854 0, 1, dst_size);
855
856 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
857 if ((reqctx->op == CHCR_DECRYPT_OP) &&
858 (!(get_cryptoalg_subtype(tfm) ==
859 CRYPTO_ALG_SUB_TYPE_CTR)) &&
860 (!(get_cryptoalg_subtype(tfm) ==
861 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
862 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
863 } else {
864 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
865 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
866 memcpy(chcr_req->key_ctx.key, ablkctx->key,
867 ablkctx->enckey_len);
868 } else {
869 memcpy(chcr_req->key_ctx.key, ablkctx->key +
870 (ablkctx->enckey_len >> 1),
871 ablkctx->enckey_len >> 1);
872 memcpy(chcr_req->key_ctx.key +
873 (ablkctx->enckey_len >> 1),
874 ablkctx->key,
875 ablkctx->enckey_len >> 1);
876 }
877 }
878 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
879 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
880 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
881 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
882
883 atomic_inc(&adap->chcr_stats.cipher_rqst);
884 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
885 + (reqctx->imm ? (wrparam->bytes) : 0);
886 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
887 transhdr_len, temp,
888 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
889 reqctx->skb = skb;
890
891 if (reqctx->op && (ablkctx->ciph_mode ==
892 CHCR_SCMD_CIPHER_MODE_AES_CBC))
893 sg_pcopy_to_buffer(wrparam->req->src,
894 sg_nents(wrparam->req->src), wrparam->req->iv, 16,
895 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
896
897 return skb;
898err:
899 return ERR_PTR(error);
900}
901
902static inline int chcr_keyctx_ck_size(unsigned int keylen)
903{
904 int ck_size = 0;
905
906 if (keylen == AES_KEYSIZE_128)
907 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
908 else if (keylen == AES_KEYSIZE_192)
909 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
910 else if (keylen == AES_KEYSIZE_256)
911 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
912 else
913 ck_size = 0;
914
915 return ck_size;
916}
917static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
918 const u8 *key,
919 unsigned int keylen)
920{
921 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
922
923 crypto_skcipher_clear_flags(ablkctx->sw_cipher,
924 CRYPTO_TFM_REQ_MASK);
925 crypto_skcipher_set_flags(ablkctx->sw_cipher,
926 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
927 return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
928}
929
930static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
931 const u8 *key,
932 unsigned int keylen)
933{
934 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
935 unsigned int ck_size, context_size;
936 u16 alignment = 0;
937 int err;
938
939 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
940 if (err)
941 goto badkey_err;
942
943 ck_size = chcr_keyctx_ck_size(keylen);
944 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
945 memcpy(ablkctx->key, key, keylen);
946 ablkctx->enckey_len = keylen;
947 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
948 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
949 keylen + alignment) >> 4;
950
951 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
952 0, 0, context_size);
953 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
954 return 0;
955badkey_err:
956 ablkctx->enckey_len = 0;
957
958 return err;
959}
960
961static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
962 const u8 *key,
963 unsigned int keylen)
964{
965 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
966 unsigned int ck_size, context_size;
967 u16 alignment = 0;
968 int err;
969
970 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
971 if (err)
972 goto badkey_err;
973 ck_size = chcr_keyctx_ck_size(keylen);
974 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
975 memcpy(ablkctx->key, key, keylen);
976 ablkctx->enckey_len = keylen;
977 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
978 keylen + alignment) >> 4;
979
980 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
981 0, 0, context_size);
982 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
983
984 return 0;
985badkey_err:
986 ablkctx->enckey_len = 0;
987
988 return err;
989}
990
991static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
992 const u8 *key,
993 unsigned int keylen)
994{
995 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
996 unsigned int ck_size, context_size;
997 u16 alignment = 0;
998 int err;
999
1000 if (keylen < CTR_RFC3686_NONCE_SIZE)
1001 return -EINVAL;
1002 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1003 CTR_RFC3686_NONCE_SIZE);
1004
1005 keylen -= CTR_RFC3686_NONCE_SIZE;
1006 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1007 if (err)
1008 goto badkey_err;
1009
1010 ck_size = chcr_keyctx_ck_size(keylen);
1011 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1012 memcpy(ablkctx->key, key, keylen);
1013 ablkctx->enckey_len = keylen;
1014 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1015 keylen + alignment) >> 4;
1016
1017 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1018 0, 0, context_size);
1019 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1020
1021 return 0;
1022badkey_err:
1023 ablkctx->enckey_len = 0;
1024
1025 return err;
1026}
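
/* Add @add to the 128-bit big-endian counter in @srciv, propagating any
 * carry into the higher words, and store the result in @dstiv.
 */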
1027static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1028{
1029 unsigned int size = AES_BLOCK_SIZE;
1030 __be32 *b = (__be32 *)(dstiv + size);
1031 u32 c, prev;
1032
1033 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1034 for (; size >= 4; size -= 4) {
1035 prev = be32_to_cpu(*--b);
1036 c = prev + add;
1037 *b = cpu_to_be32(c);
1038 if (prev < c)
1039 break;
1040 add = 1;
1041 }
1042
1043}
1044
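/* Clamp @bytes so the low 32-bit word of the big-endian counter in @iv does
 * not wrap within a single request; any remainder is handled in a follow-up
 * work request with an updated IV.
 */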
1045static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1046{
1047 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1048 u64 c;
1049 u32 temp = be32_to_cpu(*--b);
1050
1051 temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1053 if ((bytes / AES_BLOCK_SIZE) >= c)
1054 bytes = c * AES_BLOCK_SIZE;
1055 return bytes;
1056}
1057
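/* Recompute the XTS tweak for the next chunk: encrypt the saved IV with the
 * tweak half of the key and apply one GF(2^128) doubling per block already
 * processed; for non-final chunks the result is decrypted back before being
 * reused as the request IV.
 */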
1058static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1059 u32 isfinal)
1060{
1061 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1062 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1063 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1064 struct crypto_aes_ctx aes;
1065 int ret, i;
1066 u8 *key;
1067 unsigned int keylen;
1068 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1069 int round8 = round / 8;
1070
1071 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1072
1073 keylen = ablkctx->enckey_len / 2;
1074 key = ablkctx->key + keylen;
	/* For a 192-bit key, remove the padding zeroes that were
	 * added in chcr_xts_setkey
	 */
1078 if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1079 == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1080 ret = aes_expandkey(&aes, key, keylen - 8);
1081 else
1082 ret = aes_expandkey(&aes, key, keylen);
1083 if (ret)
1084 return ret;
1085 aes_encrypt(&aes, iv, iv);
1086 for (i = 0; i < round8; i++)
1087 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1088
1089 for (i = 0; i < (round % 8); i++)
1090 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1091
1092 if (!isfinal)
1093 aes_decrypt(&aes, iv, iv);
1094
1095 memzero_explicit(&aes, sizeof(aes));
1096 return 0;
1097}
1098
1099static int chcr_update_cipher_iv(struct skcipher_request *req,
1100 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1101{
1102 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1103 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1104 int subtype = get_cryptoalg_subtype(tfm);
1105 int ret = 0;
1106
1107 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1108 ctr_add_iv(iv, req->iv, (reqctx->processed /
1109 AES_BLOCK_SIZE));
1110 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1111 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1112 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1113 AES_BLOCK_SIZE) + 1);
1114 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1115 ret = chcr_update_tweak(req, iv, 0);
1116 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1117 if (reqctx->op)
1118 /*Updated before sending last WR*/
1119 memcpy(iv, req->iv, AES_BLOCK_SIZE);
1120 else
1121 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1122 }
1123
1124 return ret;
1125
1126}
1127
/* We need a separate function for the final IV because in RFC 3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
1132
1133static int chcr_final_cipher_iv(struct skcipher_request *req,
1134 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1135{
1136 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1137 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1138 int subtype = get_cryptoalg_subtype(tfm);
1139 int ret = 0;
1140
1141 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1142 ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1143 AES_BLOCK_SIZE));
1144 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1145 if (!reqctx->partial_req)
1146 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1147 else
1148 ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1151 /*Already updated for Decrypt*/
1152 if (!reqctx->op)
1153 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1154
1155 }
1156 return ret;
1157
1158}
1159
1160static int chcr_handle_cipher_resp(struct skcipher_request *req,
1161 unsigned char *input, int err)
1162{
1163 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1164 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1165 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1166 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1167 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1168 struct chcr_dev *dev = c_ctx(tfm)->dev;
1169 struct chcr_context *ctx = c_ctx(tfm);
1170 struct adapter *adap = padap(ctx->dev);
1171 struct cipher_wr_param wrparam;
1172 struct sk_buff *skb;
1173 int bytes;
1174
1175 if (err)
1176 goto unmap;
1177 if (req->cryptlen == reqctx->processed) {
1178 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1179 req);
1180 err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1181 goto complete;
1182 }
1183
1184 if (!reqctx->imm) {
1185 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1186 CIP_SPACE_LEFT(ablkctx->enckey_len),
1187 reqctx->src_ofst, reqctx->dst_ofst);
1188 if ((bytes + reqctx->processed) >= req->cryptlen)
1189 bytes = req->cryptlen - reqctx->processed;
1190 else
1191 bytes = rounddown(bytes, 16);
1192 } else {
		/* CTR mode counter overflow */
1194 bytes = req->cryptlen - reqctx->processed;
1195 }
1196 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1197 if (err)
1198 goto unmap;
1199
1200 if (unlikely(bytes == 0)) {
1201 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1202 req);
1203 memcpy(req->iv, reqctx->init_iv, IV);
1204 atomic_inc(&adap->chcr_stats.fallback);
1205 err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1206 reqctx->op);
1207 goto complete;
1208 }
1209
1210 if (get_cryptoalg_subtype(tfm) ==
1211 CRYPTO_ALG_SUB_TYPE_CTR)
1212 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1213 wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1214 wrparam.req = req;
1215 wrparam.bytes = bytes;
1216 skb = create_cipher_wr(&wrparam);
1217 if (IS_ERR(skb)) {
1218 pr_err("%s : Failed to form WR. No memory\n", __func__);
1219 err = PTR_ERR(skb);
1220 goto unmap;
1221 }
1222 skb->dev = u_ctx->lldi.ports[0];
1223 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1224 chcr_send_wr(skb);
1225 reqctx->last_req_len = bytes;
1226 reqctx->processed += bytes;
1227 if (get_cryptoalg_subtype(tfm) ==
1228 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1229 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1230 complete(&ctx->cbc_aes_aio_done);
1231 }
1232 return 0;
1233unmap:
1234 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1235complete:
1236 if (get_cryptoalg_subtype(tfm) ==
1237 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1238 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1239 complete(&ctx->cbc_aes_aio_done);
1240 }
1241 chcr_dec_wrcount(dev);
1242 req->base.complete(&req->base, err);
1243 return err;
1244}
1245
1246static int process_cipher(struct skcipher_request *req,
1247 unsigned short qid,
1248 struct sk_buff **skb,
1249 unsigned short op_type)
1250{
1251 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1252 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1253 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1254 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1255 struct adapter *adap = padap(c_ctx(tfm)->dev);
1256 struct cipher_wr_param wrparam;
1257 int bytes, err = -EINVAL;
1258 int subtype;
1259
1260 reqctx->processed = 0;
1261 reqctx->partial_req = 0;
1262 if (!req->iv)
1263 goto error;
1264 subtype = get_cryptoalg_subtype(tfm);
1265 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1266 (req->cryptlen == 0) ||
1267 (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1268 if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1269 goto fallback;
1270 else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1271 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1272 goto fallback;
1273 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1274 ablkctx->enckey_len, req->cryptlen, ivsize);
1275 goto error;
1276 }
1277
1278 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1279 if (err)
1280 goto error;
1281 if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1282 AES_MIN_KEY_SIZE +
1283 sizeof(struct cpl_rx_phys_dsgl) +
1284 /*Min dsgl size*/
1285 32))) {
1286 /* Can be sent as Imm*/
1287 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1288
1289 dnents = sg_nents_xlen(req->dst, req->cryptlen,
1290 CHCR_DST_SG_SIZE, 0);
1291 phys_dsgl = get_space_for_phys_dsgl(dnents);
1292 kctx_len = roundup(ablkctx->enckey_len, 16);
1293 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1294 reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1295 SGE_MAX_WR_LEN;
1296 bytes = IV + req->cryptlen;
1297
1298 } else {
1299 reqctx->imm = 0;
1300 }
1301
1302 if (!reqctx->imm) {
1303 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1304 CIP_SPACE_LEFT(ablkctx->enckey_len),
1305 0, 0);
1306 if ((bytes + reqctx->processed) >= req->cryptlen)
1307 bytes = req->cryptlen - reqctx->processed;
1308 else
1309 bytes = rounddown(bytes, 16);
1310 } else {
1311 bytes = req->cryptlen;
1312 }
1313 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1314 bytes = adjust_ctr_overflow(req->iv, bytes);
1315 }
1316 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1317 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1318 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1319 CTR_RFC3686_IV_SIZE);
1320
1321 /* initialize counter portion of counter block */
1322 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1323 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1324 memcpy(reqctx->init_iv, reqctx->iv, IV);
1325
1326 } else {
1327
1328 memcpy(reqctx->iv, req->iv, IV);
1329 memcpy(reqctx->init_iv, req->iv, IV);
1330 }
1331 if (unlikely(bytes == 0)) {
1332 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1333 req);
1334fallback: atomic_inc(&adap->chcr_stats.fallback);
1335 err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1336 subtype ==
1337 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1338 reqctx->iv : req->iv,
1339 op_type);
1340 goto error;
1341 }
1342 reqctx->op = op_type;
1343 reqctx->srcsg = req->src;
1344 reqctx->dstsg = req->dst;
1345 reqctx->src_ofst = 0;
1346 reqctx->dst_ofst = 0;
1347 wrparam.qid = qid;
1348 wrparam.req = req;
1349 wrparam.bytes = bytes;
1350 *skb = create_cipher_wr(&wrparam);
1351 if (IS_ERR(*skb)) {
1352 err = PTR_ERR(*skb);
1353 goto unmap;
1354 }
1355 reqctx->processed = bytes;
1356 reqctx->last_req_len = bytes;
1357 reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1358
1359 return 0;
1360unmap:
1361 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1362error:
1363 return err;
1364}
1365
1366static int chcr_aes_encrypt(struct skcipher_request *req)
1367{
1368 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1369 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1370 struct chcr_dev *dev = c_ctx(tfm)->dev;
1371 struct sk_buff *skb = NULL;
1372 int err;
1373 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1374 struct chcr_context *ctx = c_ctx(tfm);
1375 unsigned int cpu;
1376
1377 cpu = get_cpu();
1378 reqctx->txqidx = cpu % ctx->ntxq;
1379 reqctx->rxqidx = cpu % ctx->nrxq;
1380 put_cpu();
1381
1382 err = chcr_inc_wrcount(dev);
1383 if (err)
1384 return -ENXIO;
1385 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1386 reqctx->txqidx) &&
1387 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1388 err = -ENOSPC;
1389 goto error;
1390 }
1391
1392 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1393 &skb, CHCR_ENCRYPT_OP);
1394 if (err || !skb)
1395 return err;
1396 skb->dev = u_ctx->lldi.ports[0];
1397 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1398 chcr_send_wr(skb);
1399 if (get_cryptoalg_subtype(tfm) ==
1400 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1401 CRYPTO_TFM_REQ_MAY_SLEEP ) {
1402 reqctx->partial_req = 1;
1403 wait_for_completion(&ctx->cbc_aes_aio_done);
1404 }
1405 return -EINPROGRESS;
1406error:
1407 chcr_dec_wrcount(dev);
1408 return err;
1409}
1410
1411static int chcr_aes_decrypt(struct skcipher_request *req)
1412{
1413 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1414 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1415 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1416 struct chcr_dev *dev = c_ctx(tfm)->dev;
1417 struct sk_buff *skb = NULL;
1418 int err;
1419 struct chcr_context *ctx = c_ctx(tfm);
1420 unsigned int cpu;
1421
1422 cpu = get_cpu();
1423 reqctx->txqidx = cpu % ctx->ntxq;
1424 reqctx->rxqidx = cpu % ctx->nrxq;
1425 put_cpu();
1426
1427 err = chcr_inc_wrcount(dev);
1428 if (err)
1429 return -ENXIO;
1430
1431 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1432 reqctx->txqidx) &&
1433 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1434 return -ENOSPC;
1435 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1436 &skb, CHCR_DECRYPT_OP);
1437 if (err || !skb)
1438 return err;
1439 skb->dev = u_ctx->lldi.ports[0];
1440 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1441 chcr_send_wr(skb);
1442 return -EINPROGRESS;
1443}
1444static int chcr_device_init(struct chcr_context *ctx)
1445{
1446 struct uld_ctx *u_ctx = NULL;
1447 int txq_perchan, ntxq;
1448 int err = 0, rxq_perchan;
1449
1450 if (!ctx->dev) {
1451 u_ctx = assign_chcr_device();
1452 if (!u_ctx) {
1453 err = -ENXIO;
1454 pr_err("chcr device assignment fails\n");
1455 goto out;
1456 }
1457 ctx->dev = &u_ctx->dev;
1458 ntxq = u_ctx->lldi.ntxq;
1459 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1460 txq_perchan = ntxq / u_ctx->lldi.nchan;
1461 ctx->ntxq = ntxq;
1462 ctx->nrxq = u_ctx->lldi.nrxq;
1463 ctx->rxq_perchan = rxq_perchan;
1464 ctx->txq_perchan = txq_perchan;
1465 }
1466out:
1467 return err;
1468}
1469
1470static int chcr_init_tfm(struct crypto_skcipher *tfm)
1471{
1472 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1473 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1474 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1475
1476 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1477 CRYPTO_ALG_NEED_FALLBACK);
1478 if (IS_ERR(ablkctx->sw_cipher)) {
1479 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1480 return PTR_ERR(ablkctx->sw_cipher);
1481 }
1482 init_completion(&ctx->cbc_aes_aio_done);
1483 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1484 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1485
1486 return chcr_device_init(ctx);
1487}
1488
1489static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1490{
1491 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1492 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1493 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1494
	/* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
1498 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1499 CRYPTO_ALG_NEED_FALLBACK);
1500 if (IS_ERR(ablkctx->sw_cipher)) {
1501 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1502 return PTR_ERR(ablkctx->sw_cipher);
1503 }
1504 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1505 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1506 return chcr_device_init(ctx);
1507}
1508
1509
1510static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1511{
1512 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1513 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1514
1515 crypto_free_skcipher(ablkctx->sw_cipher);
1516}
1517
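/* Map a digest size to the key-context MAC key size, SCMD authentication
 * mode and (state-sized) result size used when building hash work requests.
 */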
1518static int get_alg_config(struct algo_param *params,
1519 unsigned int auth_size)
1520{
1521 switch (auth_size) {
1522 case SHA1_DIGEST_SIZE:
1523 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1524 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1525 params->result_size = SHA1_DIGEST_SIZE;
1526 break;
1527 case SHA224_DIGEST_SIZE:
1528 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1529 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1530 params->result_size = SHA256_DIGEST_SIZE;
1531 break;
1532 case SHA256_DIGEST_SIZE:
1533 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1534 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1535 params->result_size = SHA256_DIGEST_SIZE;
1536 break;
1537 case SHA384_DIGEST_SIZE:
1538 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1539 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1540 params->result_size = SHA512_DIGEST_SIZE;
1541 break;
1542 case SHA512_DIGEST_SIZE:
1543 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1544 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1545 params->result_size = SHA512_DIGEST_SIZE;
1546 break;
1547 default:
1548 pr_err("ERROR, unsupported digest size\n");
1549 return -EINVAL;
1550 }
1551 return 0;
1552}
1553
1554static inline void chcr_free_shash(struct crypto_shash *base_hash)
1555{
1556 crypto_free_shash(base_hash);
1557}
1558
/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: hash work request parameters (lengths, key context size,
 *	   last/more flags)
 */
1563static struct sk_buff *create_hash_wr(struct ahash_request *req,
1564 struct hash_wr_param *param)
1565{
1566 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1567 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1568 struct chcr_context *ctx = h_ctx(tfm);
1569 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1570 struct sk_buff *skb = NULL;
1571 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1572 struct chcr_wr *chcr_req;
1573 struct ulptx_sgl *ulptx;
1574 unsigned int nents = 0, transhdr_len;
1575 unsigned int temp = 0;
1576 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1577 GFP_ATOMIC;
1578 struct adapter *adap = padap(h_ctx(tfm)->dev);
1579 int error = 0;
1580 unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1581
1582 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1583 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1584 param->sg_len) <= SGE_MAX_WR_LEN;
1585 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1586 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1587 nents += param->bfr_len ? 1 : 0;
1588 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1589 param->sg_len, 16) : (sgl_len(nents) * 8);
1590 transhdr_len = roundup(transhdr_len, 16);
1591
1592 skb = alloc_skb(transhdr_len, flags);
1593 if (!skb)
1594 return ERR_PTR(-ENOMEM);
1595 chcr_req = __skb_put_zero(skb, transhdr_len);
1596
1597 chcr_req->sec_cpl.op_ivinsrtofst =
1598 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1599
1600 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1601
1602 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1603 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1604 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1605 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1606 chcr_req->sec_cpl.seqno_numivs =
1607 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1608 param->opad_needed, 0);
1609
1610 chcr_req->sec_cpl.ivgen_hdrlen =
1611 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1612
1613 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1614 param->alg_prm.result_size);
1615
1616 if (param->opad_needed)
1617 memcpy(chcr_req->key_ctx.key +
1618 ((param->alg_prm.result_size <= 32) ? 32 :
1619 CHCR_HASH_MAX_DIGEST_SIZE),
1620 hmacctx->opad, param->alg_prm.result_size);
1621
1622 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1623 param->alg_prm.mk_size, 0,
1624 param->opad_needed,
1625 ((param->kctx_len +
1626 sizeof(chcr_req->key_ctx)) >> 4));
1627 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1628 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1629 DUMMY_BYTES);
1630 if (param->bfr_len != 0) {
1631 req_ctx->hctx_wr.dma_addr =
1632 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1633 param->bfr_len, DMA_TO_DEVICE);
1634 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
1636 error = -ENOMEM;
1637 goto err;
1638 }
1639 req_ctx->hctx_wr.dma_len = param->bfr_len;
1640 } else {
1641 req_ctx->hctx_wr.dma_addr = 0;
1642 }
1643 chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the max WR size */
1645 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1646 (param->sg_len + param->bfr_len) : 0);
1647 atomic_inc(&adap->chcr_stats.digest_rqst);
1648 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1649 param->hash_size, transhdr_len,
1650 temp, 0);
1651 req_ctx->hctx_wr.skb = skb;
1652 return skb;
1653err:
1654 kfree_skb(skb);
1655 return ERR_PTR(error);
1656}
1657
1658static int chcr_ahash_update(struct ahash_request *req)
1659{
1660 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1661 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1662 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1663 struct chcr_context *ctx = h_ctx(rtfm);
1664 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1665 struct sk_buff *skb;
1666 u8 remainder = 0, bs;
1667 unsigned int nbytes = req->nbytes;
1668 struct hash_wr_param params;
1669 int error;
1670 unsigned int cpu;
1671
1672 cpu = get_cpu();
1673 req_ctx->txqidx = cpu % ctx->ntxq;
1674 req_ctx->rxqidx = cpu % ctx->nrxq;
1675 put_cpu();
1676
1677 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1678
1679 if (nbytes + req_ctx->reqlen >= bs) {
1680 remainder = (nbytes + req_ctx->reqlen) % bs;
1681 nbytes = nbytes + req_ctx->reqlen - remainder;
1682 } else {
1683 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1684 + req_ctx->reqlen, nbytes, 0);
1685 req_ctx->reqlen += nbytes;
1686 return 0;
1687 }
1688 error = chcr_inc_wrcount(dev);
1689 if (error)
1690 return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for dev guarantees that lldi and padap are valid
	 */
1694 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1695 req_ctx->txqidx) &&
1696 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1697 error = -ENOSPC;
1698 goto err;
1699 }
1700
1701 chcr_init_hctx_per_wr(req_ctx);
1702 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1703 if (error) {
1704 error = -ENOMEM;
1705 goto err;
1706 }
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1708 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1709 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1710 HASH_SPACE_LEFT(params.kctx_len), 0);
1711 if (params.sg_len > req->nbytes)
1712 params.sg_len = req->nbytes;
1713 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1714 req_ctx->reqlen;
1715 params.opad_needed = 0;
1716 params.more = 1;
1717 params.last = 0;
1718 params.bfr_len = req_ctx->reqlen;
1719 params.scmd1 = 0;
1720 req_ctx->hctx_wr.srcsg = req->src;
1721
1722 params.hash_size = params.alg_prm.result_size;
1723 req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
1725 if (IS_ERR(skb)) {
1726 error = PTR_ERR(skb);
1727 goto unmap;
1728 }
1729
1730 req_ctx->hctx_wr.processed += params.sg_len;
1731 if (remainder) {
1732 /* Swap buffers */
1733 swap(req_ctx->reqbfr, req_ctx->skbfr);
1734 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1735 req_ctx->reqbfr, remainder, req->nbytes -
1736 remainder);
1737 }
1738 req_ctx->reqlen = remainder;
1739 skb->dev = u_ctx->lldi.ports[0];
1740 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1741 chcr_send_wr(skb);
1742 return -EINPROGRESS;
1743unmap:
1744 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1745err:
1746 chcr_dec_wrcount(dev);
1747 return error;
1748}
1749
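/* Hand-build the final padding block: a 0x80 marker followed by the total
 * message length in bits (scmd1 << 3) in the last 8 bytes of a 64- or
 * 128-byte block.
 */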
1750static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1751{
1752 memset(bfr_ptr, 0, bs);
1753 *bfr_ptr = 0x80;
1754 if (bs == 64)
1755 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1756 else
1757 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1758}
1759
1760static int chcr_ahash_final(struct ahash_request *req)
1761{
1762 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1763 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1764 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1765 struct hash_wr_param params;
1766 struct sk_buff *skb;
1767 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1768 struct chcr_context *ctx = h_ctx(rtfm);
1769 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1770 int error;
1771 unsigned int cpu;
1772
1773 cpu = get_cpu();
1774 req_ctx->txqidx = cpu % ctx->ntxq;
1775 req_ctx->rxqidx = cpu % ctx->nrxq;
1776 put_cpu();
1777
1778 error = chcr_inc_wrcount(dev);
1779 if (error)
1780 return -ENXIO;
1781
1782 chcr_init_hctx_per_wr(req_ctx);
1783 if (is_hmac(crypto_ahash_tfm(rtfm)))
1784 params.opad_needed = 1;
1785 else
1786 params.opad_needed = 0;
1787 params.sg_len = 0;
1788 req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1790 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1791 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1792 params.opad_needed = 1;
1793 params.kctx_len *= 2;
1794 } else {
1795 params.opad_needed = 0;
1796 }
1797
1798 req_ctx->hctx_wr.result = 1;
1799 params.bfr_len = req_ctx->reqlen;
1800 req_ctx->data_len += params.bfr_len + params.sg_len;
1801 req_ctx->hctx_wr.srcsg = req->src;
1802 if (req_ctx->reqlen == 0) {
1803 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1804 params.last = 0;
1805 params.more = 1;
1806 params.scmd1 = 0;
1807 params.bfr_len = bs;
1808
1809 } else {
1810 params.scmd1 = req_ctx->data_len;
1811 params.last = 1;
1812 params.more = 0;
1813 }
1814 params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
1816 if (IS_ERR(skb)) {
1817 error = PTR_ERR(skb);
1818 goto err;
1819 }
1820 req_ctx->reqlen = 0;
1821 skb->dev = u_ctx->lldi.ports[0];
1822 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1823 chcr_send_wr(skb);
1824 return -EINPROGRESS;
1825err:
1826 chcr_dec_wrcount(dev);
1827 return error;
1828}
1829
1830static int chcr_ahash_finup(struct ahash_request *req)
1831{
1832 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1833 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1834 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1835 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1836 struct chcr_context *ctx = h_ctx(rtfm);
1837 struct sk_buff *skb;
1838 struct hash_wr_param params;
1839 u8 bs;
1840 int error;
1841 unsigned int cpu;
1842
1843 cpu = get_cpu();
1844 req_ctx->txqidx = cpu % ctx->ntxq;
1845 req_ctx->rxqidx = cpu % ctx->nrxq;
1846 put_cpu();
1847
1848 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1849 error = chcr_inc_wrcount(dev);
1850 if (error)
1851 return -ENXIO;
1852
1853 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1854 req_ctx->txqidx) &&
1855 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1856 error = -ENOSPC;
1857 goto err;
1858 }
1859 chcr_init_hctx_per_wr(req_ctx);
1860 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1861 if (error) {
1862 error = -ENOMEM;
1863 goto err;
1864 }
1865
1866 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1867 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1868 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1869 params.kctx_len *= 2;
1870 params.opad_needed = 1;
1871 } else {
1872 params.opad_needed = 0;
1873 }
1874
1875 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1876 HASH_SPACE_LEFT(params.kctx_len), 0);
1877 if (params.sg_len < req->nbytes) {
1878 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1879 params.kctx_len /= 2;
1880 params.opad_needed = 0;
1881 }
1882 params.last = 0;
1883 params.more = 1;
1884 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1885 - req_ctx->reqlen;
1886 params.hash_size = params.alg_prm.result_size;
1887 params.scmd1 = 0;
1888 } else {
1889 params.last = 1;
1890 params.more = 0;
1891 params.sg_len = req->nbytes;
1892 params.hash_size = crypto_ahash_digestsize(rtfm);
1893 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1894 params.sg_len;
1895 }
1896 params.bfr_len = req_ctx->reqlen;
1897 req_ctx->data_len += params.bfr_len + params.sg_len;
1898 req_ctx->hctx_wr.result = 1;
1899 req_ctx->hctx_wr.srcsg = req->src;
1900 if ((req_ctx->reqlen + req->nbytes) == 0) {
1901 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1902 params.last = 0;
1903 params.more = 1;
1904 params.scmd1 = 0;
1905 params.bfr_len = bs;
1906 }
1907 skb = create_hash_wr(req, &params);
1908 if (IS_ERR(skb)) {
1909 error = PTR_ERR(skb);
1910 goto unmap;
1911 }
1912 req_ctx->reqlen = 0;
1913 req_ctx->hctx_wr.processed += params.sg_len;
1914 skb->dev = u_ctx->lldi.ports[0];
1915 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1916 chcr_send_wr(skb);
1917 return -EINPROGRESS;
1918unmap:
1919 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1920err:
1921 chcr_dec_wrcount(dev);
1922 return error;
1923}
1924
1925static int chcr_ahash_digest(struct ahash_request *req)
1926{
1927 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1928 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1929 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1930 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1931 struct chcr_context *ctx = h_ctx(rtfm);
1932 struct sk_buff *skb;
1933 struct hash_wr_param params;
1934 u8 bs;
1935 int error;
1936 unsigned int cpu;
1937
1938 cpu = get_cpu();
1939 req_ctx->txqidx = cpu % ctx->ntxq;
1940 req_ctx->rxqidx = cpu % ctx->nrxq;
1941 put_cpu();
1942
1943 rtfm->init(req);
1944 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1945 error = chcr_inc_wrcount(dev);
1946 if (error)
1947 return -ENXIO;
1948
1949 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1950 req_ctx->txqidx) &&
1951 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1952 error = -ENOSPC;
1953 goto err;
1954 }
1955
1956 chcr_init_hctx_per_wr(req_ctx);
1957 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1958 if (error) {
1959 error = -ENOMEM;
1960 goto err;
1961 }
1962
1963 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1964 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1965 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1966 params.kctx_len *= 2;
1967 params.opad_needed = 1;
1968 } else {
1969 params.opad_needed = 0;
1970 }
1971 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1972 HASH_SPACE_LEFT(params.kctx_len), 0);
1973 if (params.sg_len < req->nbytes) {
1974 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1975 params.kctx_len /= 2;
1976 params.opad_needed = 0;
1977 }
1978 params.last = 0;
1979 params.more = 1;
1980 params.scmd1 = 0;
1981 params.sg_len = rounddown(params.sg_len, bs);
1982 params.hash_size = params.alg_prm.result_size;
1983 } else {
1984 params.sg_len = req->nbytes;
1985 params.hash_size = crypto_ahash_digestsize(rtfm);
1986 params.last = 1;
1987 params.more = 0;
1988 params.scmd1 = req->nbytes + req_ctx->data_len;
1989
1990 }
1991 params.bfr_len = 0;
1992 req_ctx->hctx_wr.result = 1;
1993 req_ctx->hctx_wr.srcsg = req->src;
1994 req_ctx->data_len += params.bfr_len + params.sg_len;
1995
1996 if (req->nbytes == 0) {
1997 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1998 params.more = 1;
1999 params.bfr_len = bs;
2000 }
2001
2002 skb = create_hash_wr(req, &params);
2003 if (IS_ERR(skb)) {
2004 error = PTR_ERR(skb);
2005 goto unmap;
2006 }
2007 req_ctx->hctx_wr.processed += params.sg_len;
2008 skb->dev = u_ctx->lldi.ports[0];
2009 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2010 chcr_send_wr(skb);
2011 return -EINPROGRESS;
2012unmap:
2013 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2014err:
2015 chcr_dec_wrcount(dev);
2016 return error;
2017}
2018
2019static int chcr_ahash_continue(struct ahash_request *req)
2020{
2021 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2022 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2023 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2024 struct chcr_context *ctx = h_ctx(rtfm);
2025 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2026 struct sk_buff *skb;
2027 struct hash_wr_param params;
2028 u8 bs;
2029 int error;
2030 unsigned int cpu;
2031
2032 cpu = get_cpu();
2033 reqctx->txqidx = cpu % ctx->ntxq;
2034 reqctx->rxqidx = cpu % ctx->nrxq;
2035 put_cpu();
2036
2037 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2038 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2039 params.kctx_len = roundup(params.alg_prm.result_size, 16);
2040 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2041 params.kctx_len *= 2;
2042 params.opad_needed = 1;
2043 } else {
2044 params.opad_needed = 0;
2045 }
2046 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2047 HASH_SPACE_LEFT(params.kctx_len),
2048 hctx_wr->src_ofst);
2049 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2050 params.sg_len = req->nbytes - hctx_wr->processed;
2051 if (!hctx_wr->result ||
2052 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2053 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2054 params.kctx_len /= 2;
2055 params.opad_needed = 0;
2056 }
2057 params.last = 0;
2058 params.more = 1;
2059 params.sg_len = rounddown(params.sg_len, bs);
2060 params.hash_size = params.alg_prm.result_size;
2061 params.scmd1 = 0;
2062 } else {
2063 params.last = 1;
2064 params.more = 0;
2065 params.hash_size = crypto_ahash_digestsize(rtfm);
2066 params.scmd1 = reqctx->data_len + params.sg_len;
2067 }
2068 params.bfr_len = 0;
2069 reqctx->data_len += params.sg_len;
2070 skb = create_hash_wr(req, &params);
2071 if (IS_ERR(skb)) {
2072 error = PTR_ERR(skb);
2073 goto err;
2074 }
2075 hctx_wr->processed += params.sg_len;
2076 skb->dev = u_ctx->lldi.ports[0];
2077 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2078 chcr_send_wr(skb);
2079 return 0;
2080err:
2081 return error;
2082}
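
/*
 * Request flow (illustrative): when a hash request does not fit in a
 * single work request, the submission path sends as much source data
 * as fits (rounded down to the block size), the completion handler
 * saves the returned intermediate digest in reqctx->partial_hash, and
 * chcr_ahash_continue() keeps issuing follow-up WRs from
 * hctx_wr.srcsg/src_ofst until hctx_wr.processed covers req->nbytes;
 * the final digest is copied to req->result only when hctx_wr.result
 * is set and all data has been processed.
 */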
2083
2084static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2085 unsigned char *input,
2086 int err)
2087{
2088 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2089 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2090 int digestsize, updated_digestsize;
2091 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2092 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2093 struct chcr_dev *dev = h_ctx(tfm)->dev;
2094
2095 if (input == NULL)
2096 goto out;
2097 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2098 updated_digestsize = digestsize;
2099 if (digestsize == SHA224_DIGEST_SIZE)
2100 updated_digestsize = SHA256_DIGEST_SIZE;
2101 else if (digestsize == SHA384_DIGEST_SIZE)
2102 updated_digestsize = SHA512_DIGEST_SIZE;
2103
2104 if (hctx_wr->dma_addr) {
2105 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2106 hctx_wr->dma_len, DMA_TO_DEVICE);
2107 hctx_wr->dma_addr = 0;
2108 }
2109 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2110 req->nbytes)) {
2111 if (hctx_wr->result == 1) {
2112 hctx_wr->result = 0;
2113 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2114 digestsize);
2115 } else {
2116 memcpy(reqctx->partial_hash,
2117 input + sizeof(struct cpl_fw6_pld),
2118 updated_digestsize);
2119
2120 }
2121 goto unmap;
2122 }
2123 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2124 updated_digestsize);
2125
2126 err = chcr_ahash_continue(req);
2127 if (err)
2128 goto unmap;
2129 return;
2130unmap:
2131 if (hctx_wr->is_sg_map)
2132 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2133
2134
2135out:
2136 chcr_dec_wrcount(dev);
2137 req->base.complete(&req->base, err);
2138}
2139
2140/*
2141 * chcr_handle_resp - Unmap the DMA buffers associated with the request
2142 * @req: crypto request
2143 */
2144int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2145 int err)
2146{
2147 struct crypto_tfm *tfm = req->tfm;
2148 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2149 struct adapter *adap = padap(ctx->dev);
2150
2151 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2152 case CRYPTO_ALG_TYPE_AEAD:
2153 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2154 break;
2155
2156 case CRYPTO_ALG_TYPE_SKCIPHER:
2157 chcr_handle_cipher_resp(skcipher_request_cast(req),
2158 input, err);
2159 break;
2160 case CRYPTO_ALG_TYPE_AHASH:
2161 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2162 }
2163 atomic_inc(&adap->chcr_stats.complete);
2164 return err;
2165}
2166static int chcr_ahash_export(struct ahash_request *areq, void *out)
2167{
2168 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2169 struct chcr_ahash_req_ctx *state = out;
2170
2171 state->reqlen = req_ctx->reqlen;
2172 state->data_len = req_ctx->data_len;
2173 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2174 memcpy(state->partial_hash, req_ctx->partial_hash,
2175 CHCR_HASH_MAX_DIGEST_SIZE);
2176 chcr_init_hctx_per_wr(state);
2177 return 0;
2178}
2179
2180static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2181{
2182 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2184
2185 req_ctx->reqlen = state->reqlen;
2186 req_ctx->data_len = state->data_len;
2187 req_ctx->reqbfr = req_ctx->bfr1;
2188 req_ctx->skbfr = req_ctx->bfr2;
2189 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2190 memcpy(req_ctx->partial_hash, state->partial_hash,
2191 CHCR_HASH_MAX_DIGEST_SIZE);
2192 chcr_init_hctx_per_wr(req_ctx);
2193 return 0;
2194}
2195
2196static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2197 unsigned int keylen)
2198{
2199 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2200 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2201 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2202 unsigned int i, err = 0, updated_digestsize;
2203
2204 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2205
2206 /* Use the key to calculate the ipad and opad. The ipad is sent with the
2207 * first request's data; the opad is sent with the final hash result.
2208 * The ipad lives in hmacctx->ipad and the opad in hmacctx->opad.
2209 */
2210 shash->tfm = hmacctx->base_hash;
2211 if (keylen > bs) {
2212 err = crypto_shash_digest(shash, key, keylen,
2213 hmacctx->ipad);
2214 if (err)
2215 goto out;
2216 keylen = digestsize;
2217 } else {
2218 memcpy(hmacctx->ipad, key, keylen);
2219 }
2220 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2221 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2222
2223 for (i = 0; i < bs / sizeof(int); i++) {
2224 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2225 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2226 }
2227
2228 updated_digestsize = digestsize;
2229 if (digestsize == SHA224_DIGEST_SIZE)
2230 updated_digestsize = SHA256_DIGEST_SIZE;
2231 else if (digestsize == SHA384_DIGEST_SIZE)
2232 updated_digestsize = SHA512_DIGEST_SIZE;
2233 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2234 hmacctx->ipad, digestsize);
2235 if (err)
2236 goto out;
2237 chcr_change_order(hmacctx->ipad, updated_digestsize);
2238
2239 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2240 hmacctx->opad, digestsize);
2241 if (err)
2242 goto out;
2243 chcr_change_order(hmacctx->opad, updated_digestsize);
2244out:
2245 return err;
2246}
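
/*
 * Illustrative sketch of the pad derivation done above (RFC 2104),
 * assuming IPAD_DATA/OPAD_DATA expand to the usual repeated 0x36/0x5c
 * HMAC pad bytes and that an over-long key has already been digested:
 *
 *	u8 ipad[bs], opad[bs];
 *	int i;
 *
 *	memcpy(ipad, key, keylen);
 *	memset(ipad + keylen, 0, bs - keylen);
 *	memcpy(opad, ipad, bs);
 *	for (i = 0; i < bs; i++) {
 *		ipad[i] ^= 0x36;
 *		opad[i] ^= 0x5c;
 *	}
 *
 * Only the partial hashes of these two blocks are kept; the hardware
 * resumes from them instead of re-hashing the key for every request.
 */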
2247
2248static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2249 unsigned int key_len)
2250{
2251 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2252 unsigned short context_size = 0;
2253 int err;
2254
2255 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2256 if (err)
2257 goto badkey_err;
2258
2259 memcpy(ablkctx->key, key, key_len);
2260 ablkctx->enckey_len = key_len;
2261 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2262 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2263 /* Both keys for XTS must be aligned to a 16-byte boundary
2264 * by padding with zeros, so each 24-byte key is padded with 8 zero bytes.
2265 */
2266 if (key_len == 48) {
2267 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2268 + 16) >> 4;
2269 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2270 memset(ablkctx->key + 24, 0, 8);
2271 memset(ablkctx->key + 56, 0, 8);
2272 ablkctx->enckey_len = 64;
2273 ablkctx->key_ctx_hdr =
2274 FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2275 CHCR_KEYCTX_NO_KEY, 1,
2276 0, context_size);
2277 } else {
2278 ablkctx->key_ctx_hdr =
2279 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2280 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2281 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2282 CHCR_KEYCTX_NO_KEY, 1,
2283 0, context_size);
2284 }
2285 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2286 return 0;
2287badkey_err:
2288 ablkctx->enckey_len = 0;
2289
2290 return err;
2291}
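
/*
 * Illustrative layout of ablkctx->key for the 48-byte (two 192-bit
 * halves) XTS case handled above; offsets are byte offsets:
 *
 *	before:	[ key1 (0..23) | key2 (24..47) ]
 *	after:	[ key1 (0..23) | zeros (24..31) | key2 (32..55) | zeros (56..63) ]
 *
 * so each half starts on a 16-byte boundary, which is what the
 * memmove()/memset() sequence achieves.
 */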
2292
2293static int chcr_sha_init(struct ahash_request *areq)
2294{
2295 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2296 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2297 int digestsize = crypto_ahash_digestsize(tfm);
2298
2299 req_ctx->data_len = 0;
2300 req_ctx->reqlen = 0;
2301 req_ctx->reqbfr = req_ctx->bfr1;
2302 req_ctx->skbfr = req_ctx->bfr2;
2303 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2304
2305 return 0;
2306}
2307
2308static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2309{
2310 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2311 sizeof(struct chcr_ahash_req_ctx));
2312 return chcr_device_init(crypto_tfm_ctx(tfm));
2313}
2314
2315static int chcr_hmac_init(struct ahash_request *areq)
2316{
2317 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2318 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2319 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2320 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2321 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2322
2323 chcr_sha_init(areq);
2324 req_ctx->data_len = bs;
2325 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2326 if (digestsize == SHA224_DIGEST_SIZE)
2327 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2328 SHA256_DIGEST_SIZE);
2329 else if (digestsize == SHA384_DIGEST_SIZE)
2330 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2331 SHA512_DIGEST_SIZE);
2332 else
2333 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334 digestsize);
2335 }
2336 return 0;
2337}
2338
2339static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2340{
2341 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2342 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2343 unsigned int digestsize =
2344 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2345
2346 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2347 sizeof(struct chcr_ahash_req_ctx));
2348 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2349 if (IS_ERR(hmacctx->base_hash))
2350 return PTR_ERR(hmacctx->base_hash);
2351 return chcr_device_init(crypto_tfm_ctx(tfm));
2352}
2353
2354static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2355{
2356 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2357 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2358
2359 if (hmacctx->base_hash) {
2360 chcr_free_shash(hmacctx->base_hash);
2361 hmacctx->base_hash = NULL;
2362 }
2363}
2364
2365inline void chcr_aead_common_exit(struct aead_request *req)
2366{
2367 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2368 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2369 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2370
2371 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2372}
2373
2374static int chcr_aead_common_init(struct aead_request *req)
2375{
2376 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2377 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2378 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2379 unsigned int authsize = crypto_aead_authsize(tfm);
2380 int error = -EINVAL;
2381
2382 /* validate key size */
2383 if (aeadctx->enckey_len == 0)
2384 goto err;
2385 if (reqctx->op && req->cryptlen < authsize)
2386 goto err;
2387 if (reqctx->b0_len)
2388 reqctx->scratch_pad = reqctx->iv + IV;
2389 else
2390 reqctx->scratch_pad = NULL;
2391
2392 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2393 reqctx->op);
2394 if (error) {
2395 error = -ENOMEM;
2396 goto err;
2397 }
2398
2399 return 0;
2400err:
2401 return error;
2402}
2403
2404static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2405 int aadmax, int wrlen,
2406 unsigned short op_type)
2407{
2408 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2409
2410 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2411 dst_nents > MAX_DSGL_ENT ||
2412 (req->assoclen > aadmax) ||
2413 (wrlen > SGE_MAX_WR_LEN))
2414 return 1;
2415 return 0;
2416}
2417
2418static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2419{
2420 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2421 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2422 struct aead_request *subreq = aead_request_ctx(req);
2423
2424 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2425 aead_request_set_callback(subreq, req->base.flags,
2426 req->base.complete, req->base.data);
2427 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2428 req->iv);
2429 aead_request_set_ad(subreq, req->assoclen);
2430 return op_type ? crypto_aead_decrypt(subreq) :
2431 crypto_aead_encrypt(subreq);
2432}
2433
2434static struct sk_buff *create_authenc_wr(struct aead_request *req,
2435 unsigned short qid,
2436 int size)
2437{
2438 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2439 struct chcr_context *ctx = a_ctx(tfm);
2440 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2441 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2442 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2443 struct sk_buff *skb = NULL;
2444 struct chcr_wr *chcr_req;
2445 struct cpl_rx_phys_dsgl *phys_cpl;
2446 struct ulptx_sgl *ulptx;
2447 unsigned int transhdr_len;
2448 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2449 unsigned int kctx_len = 0, dnents, snents;
2450 unsigned int authsize = crypto_aead_authsize(tfm);
2451 int error = -EINVAL;
2452 u8 *ivptr;
2453 int null = 0;
2454 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2455 GFP_ATOMIC;
2456 struct adapter *adap = padap(ctx->dev);
2457 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2458
2459 if (req->cryptlen == 0)
2460 return NULL;
2461
2462 reqctx->b0_len = 0;
2463 error = chcr_aead_common_init(req);
2464 if (error)
2465 return ERR_PTR(error);
2466
2467 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2468 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2469 null = 1;
2470 }
2471 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2472 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2473 dnents += MIN_AUTH_SG; // For IV
2474 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2475 CHCR_SRC_SG_SIZE, 0);
2476 dst_size = get_space_for_phys_dsgl(dnents);
2477 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2478 - sizeof(chcr_req->key_ctx);
2479 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2480 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2481 SGE_MAX_WR_LEN;
2482 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2483 : (sgl_len(snents) * 8);
2484 transhdr_len += temp;
2485 transhdr_len = roundup(transhdr_len, 16);
2486
2487 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2488 transhdr_len, reqctx->op)) {
2489 atomic_inc(&adap->chcr_stats.fallback);
2490 chcr_aead_common_exit(req);
2491 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2492 }
2493 skb = alloc_skb(transhdr_len, flags);
2494 if (!skb) {
2495 error = -ENOMEM;
2496 goto err;
2497 }
2498
2499 chcr_req = __skb_put_zero(skb, transhdr_len);
2500
2501 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2502
2503 /*
2504 * Input order is AAD, IV and payload, where the IV is included as
2505 * part of the authenticated data. All other fields are filled
2506 * according to the hardware spec.
2507 */
2508 chcr_req->sec_cpl.op_ivinsrtofst =
2509 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2510 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2511 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2512 null ? 0 : 1 + IV,
2513 null ? 0 : IV + req->assoclen,
2514 req->assoclen + IV + 1,
2515 (temp & 0x1F0) >> 4);
2516 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2517 temp & 0xF,
2518 null ? 0 : req->assoclen + IV + 1,
2519 temp, temp);
2520 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2521 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2522 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2523 else
2524 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2525 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2526 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2527 temp,
2528 actx->auth_mode, aeadctx->hmac_ctrl,
2529 IV >> 1);
2530 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2531 0, 0, dst_size);
2532
2533 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2534 if (reqctx->op == CHCR_ENCRYPT_OP ||
2535 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2536 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2537 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2538 aeadctx->enckey_len);
2539 else
2540 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2541 aeadctx->enckey_len);
2542
2543 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2544 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2545 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2546 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2547 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2548 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2549 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2550 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2551 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2552 CTR_RFC3686_IV_SIZE);
2553 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2554 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2555 } else {
2556 memcpy(ivptr, req->iv, IV);
2557 }
2558 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2559 chcr_add_aead_src_ent(req, ulptx);
2560 atomic_inc(&adap->chcr_stats.cipher_rqst);
2561 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2562 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2563 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2564 transhdr_len, temp, 0);
2565 reqctx->skb = skb;
2566
2567 return skb;
2568err:
2569 chcr_aead_common_exit(req);
2570
2571 return ERR_PTR(error);
2572}
2573
2574int chcr_aead_dma_map(struct device *dev,
2575 struct aead_request *req,
2576 unsigned short op_type)
2577{
2578 int error;
2579 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2580 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2581 unsigned int authsize = crypto_aead_authsize(tfm);
2582 int src_len, dst_len;
2583
2584 /* calculate and handle the src and dst sg lengths separately
2585 * for in-place and out-of-place operations
2586 */
2587 if (req->src == req->dst) {
2588 src_len = req->assoclen + req->cryptlen + (op_type ?
2589 0 : authsize);
2590 dst_len = src_len;
2591 } else {
2592 src_len = req->assoclen + req->cryptlen;
2593 dst_len = req->assoclen + req->cryptlen + (op_type ?
2594 -authsize : authsize);
2595 }
2596
2597 if (!req->cryptlen || !src_len || !dst_len)
2598 return 0;
2599 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2600 DMA_BIDIRECTIONAL);
2601 if (dma_mapping_error(dev, reqctx->iv_dma))
2602 return -ENOMEM;
2603 if (reqctx->b0_len)
2604 reqctx->b0_dma = reqctx->iv_dma + IV;
2605 else
2606 reqctx->b0_dma = 0;
2607 if (req->src == req->dst) {
2608 error = dma_map_sg(dev, req->src,
2609 sg_nents_for_len(req->src, src_len),
2610 DMA_BIDIRECTIONAL);
2611 if (!error)
2612 goto err;
2613 } else {
2614 error = dma_map_sg(dev, req->src,
2615 sg_nents_for_len(req->src, src_len),
2616 DMA_TO_DEVICE);
2617 if (!error)
2618 goto err;
2619 error = dma_map_sg(dev, req->dst,
2620 sg_nents_for_len(req->dst, dst_len),
2621 DMA_FROM_DEVICE);
2622 if (!error) {
2623 dma_unmap_sg(dev, req->src,
2624 sg_nents_for_len(req->src, src_len),
2625 DMA_TO_DEVICE);
2626 goto err;
2627 }
2628 }
2629
2630 return 0;
2631err:
2632 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2633 return -ENOMEM;
2634}
2635
2636void chcr_aead_dma_unmap(struct device *dev,
2637 struct aead_request *req,
2638 unsigned short op_type)
2639{
2640 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2641 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2642 unsigned int authsize = crypto_aead_authsize(tfm);
2643 int src_len, dst_len;
2644
2645 /* calculate and handle the src and dst sg lengths separately
2646 * for in-place and out-of-place operations
2647 */
2648 if (req->src == req->dst) {
2649 src_len = req->assoclen + req->cryptlen + (op_type ?
2650 0 : authsize);
2651 dst_len = src_len;
2652 } else {
2653 src_len = req->assoclen + req->cryptlen;
2654 dst_len = req->assoclen + req->cryptlen + (op_type ?
2655 -authsize : authsize);
2656 }
2657
2658 if (!req->cryptlen || !src_len || !dst_len)
2659 return;
2660
2661 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2662 DMA_BIDIRECTIONAL);
2663 if (req->src == req->dst) {
2664 dma_unmap_sg(dev, req->src,
2665 sg_nents_for_len(req->src, src_len),
2666 DMA_BIDIRECTIONAL);
2667 } else {
2668 dma_unmap_sg(dev, req->src,
2669 sg_nents_for_len(req->src, src_len),
2670 DMA_TO_DEVICE);
2671 dma_unmap_sg(dev, req->dst,
2672 sg_nents_for_len(req->dst, dst_len),
2673 DMA_FROM_DEVICE);
2674 }
2675}
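
/*
 * Illustrative example of the length split used by chcr_aead_dma_map()
 * and chcr_aead_dma_unmap() above, assuming op_type is 0 for an
 * encrypt request with assoclen = 16, cryptlen = 64, authsize = 16:
 *
 *	in-place (src == dst): src_len = dst_len = 16 + 64 + 16 = 96
 *	out-of-place         : src_len = 80, dst_len = 96
 *
 * i.e. only the destination needs room for the appended tag.
 */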
2676
2677void chcr_add_aead_src_ent(struct aead_request *req,
2678 struct ulptx_sgl *ulptx)
2679{
2680 struct ulptx_walk ulp_walk;
2681 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2682
2683 if (reqctx->imm) {
2684 u8 *buf = (u8 *)ulptx;
2685
2686 if (reqctx->b0_len) {
2687 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2688 buf += reqctx->b0_len;
2689 }
2690 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2691 buf, req->cryptlen + req->assoclen, 0);
2692 } else {
2693 ulptx_walk_init(&ulp_walk, ulptx);
2694 if (reqctx->b0_len)
2695 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2696 reqctx->b0_dma);
2697 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2698 req->assoclen, 0);
2699 ulptx_walk_end(&ulp_walk);
2700 }
2701}
2702
2703void chcr_add_aead_dst_ent(struct aead_request *req,
2704 struct cpl_rx_phys_dsgl *phys_cpl,
2705 unsigned short qid)
2706{
2707 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2708 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2709 struct dsgl_walk dsgl_walk;
2710 unsigned int authsize = crypto_aead_authsize(tfm);
2711 struct chcr_context *ctx = a_ctx(tfm);
2712 u32 temp;
2713 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2714
2715 dsgl_walk_init(&dsgl_walk, phys_cpl);
2716 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2717 temp = req->assoclen + req->cryptlen +
2718 (reqctx->op ? -authsize : authsize);
2719 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2720 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2721}
2722
2723void chcr_add_cipher_src_ent(struct skcipher_request *req,
2724 void *ulptx,
2725 struct cipher_wr_param *wrparam)
2726{
2727 struct ulptx_walk ulp_walk;
2728 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2729 u8 *buf = ulptx;
2730
2731 memcpy(buf, reqctx->iv, IV);
2732 buf += IV;
2733 if (reqctx->imm) {
2734 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2735 buf, wrparam->bytes, reqctx->processed);
2736 } else {
2737 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2738 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2739 reqctx->src_ofst);
2740 reqctx->srcsg = ulp_walk.last_sg;
2741 reqctx->src_ofst = ulp_walk.last_sg_len;
2742 ulptx_walk_end(&ulp_walk);
2743 }
2744}
2745
2746void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2747 struct cpl_rx_phys_dsgl *phys_cpl,
2748 struct cipher_wr_param *wrparam,
2749 unsigned short qid)
2750{
2751 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2752 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2753 struct chcr_context *ctx = c_ctx(tfm);
2754 struct dsgl_walk dsgl_walk;
2755 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2756
2757 dsgl_walk_init(&dsgl_walk, phys_cpl);
2758 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2759 reqctx->dst_ofst);
2760 reqctx->dstsg = dsgl_walk.last_sg;
2761 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2762 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2763}
2764
2765void chcr_add_hash_src_ent(struct ahash_request *req,
2766 struct ulptx_sgl *ulptx,
2767 struct hash_wr_param *param)
2768{
2769 struct ulptx_walk ulp_walk;
2770 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2771
2772 if (reqctx->hctx_wr.imm) {
2773 u8 *buf = (u8 *)ulptx;
2774
2775 if (param->bfr_len) {
2776 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2777 buf += param->bfr_len;
2778 }
2779
2780 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2781 sg_nents(reqctx->hctx_wr.srcsg), buf,
2782 param->sg_len, 0);
2783 } else {
2784 ulptx_walk_init(&ulp_walk, ulptx);
2785 if (param->bfr_len)
2786 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2787 reqctx->hctx_wr.dma_addr);
2788 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2789 param->sg_len, reqctx->hctx_wr.src_ofst);
2790 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2791 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2792 ulptx_walk_end(&ulp_walk);
2793 }
2794}
2795
2796int chcr_hash_dma_map(struct device *dev,
2797 struct ahash_request *req)
2798{
2799 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2800 int error = 0;
2801
2802 if (!req->nbytes)
2803 return 0;
2804 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2805 DMA_TO_DEVICE);
2806 if (!error)
2807 return -ENOMEM;
2808 req_ctx->hctx_wr.is_sg_map = 1;
2809 return 0;
2810}
2811
2812void chcr_hash_dma_unmap(struct device *dev,
2813 struct ahash_request *req)
2814{
2815 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2816
2817 if (!req->nbytes)
2818 return;
2819
2820 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2821 DMA_TO_DEVICE);
2822 req_ctx->hctx_wr.is_sg_map = 0;
2823
2824}
2825
2826int chcr_cipher_dma_map(struct device *dev,
2827 struct skcipher_request *req)
2828{
2829 int error;
2830
2831 if (req->src == req->dst) {
2832 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2833 DMA_BIDIRECTIONAL);
2834 if (!error)
2835 goto err;
2836 } else {
2837 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2838 DMA_TO_DEVICE);
2839 if (!error)
2840 goto err;
2841 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2842 DMA_FROM_DEVICE);
2843 if (!error) {
2844 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2845 DMA_TO_DEVICE);
2846 goto err;
2847 }
2848 }
2849
2850 return 0;
2851err:
2852 return -ENOMEM;
2853}
2854
2855void chcr_cipher_dma_unmap(struct device *dev,
2856 struct skcipher_request *req)
2857{
2858 if (req->src == req->dst) {
2859 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2860 DMA_BIDIRECTIONAL);
2861 } else {
2862 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2863 DMA_TO_DEVICE);
2864 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2865 DMA_FROM_DEVICE);
2866 }
2867}
2868
2869static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2870{
2871 __be32 data;
2872
2873 memset(block, 0, csize);
2874 block += csize;
2875
2876 if (csize >= 4)
2877 csize = 4;
2878 else if (msglen > (unsigned int)(1 << (8 * csize)))
2879 return -EOVERFLOW;
2880
2881 data = cpu_to_be32(msglen);
2882 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2883
2884 return 0;
2885}
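
/*
 * Example (illustrative): set_msg_len(block, 0x1234, 4) leaves the
 * first four bytes of 'block' as 00 00 12 34 (big-endian), while a
 * csize smaller than four returns -EOVERFLOW when msglen is too large
 * to fit in that many bytes.
 */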
2886
2887static int generate_b0(struct aead_request *req, u8 *ivptr,
2888 unsigned short op_type)
2889{
2890 unsigned int l, lp, m;
2891 int rc;
2892 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2893 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2894 u8 *b0 = reqctx->scratch_pad;
2895
2896 m = crypto_aead_authsize(aead);
2897
2898 memcpy(b0, ivptr, 16);
2899
2900 lp = b0[0];
2901 l = lp + 1;
2902
2903 /* set m, bits 3-5 */
2904 *b0 |= (8 * ((m - 2) / 2));
2905
2906 /* set adata, bit 6, if associated data is used */
2907 if (req->assoclen)
2908 *b0 |= 64;
2909 rc = set_msg_len(b0 + 16 - l,
2910 (op_type == CHCR_DECRYPT_OP) ?
2911 req->cryptlen - m : req->cryptlen, l);
2912
2913 return rc;
2914}
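
/*
 * Illustrative B0 layout produced above (RFC 3610), assuming a tag
 * length m and a length-field size l, where l - 1 is taken from the
 * first IV byte:
 *
 *	byte  0        : flags = (adata ? 0x40 : 0) | (((m - 2) / 2) << 3) | (l - 1)
 *	bytes 1..15-l  : nonce, copied from the IV
 *	bytes 16-l..15 : message length, big endian (set_msg_len() above)
 *
 * e.g. m = 16, l = 4 and no AAD gives a flags byte of 0x3b.
 */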
2915
2916static inline int crypto_ccm_check_iv(const u8 *iv)
2917{
2918 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2919 if (iv[0] < 1 || iv[0] > 7)
2920 return -EINVAL;
2921
2922 return 0;
2923}
2924
2925static int ccm_format_packet(struct aead_request *req,
2926 u8 *ivptr,
2927 unsigned int sub_type,
2928 unsigned short op_type,
2929 unsigned int assoclen)
2930{
2931 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2932 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2933 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2934 int rc = 0;
2935
2936 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2937 ivptr[0] = 3;
2938 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2939 memcpy(ivptr + 4, req->iv, 8);
2940 memset(ivptr + 12, 0, 4);
2941 } else {
2942 memcpy(ivptr, req->iv, 16);
2943 }
2944 if (assoclen)
2945 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2946
2947 rc = generate_b0(req, ivptr, op_type);
2948 /* zero the ctr value */
2949 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2950 return rc;
2951}
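
/*
 * Illustrative sketch of the counter block assembled above for the
 * RFC 4309 case, assuming the 3-byte salt saved at setkey time plus
 * the 8-byte per-request IV form the 11-byte nonce:
 *
 *	ivptr[0]      = 3        - L' = 3, i.e. a 4-byte counter field
 *	ivptr[1..3]   = salt
 *	ivptr[4..11]  = req->iv
 *	ivptr[12..15] = 0        - counter starts at zero
 *
 * For plain CCM the caller's 16-byte IV is used as-is and only the
 * trailing ivptr[0] + 1 counter bytes are zeroed by the memset() above.
 */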
2952
2953static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2954 unsigned int dst_size,
2955 struct aead_request *req,
2956 unsigned short op_type)
2957{
2958 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2959 struct chcr_context *ctx = a_ctx(tfm);
2960 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2961 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2962 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2963 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2964 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2965 unsigned int ccm_xtra;
2966 unsigned int tag_offset = 0, auth_offset = 0;
2967 unsigned int assoclen;
2968
2969 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2970 assoclen = req->assoclen - 8;
2971 else
2972 assoclen = req->assoclen;
2973 ccm_xtra = CCM_B0_SIZE +
2974 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2975
2976 auth_offset = req->cryptlen ?
2977 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2978 if (op_type == CHCR_DECRYPT_OP) {
2979 if (crypto_aead_authsize(tfm) != req->cryptlen)
2980 tag_offset = crypto_aead_authsize(tfm);
2981 else
2982 auth_offset = 0;
2983 }
2984
2985 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2986 sec_cpl->pldlen =
2987 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2988 /* For CCM, B0 is always present, so the AAD start is always 1 */
2989 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2990 1 + IV, IV + assoclen + ccm_xtra,
2991 req->assoclen + IV + 1 + ccm_xtra, 0);
2992
2993 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2994 auth_offset, tag_offset,
2995 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2996 crypto_aead_authsize(tfm));
2997 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2998 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2999 cipher_mode, mac_mode,
3000 aeadctx->hmac_ctrl, IV >> 1);
3001
3002 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3003 0, dst_size);
3004}
3005
3006static int aead_ccm_validate_input(unsigned short op_type,
3007 struct aead_request *req,
3008 struct chcr_aead_ctx *aeadctx,
3009 unsigned int sub_type)
3010{
3011 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3012 if (crypto_ccm_check_iv(req->iv)) {
3013 pr_err("CCM: IV check fails\n");
3014 return -EINVAL;
3015 }
3016 } else {
3017 if (req->assoclen != 16 && req->assoclen != 20) {
3018 pr_err("RFC4309: Invalid AAD length %d\n",
3019 req->assoclen);
3020 return -EINVAL;
3021 }
3022 }
3023 return 0;
3024}
3025
3026static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3027 unsigned short qid,
3028 int size)
3029{
3030 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3031 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3032 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3033 struct sk_buff *skb = NULL;
3034 struct chcr_wr *chcr_req;
3035 struct cpl_rx_phys_dsgl *phys_cpl;
3036 struct ulptx_sgl *ulptx;
3037 unsigned int transhdr_len;
3038 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3039 unsigned int sub_type, assoclen = req->assoclen;
3040 unsigned int authsize = crypto_aead_authsize(tfm);
3041 int error = -EINVAL;
3042 u8 *ivptr;
3043 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3044 GFP_ATOMIC;
3045 struct adapter *adap = padap(a_ctx(tfm)->dev);
3046
3047 sub_type = get_aead_subtype(tfm);
3048 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3049 assoclen -= 8;
3050 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3051 error = chcr_aead_common_init(req);
3052 if (error)
3053 return ERR_PTR(error);
3054
3055 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3056 if (error)
3057 goto err;
3058 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3059 + (reqctx->op ? -authsize : authsize),
3060 CHCR_DST_SG_SIZE, 0);
3061 dnents += MIN_CCM_SG; // For IV and B0
3062 dst_size = get_space_for_phys_dsgl(dnents);
3063 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3064 CHCR_SRC_SG_SIZE, 0);
3065 snents += MIN_CCM_SG; //For B0
3066 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3067 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3068 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3069 reqctx->b0_len) <= SGE_MAX_WR_LEN;
3070 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3071 reqctx->b0_len, 16) :
3072 (sgl_len(snents) * 8);
3073 transhdr_len += temp;
3074 transhdr_len = roundup(transhdr_len, 16);
3075
3076 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3077 reqctx->b0_len, transhdr_len, reqctx->op)) {
3078 atomic_inc(&adap->chcr_stats.fallback);
3079 chcr_aead_common_exit(req);
3080 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3081 }
3082 skb = alloc_skb(transhdr_len, flags);
3083
3084 if (!skb) {
3085 error = -ENOMEM;
3086 goto err;
3087 }
3088
3089 chcr_req = __skb_put_zero(skb, transhdr_len);
3090
3091 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3092
3093 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3094 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3095 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3096 aeadctx->key, aeadctx->enckey_len);
3097
3098 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3099 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3100 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3101 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3102 if (error)
3103 goto dstmap_fail;
3104 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3105 chcr_add_aead_src_ent(req, ulptx);
3106
3107 atomic_inc(&adap->chcr_stats.aead_rqst);
3108 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3109 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3110 reqctx->b0_len) : 0);
3111 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3112 transhdr_len, temp, 0);
3113 reqctx->skb = skb;
3114
3115 return skb;
3116dstmap_fail:
3117 kfree_skb(skb);
3118err:
3119 chcr_aead_common_exit(req);
3120 return ERR_PTR(error);
3121}
3122
3123static struct sk_buff *create_gcm_wr(struct aead_request *req,
3124 unsigned short qid,
3125 int size)
3126{
3127 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3128 struct chcr_context *ctx = a_ctx(tfm);
3129 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3130 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3131 struct sk_buff *skb = NULL;
3132 struct chcr_wr *chcr_req;
3133 struct cpl_rx_phys_dsgl *phys_cpl;
3134 struct ulptx_sgl *ulptx;
3135 unsigned int transhdr_len, dnents = 0, snents;
3136 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3137 unsigned int authsize = crypto_aead_authsize(tfm);
3138 int error = -EINVAL;
3139 u8 *ivptr;
3140 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3141 GFP_ATOMIC;
3142 struct adapter *adap = padap(ctx->dev);
3143 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3144
3145 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3146 assoclen = req->assoclen - 8;
3147
3148 reqctx->b0_len = 0;
3149 error = chcr_aead_common_init(req);
3150 if (error)
3151 return ERR_PTR(error);
3152 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3153 (reqctx->op ? -authsize : authsize),
3154 CHCR_DST_SG_SIZE, 0);
3155 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3156 CHCR_SRC_SG_SIZE, 0);
3157 dnents += MIN_GCM_SG; // For IV
3158 dst_size = get_space_for_phys_dsgl(dnents);
3159 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3160 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3161 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3162 SGE_MAX_WR_LEN;
3163 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3164 (sgl_len(snents) * 8);
3165 transhdr_len += temp;
3166 transhdr_len = roundup(transhdr_len, 16);
3167 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3168 transhdr_len, reqctx->op)) {
3169
3170 atomic_inc(&adap->chcr_stats.fallback);
3171 chcr_aead_common_exit(req);
3172 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3173 }
3174 skb = alloc_skb(transhdr_len, flags);
3175 if (!skb) {
3176 error = -ENOMEM;
3177 goto err;
3178 }
3179
3180 chcr_req = __skb_put_zero(skb, transhdr_len);
3181
3182 //Offset of tag from end
3183 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3184 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3185 rx_channel_id, 2, 1);
3186 chcr_req->sec_cpl.pldlen =
3187 htonl(req->assoclen + IV + req->cryptlen);
3188 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3189 assoclen ? 1 + IV : 0,
3190 assoclen ? IV + assoclen : 0,
3191 req->assoclen + IV + 1, 0);
3192 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3193 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3194 temp, temp);
3195 chcr_req->sec_cpl.seqno_numivs =
3196 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3197 CHCR_ENCRYPT_OP) ? 1 : 0,
3198 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3199 CHCR_SCMD_AUTH_MODE_GHASH,
3200 aeadctx->hmac_ctrl, IV >> 1);
3201 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3202 0, 0, dst_size);
3203 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3204 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3205 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3206 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3207
3208 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3209 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3210 /* prepare a 16-byte IV */
3211 /* SALT | IV | 0x00000001 */
3212 if (get_aead_subtype(tfm) ==
3213 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3214 memcpy(ivptr, aeadctx->salt, 4);
3215 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3216 } else {
3217 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3218 }
3219 put_unaligned_be32(0x01, &ivptr[12]);
3220 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3221
3222 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3223 chcr_add_aead_src_ent(req, ulptx);
3224 atomic_inc(&adap->chcr_stats.aead_rqst);
3225 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3226 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3227 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3228 transhdr_len, temp, reqctx->verify);
3229 reqctx->skb = skb;
3230 return skb;
3231
3232err:
3233 chcr_aead_common_exit(req);
3234 return ERR_PTR(error);
3235}
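
/*
 * Illustrative layout of the 16-byte IV (the GCM pre-counter block)
 * built in create_gcm_wr() above, matching the SALT | IV | 0x00000001
 * comment:
 *
 *	rfc4106(gcm(aes)): [ 4-byte salt from setkey | 8-byte req->iv | 00 00 00 01 ]
 *	gcm(aes)         : [ 12-byte req->iv                          | 00 00 00 01 ]
 */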
3236
3237
3238
3239static int chcr_aead_cra_init(struct crypto_aead *tfm)
3240{
3241 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3242 struct aead_alg *alg = crypto_aead_alg(tfm);
3243
3244 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3245 CRYPTO_ALG_NEED_FALLBACK |
3246 CRYPTO_ALG_ASYNC);
3247 if (IS_ERR(aeadctx->sw_cipher))
3248 return PTR_ERR(aeadctx->sw_cipher);
3249 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3250 sizeof(struct aead_request) +
3251 crypto_aead_reqsize(aeadctx->sw_cipher)));
3252 return chcr_device_init(a_ctx(tfm));
3253}
3254
3255static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3256{
3257 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3258
3259 crypto_free_aead(aeadctx->sw_cipher);
3260}
3261
3262static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3263 unsigned int authsize)
3264{
3265 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3266
3267 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3268 aeadctx->mayverify = VERIFY_HW;
3269 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3270}
3271static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3272 unsigned int authsize)
3273{
3274 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3275 u32 maxauth = crypto_aead_maxauthsize(tfm);
3276
3277 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
3278 * not hold for SHA1, so the authsize == 12 check must come before the
3279 * authsize == (maxauth >> 1) check.
3280 */
3281 if (authsize == ICV_4) {
3282 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3283 aeadctx->mayverify = VERIFY_HW;
3284 } else if (authsize == ICV_6) {
3285 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3286 aeadctx->mayverify = VERIFY_HW;
3287 } else if (authsize == ICV_10) {
3288 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3289 aeadctx->mayverify = VERIFY_HW;
3290 } else if (authsize == ICV_12) {
3291 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3292 aeadctx->mayverify = VERIFY_HW;
3293 } else if (authsize == ICV_14) {
3294 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3295 aeadctx->mayverify = VERIFY_HW;
3296 } else if (authsize == (maxauth >> 1)) {
3297 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3298 aeadctx->mayverify = VERIFY_HW;
3299 } else if (authsize == maxauth) {
3300 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3301 aeadctx->mayverify = VERIFY_HW;
3302 } else {
3303 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3304 aeadctx->mayverify = VERIFY_SW;
3305 }
3306 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3307}
3308
3309
3310static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3311{
3312 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3313
3314 switch (authsize) {
3315 case ICV_4:
3316 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3317 aeadctx->mayverify = VERIFY_HW;
3318 break;
3319 case ICV_8:
3320 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3321 aeadctx->mayverify = VERIFY_HW;
3322 break;
3323 case ICV_12:
3324 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3325 aeadctx->mayverify = VERIFY_HW;
3326 break;
3327 case ICV_14:
3328 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3329 aeadctx->mayverify = VERIFY_HW;
3330 break;
3331 case ICV_16:
3332 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3333 aeadctx->mayverify = VERIFY_HW;
3334 break;
3335 case ICV_13:
3336 case ICV_15:
3337 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3338 aeadctx->mayverify = VERIFY_SW;
3339 break;
3340 default:
3341 return -EINVAL;
3342 }
3343 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3344}
3345
3346static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3347 unsigned int authsize)
3348{
3349 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3350
3351 switch (authsize) {
3352 case ICV_8:
3353 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3354 aeadctx->mayverify = VERIFY_HW;
3355 break;
3356 case ICV_12:
3357 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3358 aeadctx->mayverify = VERIFY_HW;
3359 break;
3360 case ICV_16:
3361 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3362 aeadctx->mayverify = VERIFY_HW;
3363 break;
3364 default:
3365 return -EINVAL;
3366 }
3367 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3368}
3369
3370static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3371 unsigned int authsize)
3372{
3373 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3374
3375 switch (authsize) {
3376 case ICV_4:
3377 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3378 aeadctx->mayverify = VERIFY_HW;
3379 break;
3380 case ICV_6:
3381 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3382 aeadctx->mayverify = VERIFY_HW;
3383 break;
3384 case ICV_8:
3385 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3386 aeadctx->mayverify = VERIFY_HW;
3387 break;
3388 case ICV_10:
3389 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3390 aeadctx->mayverify = VERIFY_HW;
3391 break;
3392 case ICV_12:
3393 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3394 aeadctx->mayverify = VERIFY_HW;
3395 break;
3396 case ICV_14:
3397 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3398 aeadctx->mayverify = VERIFY_HW;
3399 break;
3400 case ICV_16:
3401 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3402 aeadctx->mayverify = VERIFY_HW;
3403 break;
3404 default:
3405 return -EINVAL;
3406 }
3407 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3408}
3409
3410static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3411 const u8 *key,
3412 unsigned int keylen)
3413{
3414 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3415 unsigned char ck_size, mk_size;
3416 int key_ctx_size = 0;
3417
3418 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3419 if (keylen == AES_KEYSIZE_128) {
3420 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3421 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3422 } else if (keylen == AES_KEYSIZE_192) {
3423 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3424 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3425 } else if (keylen == AES_KEYSIZE_256) {
3426 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3427 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3428 } else {
3429 aeadctx->enckey_len = 0;
3430 return -EINVAL;
3431 }
3432 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3433 key_ctx_size >> 4);
3434 memcpy(aeadctx->key, key, keylen);
3435 aeadctx->enckey_len = keylen;
3436
3437 return 0;
3438}
3439
3440static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3441 const u8 *key,
3442 unsigned int keylen)
3443{
3444 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3445 int error;
3446
3447 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3448 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3449 CRYPTO_TFM_REQ_MASK);
3450 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3451 if (error)
3452 return error;
3453 return chcr_ccm_common_setkey(aead, key, keylen);
3454}
3455
3456static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3457 unsigned int keylen)
3458{
3459 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3460 int error;
3461
3462 if (keylen < 3) {
3463 aeadctx->enckey_len = 0;
3464 return -EINVAL;
3465 }
3466 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3467 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3468 CRYPTO_TFM_REQ_MASK);
3469 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3470 if (error)
3471 return error;
3472 keylen -= 3;
3473 memcpy(aeadctx->salt, key + keylen, 3);
3474 return chcr_ccm_common_setkey(aead, key, keylen);
3475}
3476
3477static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3478 unsigned int keylen)
3479{
3480 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3481 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3482 unsigned int ck_size;
3483 int ret = 0, key_ctx_size = 0;
3484 struct crypto_aes_ctx aes;
3485
3486 aeadctx->enckey_len = 0;
3487 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3488 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3489 & CRYPTO_TFM_REQ_MASK);
3490 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3491 if (ret)
3492 goto out;
3493
3494 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3495 keylen > 3) {
3496 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3497 memcpy(aeadctx->salt, key + keylen, 4);
3498 }
3499 if (keylen == AES_KEYSIZE_128) {
3500 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3501 } else if (keylen == AES_KEYSIZE_192) {
3502 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3503 } else if (keylen == AES_KEYSIZE_256) {
3504 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3505 } else {
3506 pr_err("GCM: Invalid key length %d\n", keylen);
3507 ret = -EINVAL;
3508 goto out;
3509 }
3510
3511 memcpy(aeadctx->key, key, keylen);
3512 aeadctx->enckey_len = keylen;
3513 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3514 AEAD_H_SIZE;
3515 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3516 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3517 0, 0,
3518 key_ctx_size >> 4);
3519 /* Calculate H = CIPH(K, 0 repeated 16 times).
3520 * It goes into the key context.
3521 */
3522 ret = aes_expandkey(&aes, key, keylen);
3523 if (ret) {
3524 aeadctx->enckey_len = 0;
3525 goto out;
3526 }
3527 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3528 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3529 memzero_explicit(&aes, sizeof(aes));
3530
3531out:
3532 return ret;
3533}
3534
3535static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3536 unsigned int keylen)
3537{
3538 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3539 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3540 /* contains both the auth and cipher keys */
3541 struct crypto_authenc_keys keys;
3542 unsigned int bs, subtype;
3543 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3544 int err = 0, i, key_ctx_len = 0;
3545 unsigned char ck_size = 0;
3546 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3547 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3548 struct algo_param param;
3549 int align;
3550 u8 *o_ptr = NULL;
3551
3552 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3553 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3554 & CRYPTO_TFM_REQ_MASK);
3555 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3556 if (err)
3557 goto out;
3558
3559 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3560 goto out;
3561
3562 if (get_alg_config(&param, max_authsize)) {
3563 pr_err("Unsupported digest size\n");
3564 goto out;
3565 }
3566 subtype = get_aead_subtype(authenc);
3567 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3568 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3569 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3570 goto out;
3571 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3572 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3573 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3574 }
3575 if (keys.enckeylen == AES_KEYSIZE_128) {
3576 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3577 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3578 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3579 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3580 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3581 } else {
3582 pr_err("Unsupported cipher key\n");
3583 goto out;
3584 }
3585
3586 /* Copy only the encryption key. The authkey is used to generate h(ipad)
3587 * and h(opad), so it is not needed again; authkeylen is capped at the
3588 * hash digest size.
3589 */
3590 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3591 aeadctx->enckey_len = keys.enckeylen;
3592 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3593 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3594
3595 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3596 aeadctx->enckey_len << 3);
3597 }
3598 base_hash = chcr_alloc_shash(max_authsize);
3599 if (IS_ERR(base_hash)) {
3600 pr_err("Base driver cannot be loaded\n");
3601 goto out;
3602 }
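	/* Hash the (possibly pre-digested) authentication key XORed with
	 * ipad and opad to obtain the partial hashes kept in the key
	 * context.
	 */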
3603 {
3604 SHASH_DESC_ON_STACK(shash, base_hash);
3605
3606 shash->tfm = base_hash;
3607 bs = crypto_shash_blocksize(base_hash);
3608 align = KEYCTX_ALIGN_PAD(max_authsize);
3609 o_ptr = actx->h_iopad + param.result_size + align;
3610
3611 if (keys.authkeylen > bs) {
3612 err = crypto_shash_digest(shash, keys.authkey,
3613 keys.authkeylen,
3614 o_ptr);
3615 if (err) {
3616				pr_err("Digest of authentication key failed\n");
3617 goto out;
3618 }
3619 keys.authkeylen = max_authsize;
3620 } else
3621 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3622
3623		/* Compute the ipad-digest */
3624 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3625 memcpy(pad, o_ptr, keys.authkeylen);
3626 for (i = 0; i < bs >> 2; i++)
3627 *((unsigned int *)pad + i) ^= IPAD_DATA;
3628
3629 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3630 max_authsize))
3631 goto out;
3632 /* Compute the opad-digest */
3633 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3634 memcpy(pad, o_ptr, keys.authkeylen);
3635 for (i = 0; i < bs >> 2; i++)
3636 *((unsigned int *)pad + i) ^= OPAD_DATA;
3637
3638 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3639 goto out;
3640
3641 /* convert the ipad and opad digest to network order */
3642 chcr_change_order(actx->h_iopad, param.result_size);
3643 chcr_change_order(o_ptr, param.result_size);
3644 key_ctx_len = sizeof(struct _key_ctx) +
3645 roundup(keys.enckeylen, 16) +
3646 (param.result_size + align) * 2;
3647 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3648 0, 1, key_ctx_len >> 4);
3649 actx->auth_mode = param.auth_mode;
3650 chcr_free_shash(base_hash);
3651
3652 memzero_explicit(&keys, sizeof(keys));
3653 return 0;
3654 }
3655out:
3656 aeadctx->enckey_len = 0;
3657 memzero_explicit(&keys, sizeof(keys));
3658 if (!IS_ERR(base_hash))
3659 chcr_free_shash(base_hash);
3660 return -EINVAL;
3661}
3662
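/*
 * chcr_aead_digest_null_setkey - setkey for authenc(digest_null,...) modes.
 * Only the cipher key is programmed: the key context carries the AES key
 * alone and the authentication mode is set to NOP.
 */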
3663static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3664 const u8 *key, unsigned int keylen)
3665{
3666 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3667 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3668	/* keys holds both the authentication and cipher keys */
3669	struct crypto_authenc_keys keys;
3670	int err;
3671	unsigned int subtype;
3672	int key_ctx_len = 0;
3673 unsigned char ck_size = 0;
3674
3675 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3676 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3677 & CRYPTO_TFM_REQ_MASK);
3678 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3679 if (err)
3680 goto out;
3681
3682 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3683 goto out;
3684
3685 subtype = get_aead_subtype(authenc);
3686 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3687 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3688 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3689 goto out;
3690 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3691 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3692 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3693 }
3694 if (keys.enckeylen == AES_KEYSIZE_128) {
3695 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3696 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3697 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3698 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3699 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3700 } else {
3701 pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3702 goto out;
3703 }
3704 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3705 aeadctx->enckey_len = keys.enckeylen;
3706 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3707 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3708 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3709 aeadctx->enckey_len << 3);
3710 }
3711 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3712
3713 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3714 0, key_ctx_len >> 4);
3715 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3716 memzero_explicit(&keys, sizeof(keys));
3717 return 0;
3718out:
3719 aeadctx->enckey_len = 0;
3720 memzero_explicit(&keys, sizeof(keys));
3721 return -EINVAL;
3722}
3723
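/*
 * chcr_aead_op - common submission path for AEAD requests.
 * Checks that a crypto device is attached, falls back to software while the
 * device is detaching, builds the work request via @create_wr_fn and queues
 * it to the hardware. Returns -EINPROGRESS once the WR has been posted.
 */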
3724static int chcr_aead_op(struct aead_request *req,
3725 int size,
3726 create_wr_t create_wr_fn)
3727{
3728 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3729 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3730 struct chcr_context *ctx = a_ctx(tfm);
3731 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3732 struct sk_buff *skb;
3733 struct chcr_dev *cdev;
3734
3735 cdev = a_ctx(tfm)->dev;
3736 if (!cdev) {
3737 pr_err("%s : No crypto device.\n", __func__);
3738 return -ENXIO;
3739 }
3740
3741 if (chcr_inc_wrcount(cdev)) {
3742		/* Detach state for CHCR means lldi or padap has been freed;
3743		 * the request cannot reach hardware, so use the SW fallback.
3744		 */
3745 return chcr_aead_fallback(req, reqctx->op);
3746 }
3747
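	/* Push back with -ENOSPC if the crypto TX queue is full and the
	 * request may not be backlogged.
	 */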
3748 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3749 reqctx->txqidx) &&
3750 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3751 chcr_dec_wrcount(cdev);
3752 return -ENOSPC;
3753 }
3754
3755	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3756	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3757		pr_err("RFC4106: Invalid value of assoclen %d\n", req->assoclen);
3758		chcr_dec_wrcount(cdev);
3759		return -EINVAL;
3760	}
3761
3762 /* Form a WR from req */
3763 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3764
3765 if (IS_ERR_OR_NULL(skb)) {
3766 chcr_dec_wrcount(cdev);
3767 return PTR_ERR_OR_ZERO(skb);
3768 }
3769
3770 skb->dev = u_ctx->lldi.ports[0];
3771 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3772 chcr_send_wr(skb);
3773 return -EINPROGRESS;
3774}
3775
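/*
 * chcr_aead_encrypt - entry point for AEAD encryption.
 * Picks TX/RX queue indices based on the current CPU and dispatches to the
 * authenc, CCM or GCM work-request builder according to the algorithm
 * subtype.
 */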
3776static int chcr_aead_encrypt(struct aead_request *req)
3777{
3778 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3779 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3780 struct chcr_context *ctx = a_ctx(tfm);
3781 unsigned int cpu;
3782
3783 cpu = get_cpu();
3784 reqctx->txqidx = cpu % ctx->ntxq;
3785 reqctx->rxqidx = cpu % ctx->nrxq;
3786 put_cpu();
3787
3788 reqctx->verify = VERIFY_HW;
3789 reqctx->op = CHCR_ENCRYPT_OP;
3790
3791 switch (get_aead_subtype(tfm)) {
3792 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3793 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3794 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3795 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3796 return chcr_aead_op(req, 0, create_authenc_wr);
3797 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3798 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3799 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3800 default:
3801 return chcr_aead_op(req, 0, create_gcm_wr);
3802 }
3803}
3804
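/*
 * chcr_aead_decrypt - entry point for AEAD decryption.
 * Mirrors chcr_aead_encrypt() but, when software tag verification is in
 * use, reserves room for the full authentication tag so it can be checked
 * in the completion path.
 */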
3805static int chcr_aead_decrypt(struct aead_request *req)
3806{
3807 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3808 struct chcr_context *ctx = a_ctx(tfm);
3809 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3810 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3811 int size;
3812 unsigned int cpu;
3813
3814 cpu = get_cpu();
3815 reqctx->txqidx = cpu % ctx->ntxq;
3816 reqctx->rxqidx = cpu % ctx->nrxq;
3817 put_cpu();
3818
3819 if (aeadctx->mayverify == VERIFY_SW) {
3820 size = crypto_aead_maxauthsize(tfm);
3821 reqctx->verify = VERIFY_SW;
3822 } else {
3823 size = 0;
3824 reqctx->verify = VERIFY_HW;
3825 }
3826 reqctx->op = CHCR_DECRYPT_OP;
3827 switch (get_aead_subtype(tfm)) {
3828 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3829 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3830 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3831 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3832 return chcr_aead_op(req, size, create_authenc_wr);
3833 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3834 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3835 return chcr_aead_op(req, size, create_aead_ccm_wr);
3836 default:
3837 return chcr_aead_op(req, size, create_gcm_wr);
3838 }
3839}
3840
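/*
 * driver_algs - template table of every skcipher, (HMAC-)hash and AEAD
 * algorithm offered by this driver. chcr_register_alg() fills in the common
 * callbacks and registers each entry with the crypto API; is_registered
 * tracks which entries need to be torn down in chcr_unregister_alg().
 */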
3841static struct chcr_alg_template driver_algs[] = {
3842 /* AES-CBC */
3843 {
3844 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3845 .is_registered = 0,
3846 .alg.skcipher = {
3847 .base.cra_name = "cbc(aes)",
3848 .base.cra_driver_name = "cbc-aes-chcr",
3849 .base.cra_blocksize = AES_BLOCK_SIZE,
3850
3851 .init = chcr_init_tfm,
3852 .exit = chcr_exit_tfm,
3853 .min_keysize = AES_MIN_KEY_SIZE,
3854 .max_keysize = AES_MAX_KEY_SIZE,
3855 .ivsize = AES_BLOCK_SIZE,
3856 .setkey = chcr_aes_cbc_setkey,
3857 .encrypt = chcr_aes_encrypt,
3858 .decrypt = chcr_aes_decrypt,
3859 }
3860 },
3861 {
3862 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3863 .is_registered = 0,
3864 .alg.skcipher = {
3865 .base.cra_name = "xts(aes)",
3866 .base.cra_driver_name = "xts-aes-chcr",
3867 .base.cra_blocksize = AES_BLOCK_SIZE,
3868
3869 .init = chcr_init_tfm,
3870 .exit = chcr_exit_tfm,
3871 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3872 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3873 .ivsize = AES_BLOCK_SIZE,
3874 .setkey = chcr_aes_xts_setkey,
3875 .encrypt = chcr_aes_encrypt,
3876 .decrypt = chcr_aes_decrypt,
3877 }
3878 },
3879 {
3880 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3881 .is_registered = 0,
3882 .alg.skcipher = {
3883 .base.cra_name = "ctr(aes)",
3884 .base.cra_driver_name = "ctr-aes-chcr",
3885 .base.cra_blocksize = 1,
3886
3887 .init = chcr_init_tfm,
3888 .exit = chcr_exit_tfm,
3889 .min_keysize = AES_MIN_KEY_SIZE,
3890 .max_keysize = AES_MAX_KEY_SIZE,
3891 .ivsize = AES_BLOCK_SIZE,
3892 .setkey = chcr_aes_ctr_setkey,
3893 .encrypt = chcr_aes_encrypt,
3894 .decrypt = chcr_aes_decrypt,
3895 }
3896 },
3897 {
3898 .type = CRYPTO_ALG_TYPE_SKCIPHER |
3899 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3900 .is_registered = 0,
3901 .alg.skcipher = {
3902 .base.cra_name = "rfc3686(ctr(aes))",
3903 .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
3904 .base.cra_blocksize = 1,
3905
3906 .init = chcr_rfc3686_init,
3907 .exit = chcr_exit_tfm,
3908 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3909 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3910 .ivsize = CTR_RFC3686_IV_SIZE,
3911 .setkey = chcr_aes_rfc3686_setkey,
3912 .encrypt = chcr_aes_encrypt,
3913 .decrypt = chcr_aes_decrypt,
3914 }
3915 },
3916 /* SHA */
3917 {
3918 .type = CRYPTO_ALG_TYPE_AHASH,
3919 .is_registered = 0,
3920 .alg.hash = {
3921 .halg.digestsize = SHA1_DIGEST_SIZE,
3922 .halg.base = {
3923 .cra_name = "sha1",
3924 .cra_driver_name = "sha1-chcr",
3925 .cra_blocksize = SHA1_BLOCK_SIZE,
3926 }
3927 }
3928 },
3929 {
3930 .type = CRYPTO_ALG_TYPE_AHASH,
3931 .is_registered = 0,
3932 .alg.hash = {
3933 .halg.digestsize = SHA256_DIGEST_SIZE,
3934 .halg.base = {
3935 .cra_name = "sha256",
3936 .cra_driver_name = "sha256-chcr",
3937 .cra_blocksize = SHA256_BLOCK_SIZE,
3938 }
3939 }
3940 },
3941 {
3942 .type = CRYPTO_ALG_TYPE_AHASH,
3943 .is_registered = 0,
3944 .alg.hash = {
3945 .halg.digestsize = SHA224_DIGEST_SIZE,
3946 .halg.base = {
3947 .cra_name = "sha224",
3948 .cra_driver_name = "sha224-chcr",
3949 .cra_blocksize = SHA224_BLOCK_SIZE,
3950 }
3951 }
3952 },
3953 {
3954 .type = CRYPTO_ALG_TYPE_AHASH,
3955 .is_registered = 0,
3956 .alg.hash = {
3957 .halg.digestsize = SHA384_DIGEST_SIZE,
3958 .halg.base = {
3959 .cra_name = "sha384",
3960 .cra_driver_name = "sha384-chcr",
3961 .cra_blocksize = SHA384_BLOCK_SIZE,
3962 }
3963 }
3964 },
3965 {
3966 .type = CRYPTO_ALG_TYPE_AHASH,
3967 .is_registered = 0,
3968 .alg.hash = {
3969 .halg.digestsize = SHA512_DIGEST_SIZE,
3970 .halg.base = {
3971 .cra_name = "sha512",
3972 .cra_driver_name = "sha512-chcr",
3973 .cra_blocksize = SHA512_BLOCK_SIZE,
3974 }
3975 }
3976 },
3977 /* HMAC */
3978 {
3979 .type = CRYPTO_ALG_TYPE_HMAC,
3980 .is_registered = 0,
3981 .alg.hash = {
3982 .halg.digestsize = SHA1_DIGEST_SIZE,
3983 .halg.base = {
3984 .cra_name = "hmac(sha1)",
3985 .cra_driver_name = "hmac-sha1-chcr",
3986 .cra_blocksize = SHA1_BLOCK_SIZE,
3987 }
3988 }
3989 },
3990 {
3991 .type = CRYPTO_ALG_TYPE_HMAC,
3992 .is_registered = 0,
3993 .alg.hash = {
3994 .halg.digestsize = SHA224_DIGEST_SIZE,
3995 .halg.base = {
3996 .cra_name = "hmac(sha224)",
3997 .cra_driver_name = "hmac-sha224-chcr",
3998 .cra_blocksize = SHA224_BLOCK_SIZE,
3999 }
4000 }
4001 },
4002 {
4003 .type = CRYPTO_ALG_TYPE_HMAC,
4004 .is_registered = 0,
4005 .alg.hash = {
4006 .halg.digestsize = SHA256_DIGEST_SIZE,
4007 .halg.base = {
4008 .cra_name = "hmac(sha256)",
4009 .cra_driver_name = "hmac-sha256-chcr",
4010 .cra_blocksize = SHA256_BLOCK_SIZE,
4011 }
4012 }
4013 },
4014 {
4015 .type = CRYPTO_ALG_TYPE_HMAC,
4016 .is_registered = 0,
4017 .alg.hash = {
4018 .halg.digestsize = SHA384_DIGEST_SIZE,
4019 .halg.base = {
4020 .cra_name = "hmac(sha384)",
4021 .cra_driver_name = "hmac-sha384-chcr",
4022 .cra_blocksize = SHA384_BLOCK_SIZE,
4023 }
4024 }
4025 },
4026 {
4027 .type = CRYPTO_ALG_TYPE_HMAC,
4028 .is_registered = 0,
4029 .alg.hash = {
4030 .halg.digestsize = SHA512_DIGEST_SIZE,
4031 .halg.base = {
4032 .cra_name = "hmac(sha512)",
4033 .cra_driver_name = "hmac-sha512-chcr",
4034 .cra_blocksize = SHA512_BLOCK_SIZE,
4035 }
4036 }
4037 },
4038 /* Add AEAD Algorithms */
4039 {
4040 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4041 .is_registered = 0,
4042 .alg.aead = {
4043 .base = {
4044 .cra_name = "gcm(aes)",
4045 .cra_driver_name = "gcm-aes-chcr",
4046 .cra_blocksize = 1,
4047 .cra_priority = CHCR_AEAD_PRIORITY,
4048 .cra_ctxsize = sizeof(struct chcr_context) +
4049 sizeof(struct chcr_aead_ctx) +
4050 sizeof(struct chcr_gcm_ctx),
4051 },
4052 .ivsize = GCM_AES_IV_SIZE,
4053 .maxauthsize = GHASH_DIGEST_SIZE,
4054 .setkey = chcr_gcm_setkey,
4055 .setauthsize = chcr_gcm_setauthsize,
4056 }
4057 },
4058 {
4059 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4060 .is_registered = 0,
4061 .alg.aead = {
4062 .base = {
4063 .cra_name = "rfc4106(gcm(aes))",
4064 .cra_driver_name = "rfc4106-gcm-aes-chcr",
4065 .cra_blocksize = 1,
4066 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4067 .cra_ctxsize = sizeof(struct chcr_context) +
4068 sizeof(struct chcr_aead_ctx) +
4069 sizeof(struct chcr_gcm_ctx),
4070
4071 },
4072 .ivsize = GCM_RFC4106_IV_SIZE,
4073 .maxauthsize = GHASH_DIGEST_SIZE,
4074 .setkey = chcr_gcm_setkey,
4075 .setauthsize = chcr_4106_4309_setauthsize,
4076 }
4077 },
4078 {
4079 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4080 .is_registered = 0,
4081 .alg.aead = {
4082 .base = {
4083 .cra_name = "ccm(aes)",
4084 .cra_driver_name = "ccm-aes-chcr",
4085 .cra_blocksize = 1,
4086 .cra_priority = CHCR_AEAD_PRIORITY,
4087 .cra_ctxsize = sizeof(struct chcr_context) +
4088 sizeof(struct chcr_aead_ctx),
4089
4090 },
4091 .ivsize = AES_BLOCK_SIZE,
4092 .maxauthsize = GHASH_DIGEST_SIZE,
4093 .setkey = chcr_aead_ccm_setkey,
4094 .setauthsize = chcr_ccm_setauthsize,
4095 }
4096 },
4097 {
4098 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4099 .is_registered = 0,
4100 .alg.aead = {
4101 .base = {
4102 .cra_name = "rfc4309(ccm(aes))",
4103 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4104 .cra_blocksize = 1,
4105 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4106 .cra_ctxsize = sizeof(struct chcr_context) +
4107 sizeof(struct chcr_aead_ctx),
4108
4109 },
4110 .ivsize = 8,
4111 .maxauthsize = GHASH_DIGEST_SIZE,
4112 .setkey = chcr_aead_rfc4309_setkey,
4113 .setauthsize = chcr_4106_4309_setauthsize,
4114 }
4115 },
4116 {
4117 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4118 .is_registered = 0,
4119 .alg.aead = {
4120 .base = {
4121 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4122 .cra_driver_name =
4123 "authenc-hmac-sha1-cbc-aes-chcr",
4124 .cra_blocksize = AES_BLOCK_SIZE,
4125 .cra_priority = CHCR_AEAD_PRIORITY,
4126 .cra_ctxsize = sizeof(struct chcr_context) +
4127 sizeof(struct chcr_aead_ctx) +
4128 sizeof(struct chcr_authenc_ctx),
4129
4130 },
4131 .ivsize = AES_BLOCK_SIZE,
4132 .maxauthsize = SHA1_DIGEST_SIZE,
4133 .setkey = chcr_authenc_setkey,
4134 .setauthsize = chcr_authenc_setauthsize,
4135 }
4136 },
4137 {
4138 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4139 .is_registered = 0,
4140 .alg.aead = {
4141 .base = {
4142
4143 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4144 .cra_driver_name =
4145 "authenc-hmac-sha256-cbc-aes-chcr",
4146 .cra_blocksize = AES_BLOCK_SIZE,
4147 .cra_priority = CHCR_AEAD_PRIORITY,
4148 .cra_ctxsize = sizeof(struct chcr_context) +
4149 sizeof(struct chcr_aead_ctx) +
4150 sizeof(struct chcr_authenc_ctx),
4151
4152 },
4153 .ivsize = AES_BLOCK_SIZE,
4154 .maxauthsize = SHA256_DIGEST_SIZE,
4155 .setkey = chcr_authenc_setkey,
4156 .setauthsize = chcr_authenc_setauthsize,
4157 }
4158 },
4159 {
4160 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4161 .is_registered = 0,
4162 .alg.aead = {
4163 .base = {
4164 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4165 .cra_driver_name =
4166 "authenc-hmac-sha224-cbc-aes-chcr",
4167 .cra_blocksize = AES_BLOCK_SIZE,
4168 .cra_priority = CHCR_AEAD_PRIORITY,
4169 .cra_ctxsize = sizeof(struct chcr_context) +
4170 sizeof(struct chcr_aead_ctx) +
4171 sizeof(struct chcr_authenc_ctx),
4172 },
4173 .ivsize = AES_BLOCK_SIZE,
4174 .maxauthsize = SHA224_DIGEST_SIZE,
4175 .setkey = chcr_authenc_setkey,
4176 .setauthsize = chcr_authenc_setauthsize,
4177 }
4178 },
4179 {
4180 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4181 .is_registered = 0,
4182 .alg.aead = {
4183 .base = {
4184 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4185 .cra_driver_name =
4186 "authenc-hmac-sha384-cbc-aes-chcr",
4187 .cra_blocksize = AES_BLOCK_SIZE,
4188 .cra_priority = CHCR_AEAD_PRIORITY,
4189 .cra_ctxsize = sizeof(struct chcr_context) +
4190 sizeof(struct chcr_aead_ctx) +
4191 sizeof(struct chcr_authenc_ctx),
4192
4193 },
4194 .ivsize = AES_BLOCK_SIZE,
4195 .maxauthsize = SHA384_DIGEST_SIZE,
4196 .setkey = chcr_authenc_setkey,
4197 .setauthsize = chcr_authenc_setauthsize,
4198 }
4199 },
4200 {
4201 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4202 .is_registered = 0,
4203 .alg.aead = {
4204 .base = {
4205 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4206 .cra_driver_name =
4207 "authenc-hmac-sha512-cbc-aes-chcr",
4208 .cra_blocksize = AES_BLOCK_SIZE,
4209 .cra_priority = CHCR_AEAD_PRIORITY,
4210 .cra_ctxsize = sizeof(struct chcr_context) +
4211 sizeof(struct chcr_aead_ctx) +
4212 sizeof(struct chcr_authenc_ctx),
4213
4214 },
4215 .ivsize = AES_BLOCK_SIZE,
4216 .maxauthsize = SHA512_DIGEST_SIZE,
4217 .setkey = chcr_authenc_setkey,
4218 .setauthsize = chcr_authenc_setauthsize,
4219 }
4220 },
4221 {
4222 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4223 .is_registered = 0,
4224 .alg.aead = {
4225 .base = {
4226 .cra_name = "authenc(digest_null,cbc(aes))",
4227 .cra_driver_name =
4228 "authenc-digest_null-cbc-aes-chcr",
4229 .cra_blocksize = AES_BLOCK_SIZE,
4230 .cra_priority = CHCR_AEAD_PRIORITY,
4231 .cra_ctxsize = sizeof(struct chcr_context) +
4232 sizeof(struct chcr_aead_ctx) +
4233 sizeof(struct chcr_authenc_ctx),
4234
4235 },
4236 .ivsize = AES_BLOCK_SIZE,
4237 .maxauthsize = 0,
4238 .setkey = chcr_aead_digest_null_setkey,
4239 .setauthsize = chcr_authenc_null_setauthsize,
4240 }
4241 },
4242 {
4243 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4244 .is_registered = 0,
4245 .alg.aead = {
4246 .base = {
4247 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4248 .cra_driver_name =
4249 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4250 .cra_blocksize = 1,
4251 .cra_priority = CHCR_AEAD_PRIORITY,
4252 .cra_ctxsize = sizeof(struct chcr_context) +
4253 sizeof(struct chcr_aead_ctx) +
4254 sizeof(struct chcr_authenc_ctx),
4255
4256 },
4257 .ivsize = CTR_RFC3686_IV_SIZE,
4258 .maxauthsize = SHA1_DIGEST_SIZE,
4259 .setkey = chcr_authenc_setkey,
4260 .setauthsize = chcr_authenc_setauthsize,
4261 }
4262 },
4263 {
4264 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4265 .is_registered = 0,
4266 .alg.aead = {
4267 .base = {
4268
4269 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4270 .cra_driver_name =
4271 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4272 .cra_blocksize = 1,
4273 .cra_priority = CHCR_AEAD_PRIORITY,
4274 .cra_ctxsize = sizeof(struct chcr_context) +
4275 sizeof(struct chcr_aead_ctx) +
4276 sizeof(struct chcr_authenc_ctx),
4277
4278 },
4279 .ivsize = CTR_RFC3686_IV_SIZE,
4280 .maxauthsize = SHA256_DIGEST_SIZE,
4281 .setkey = chcr_authenc_setkey,
4282 .setauthsize = chcr_authenc_setauthsize,
4283 }
4284 },
4285 {
4286 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4287 .is_registered = 0,
4288 .alg.aead = {
4289 .base = {
4290 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4291 .cra_driver_name =
4292 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4293 .cra_blocksize = 1,
4294 .cra_priority = CHCR_AEAD_PRIORITY,
4295 .cra_ctxsize = sizeof(struct chcr_context) +
4296 sizeof(struct chcr_aead_ctx) +
4297 sizeof(struct chcr_authenc_ctx),
4298 },
4299 .ivsize = CTR_RFC3686_IV_SIZE,
4300 .maxauthsize = SHA224_DIGEST_SIZE,
4301 .setkey = chcr_authenc_setkey,
4302 .setauthsize = chcr_authenc_setauthsize,
4303 }
4304 },
4305 {
4306 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4307 .is_registered = 0,
4308 .alg.aead = {
4309 .base = {
4310 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4311 .cra_driver_name =
4312 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4313 .cra_blocksize = 1,
4314 .cra_priority = CHCR_AEAD_PRIORITY,
4315 .cra_ctxsize = sizeof(struct chcr_context) +
4316 sizeof(struct chcr_aead_ctx) +
4317 sizeof(struct chcr_authenc_ctx),
4318
4319 },
4320 .ivsize = CTR_RFC3686_IV_SIZE,
4321 .maxauthsize = SHA384_DIGEST_SIZE,
4322 .setkey = chcr_authenc_setkey,
4323 .setauthsize = chcr_authenc_setauthsize,
4324 }
4325 },
4326 {
4327 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4328 .is_registered = 0,
4329 .alg.aead = {
4330 .base = {
4331 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4332 .cra_driver_name =
4333 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4334 .cra_blocksize = 1,
4335 .cra_priority = CHCR_AEAD_PRIORITY,
4336 .cra_ctxsize = sizeof(struct chcr_context) +
4337 sizeof(struct chcr_aead_ctx) +
4338 sizeof(struct chcr_authenc_ctx),
4339
4340 },
4341 .ivsize = CTR_RFC3686_IV_SIZE,
4342 .maxauthsize = SHA512_DIGEST_SIZE,
4343 .setkey = chcr_authenc_setkey,
4344 .setauthsize = chcr_authenc_setauthsize,
4345 }
4346 },
4347 {
4348 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4349 .is_registered = 0,
4350 .alg.aead = {
4351 .base = {
4352 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4353 .cra_driver_name =
4354 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4355 .cra_blocksize = 1,
4356 .cra_priority = CHCR_AEAD_PRIORITY,
4357 .cra_ctxsize = sizeof(struct chcr_context) +
4358 sizeof(struct chcr_aead_ctx) +
4359 sizeof(struct chcr_authenc_ctx),
4360
4361 },
4362 .ivsize = CTR_RFC3686_IV_SIZE,
4363 .maxauthsize = 0,
4364 .setkey = chcr_aead_digest_null_setkey,
4365 .setauthsize = chcr_authenc_null_setauthsize,
4366 }
4367 },
4368};
4369
4370/*
4371 * chcr_unregister_alg - Deregister crypto algorithms from the
4372 * kernel framework.
4373 */
4374static int chcr_unregister_alg(void)
4375{
4376 int i;
4377
4378 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4379 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4380 case CRYPTO_ALG_TYPE_SKCIPHER:
4381 if (driver_algs[i].is_registered && refcount_read(
4382 &driver_algs[i].alg.skcipher.base.cra_refcnt)
4383 == 1) {
4384 crypto_unregister_skcipher(
4385 &driver_algs[i].alg.skcipher);
4386 driver_algs[i].is_registered = 0;
4387 }
4388 break;
4389 case CRYPTO_ALG_TYPE_AEAD:
4390 if (driver_algs[i].is_registered && refcount_read(
4391 &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4392 crypto_unregister_aead(
4393 &driver_algs[i].alg.aead);
4394 driver_algs[i].is_registered = 0;
4395 }
4396 break;
4397 case CRYPTO_ALG_TYPE_AHASH:
4398 if (driver_algs[i].is_registered && refcount_read(
4399 &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4400 == 1) {
4401 crypto_unregister_ahash(
4402 &driver_algs[i].alg.hash);
4403 driver_algs[i].is_registered = 0;
4404 }
4405 break;
4406 }
4407 }
4408 return 0;
4409}
4410
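/* tfm context and ahash state sizes used by chcr_register_alg() below */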
4411#define SZ_AHASH_CTX sizeof(struct chcr_context)
4412#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4413#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4414
4415/*
4416 * chcr_register_alg - Register crypto algorithms with the kernel framework.
4417 */
4418static int chcr_register_alg(void)
4419{
4420 struct crypto_alg ai;
4421 struct ahash_alg *a_hash;
4422 int err = 0, i;
4423 char *name = NULL;
4424
4425 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4426 if (driver_algs[i].is_registered)
4427 continue;
4428 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4429 case CRYPTO_ALG_TYPE_SKCIPHER:
4430 driver_algs[i].alg.skcipher.base.cra_priority =
4431 CHCR_CRA_PRIORITY;
4432 driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4433 driver_algs[i].alg.skcipher.base.cra_flags =
4434 CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4435 CRYPTO_ALG_ALLOCATES_MEMORY |
4436 CRYPTO_ALG_NEED_FALLBACK;
4437 driver_algs[i].alg.skcipher.base.cra_ctxsize =
4438 sizeof(struct chcr_context) +
4439 sizeof(struct ablk_ctx);
4440 driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4441
4442 err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4443 name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4444 break;
4445 case CRYPTO_ALG_TYPE_AEAD:
4446 driver_algs[i].alg.aead.base.cra_flags =
4447 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4448 CRYPTO_ALG_ALLOCATES_MEMORY;
4449 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4450 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4451 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4452 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4453 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4454 err = crypto_register_aead(&driver_algs[i].alg.aead);
4455 name = driver_algs[i].alg.aead.base.cra_driver_name;
4456 break;
4457 case CRYPTO_ALG_TYPE_AHASH:
4458 a_hash = &driver_algs[i].alg.hash;
4459 a_hash->update = chcr_ahash_update;
4460 a_hash->final = chcr_ahash_final;
4461 a_hash->finup = chcr_ahash_finup;
4462 a_hash->digest = chcr_ahash_digest;
4463 a_hash->export = chcr_ahash_export;
4464 a_hash->import = chcr_ahash_import;
4465 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4466 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4467 a_hash->halg.base.cra_module = THIS_MODULE;
4468 a_hash->halg.base.cra_flags =
4469 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4470 a_hash->halg.base.cra_alignmask = 0;
4471 a_hash->halg.base.cra_exit = NULL;
4472
4473 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4474 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4475 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4476 a_hash->init = chcr_hmac_init;
4477 a_hash->setkey = chcr_ahash_setkey;
4478 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4479 } else {
4480 a_hash->init = chcr_sha_init;
4481 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4482 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4483 }
4484 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4485 ai = driver_algs[i].alg.hash.halg.base;
4486 name = ai.cra_driver_name;
4487 break;
4488 }
4489 if (err) {
4490 pr_err("%s : Algorithm registration failed\n", name);
4491 goto register_err;
4492 } else {
4493 driver_algs[i].is_registered = 1;
4494 }
4495 }
4496 return 0;
4497
4498register_err:
4499 chcr_unregister_alg();
4500 return err;
4501}
4502
4503/*
4504 * start_crypto - Register the crypto algorithms.
4505 * This should be called once when the first device comes up. After this
4506 * the kernel will start calling driver APIs for crypto operations.
4507 */
4508int start_crypto(void)
4509{
4510 return chcr_register_alg();
4511}
4512
4513/*
4514 * stop_crypto - Deregister all the crypto algorithms with kernel.
4515 * This should be called once when the last device goes down. After this
4516 * the kernel will not call the driver API for crypto operations.
4517 */
4518int stop_crypto(void)
4519{
4520 chcr_unregister_alg();
4521 return 0;
4522}
1/*
2 * This file is part of the Chelsio T6 Crypto driver for Linux.
3 *
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
40 */
41
42#define pr_fmt(fmt) "chcr:" fmt
43
44#include <linux/kernel.h>
45#include <linux/module.h>
46#include <linux/crypto.h>
47#include <linux/skbuff.h>
48#include <linux/rtnetlink.h>
49#include <linux/highmem.h>
50#include <linux/scatterlist.h>
51
52#include <crypto/aes.h>
53#include <crypto/algapi.h>
54#include <crypto/hash.h>
55#include <crypto/gcm.h>
56#include <crypto/sha1.h>
57#include <crypto/sha2.h>
58#include <crypto/authenc.h>
59#include <crypto/ctr.h>
60#include <crypto/gf128mul.h>
61#include <crypto/internal/aead.h>
62#include <crypto/null.h>
63#include <crypto/internal/skcipher.h>
64#include <crypto/aead.h>
65#include <crypto/scatterwalk.h>
66#include <crypto/internal/hash.h>
67
68#include "t4fw_api.h"
69#include "t4_msg.h"
70#include "chcr_core.h"
71#include "chcr_algo.h"
72#include "chcr_crypto.h"
73
74#define IV AES_BLOCK_SIZE
75
76static unsigned int sgl_ent_len[] = {
77 0, 0, 16, 24, 40, 48, 64, 72, 88,
78 96, 112, 120, 136, 144, 160, 168, 184,
79 192, 208, 216, 232, 240, 256, 264, 280,
80 288, 304, 312, 328, 336, 352, 360, 376
81};
82
83static unsigned int dsgl_ent_len[] = {
84 0, 32, 32, 48, 48, 64, 64, 80, 80,
85 112, 112, 128, 128, 144, 144, 160, 160,
86 192, 192, 208, 208, 224, 224, 240, 240,
87 272, 272, 288, 288, 304, 304, 320, 320
88};
89
90static u32 round_constant[11] = {
91 0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 0x1B000000, 0x36000000, 0x6C000000
94};
95
96static int chcr_handle_cipher_resp(struct skcipher_request *req,
97 unsigned char *input, int err);
98
99static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100{
101 return &ctx->crypto_ctx->aeadctx;
102}
103
104static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105{
106 return &ctx->crypto_ctx->ablkctx;
107}
108
109static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110{
111 return &ctx->crypto_ctx->hmacctx;
112}
113
114static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115{
116 return gctx->ctx->gcm;
117}
118
119static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120{
121 return gctx->ctx->authenc;
122}
123
124static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125{
126 return container_of(ctx->dev, struct uld_ctx, dev);
127}
128
129static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
130{
131 memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
132}
133
134static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
135 unsigned int entlen,
136 unsigned int skip)
137{
138 int nents = 0;
139 unsigned int less;
140 unsigned int skip_len = 0;
141
142 while (sg && skip) {
143 if (sg_dma_len(sg) <= skip) {
144 skip -= sg_dma_len(sg);
145 skip_len = 0;
146 sg = sg_next(sg);
147 } else {
148 skip_len = skip;
149 skip = 0;
150 }
151 }
152
153 while (sg && reqlen) {
154 less = min(reqlen, sg_dma_len(sg) - skip_len);
155 nents += DIV_ROUND_UP(less, entlen);
156 reqlen -= less;
157 skip_len = 0;
158 sg = sg_next(sg);
159 }
160 return nents;
161}
162
163static inline int get_aead_subtype(struct crypto_aead *aead)
164{
165 struct aead_alg *alg = crypto_aead_alg(aead);
166 struct chcr_alg_template *chcr_crypto_alg =
167 container_of(alg, struct chcr_alg_template, alg.aead);
168 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
169}
170
171void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
172{
173 u8 temp[SHA512_DIGEST_SIZE];
174 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
175 int authsize = crypto_aead_authsize(tfm);
176 struct cpl_fw6_pld *fw6_pld;
177 int cmp = 0;
178
179 fw6_pld = (struct cpl_fw6_pld *)input;
180 if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
181 (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
182 cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
183 } else {
184
185 sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
186 authsize, req->assoclen +
187 req->cryptlen - authsize);
188 cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
189 }
190 if (cmp)
191 *err = -EBADMSG;
192 else
193 *err = 0;
194}
195
196static int chcr_inc_wrcount(struct chcr_dev *dev)
197{
198 if (dev->state == CHCR_DETACH)
199 return 1;
200 atomic_inc(&dev->inflight);
201 return 0;
202}
203
204static inline void chcr_dec_wrcount(struct chcr_dev *dev)
205{
206 atomic_dec(&dev->inflight);
207}
208
209static inline int chcr_handle_aead_resp(struct aead_request *req,
210 unsigned char *input,
211 int err)
212{
213 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
214 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
215 struct chcr_dev *dev = a_ctx(tfm)->dev;
216
217 chcr_aead_common_exit(req);
218 if (reqctx->verify == VERIFY_SW) {
219 chcr_verify_tag(req, input, &err);
220 reqctx->verify = VERIFY_HW;
221 }
222 chcr_dec_wrcount(dev);
223 aead_request_complete(req, err);
224
225 return err;
226}
227
228static void get_aes_decrypt_key(unsigned char *dec_key,
229 const unsigned char *key,
230 unsigned int keylength)
231{
232 u32 temp;
233 u32 w_ring[MAX_NK];
234 int i, j, k;
235 u8 nr, nk;
236
237 switch (keylength) {
238 case AES_KEYLENGTH_128BIT:
239 nk = KEYLENGTH_4BYTES;
240 nr = NUMBER_OF_ROUNDS_10;
241 break;
242 case AES_KEYLENGTH_192BIT:
243 nk = KEYLENGTH_6BYTES;
244 nr = NUMBER_OF_ROUNDS_12;
245 break;
246 case AES_KEYLENGTH_256BIT:
247 nk = KEYLENGTH_8BYTES;
248 nr = NUMBER_OF_ROUNDS_14;
249 break;
250 default:
251 return;
252 }
253 for (i = 0; i < nk; i++)
254 w_ring[i] = get_unaligned_be32(&key[i * 4]);
255
256 i = 0;
257 temp = w_ring[nk - 1];
258 while (i + nk < (nr + 1) * 4) {
259 if (!(i % nk)) {
260 /* RotWord(temp) */
261 temp = (temp << 8) | (temp >> 24);
262 temp = aes_ks_subword(temp);
263 temp ^= round_constant[i / nk];
264 } else if (nk == 8 && (i % 4 == 0)) {
265 temp = aes_ks_subword(temp);
266 }
267 w_ring[i % nk] ^= temp;
268 temp = w_ring[i % nk];
269 i++;
270 }
271 i--;
272 for (k = 0, j = i % nk; k < nk; k++) {
273 put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
274 j--;
275 if (j < 0)
276 j += nk;
277 }
278}
279
280static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
281{
282 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
283
284 switch (ds) {
285 case SHA1_DIGEST_SIZE:
286 base_hash = crypto_alloc_shash("sha1", 0, 0);
287 break;
288 case SHA224_DIGEST_SIZE:
289 base_hash = crypto_alloc_shash("sha224", 0, 0);
290 break;
291 case SHA256_DIGEST_SIZE:
292 base_hash = crypto_alloc_shash("sha256", 0, 0);
293 break;
294 case SHA384_DIGEST_SIZE:
295 base_hash = crypto_alloc_shash("sha384", 0, 0);
296 break;
297 case SHA512_DIGEST_SIZE:
298 base_hash = crypto_alloc_shash("sha512", 0, 0);
299 break;
300 }
301
302 return base_hash;
303}
304
305static int chcr_compute_partial_hash(struct shash_desc *desc,
306 char *iopad, char *result_hash,
307 int digest_size)
308{
309 struct sha1_state sha1_st;
310 struct sha256_state sha256_st;
311 struct sha512_state sha512_st;
312 int error;
313
314 if (digest_size == SHA1_DIGEST_SIZE) {
315 error = crypto_shash_init(desc) ?:
316 crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
317 crypto_shash_export(desc, (void *)&sha1_st);
318 memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
319 } else if (digest_size == SHA224_DIGEST_SIZE) {
320 error = crypto_shash_init(desc) ?:
321 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
322 crypto_shash_export(desc, (void *)&sha256_st);
323 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
324
325 } else if (digest_size == SHA256_DIGEST_SIZE) {
326 error = crypto_shash_init(desc) ?:
327 crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
328 crypto_shash_export(desc, (void *)&sha256_st);
329 memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
330
331 } else if (digest_size == SHA384_DIGEST_SIZE) {
332 error = crypto_shash_init(desc) ?:
333 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
334 crypto_shash_export(desc, (void *)&sha512_st);
335 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
336
337 } else if (digest_size == SHA512_DIGEST_SIZE) {
338 error = crypto_shash_init(desc) ?:
339 crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
340 crypto_shash_export(desc, (void *)&sha512_st);
341 memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
342 } else {
343 error = -EINVAL;
344 pr_err("Unknown digest size %d\n", digest_size);
345 }
346 return error;
347}
348
349static void chcr_change_order(char *buf, int ds)
350{
351 int i;
352
353 if (ds == SHA512_DIGEST_SIZE) {
354 for (i = 0; i < (ds / sizeof(u64)); i++)
355 *((__be64 *)buf + i) =
356 cpu_to_be64(*((u64 *)buf + i));
357 } else {
358 for (i = 0; i < (ds / sizeof(u32)); i++)
359 *((__be32 *)buf + i) =
360 cpu_to_be32(*((u32 *)buf + i));
361 }
362}
363
364static inline int is_hmac(struct crypto_tfm *tfm)
365{
366 struct crypto_alg *alg = tfm->__crt_alg;
367 struct chcr_alg_template *chcr_crypto_alg =
368 container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
369 alg.hash);
370 if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
371 return 1;
372 return 0;
373}
374
375static inline void dsgl_walk_init(struct dsgl_walk *walk,
376 struct cpl_rx_phys_dsgl *dsgl)
377{
378 walk->dsgl = dsgl;
379 walk->nents = 0;
380 walk->to = (struct phys_sge_pairs *)(dsgl + 1);
381}
382
383static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
384 int pci_chan_id)
385{
386 struct cpl_rx_phys_dsgl *phys_cpl;
387
388 phys_cpl = walk->dsgl;
389
390 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
391 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
392 phys_cpl->pcirlxorder_to_noofsgentr =
393 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
394 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
395 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
396 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
397 CPL_RX_PHYS_DSGL_DCAID_V(0) |
398 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
399 phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
400 phys_cpl->rss_hdr_int.qid = htons(qid);
401 phys_cpl->rss_hdr_int.hash_val = 0;
402 phys_cpl->rss_hdr_int.channel = pci_chan_id;
403}
404
405static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
406 size_t size,
407 dma_addr_t addr)
408{
409 int j;
410
411 if (!size)
412 return;
413 j = walk->nents;
414 walk->to->len[j % 8] = htons(size);
415 walk->to->addr[j % 8] = cpu_to_be64(addr);
416 j++;
417 if ((j % 8) == 0)
418 walk->to++;
419 walk->nents = j;
420}
421
422static void dsgl_walk_add_sg(struct dsgl_walk *walk,
423 struct scatterlist *sg,
424 unsigned int slen,
425 unsigned int skip)
426{
427 int skip_len = 0;
428 unsigned int left_size = slen, len = 0;
429 unsigned int j = walk->nents;
430 int offset, ent_len;
431
432 if (!slen)
433 return;
434 while (sg && skip) {
435 if (sg_dma_len(sg) <= skip) {
436 skip -= sg_dma_len(sg);
437 skip_len = 0;
438 sg = sg_next(sg);
439 } else {
440 skip_len = skip;
441 skip = 0;
442 }
443 }
444
445 while (left_size && sg) {
446 len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
447 offset = 0;
448 while (len) {
449 ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
450 walk->to->len[j % 8] = htons(ent_len);
451 walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
452 offset + skip_len);
453 offset += ent_len;
454 len -= ent_len;
455 j++;
456 if ((j % 8) == 0)
457 walk->to++;
458 }
459 walk->last_sg = sg;
460 walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
461 skip_len) + skip_len;
462 left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
463 skip_len = 0;
464 sg = sg_next(sg);
465 }
466 walk->nents = j;
467}
468
469static inline void ulptx_walk_init(struct ulptx_walk *walk,
470 struct ulptx_sgl *ulp)
471{
472 walk->sgl = ulp;
473 walk->nents = 0;
474 walk->pair_idx = 0;
475 walk->pair = ulp->sge;
476 walk->last_sg = NULL;
477 walk->last_sg_len = 0;
478}
479
480static inline void ulptx_walk_end(struct ulptx_walk *walk)
481{
482 walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
483 ULPTX_NSGE_V(walk->nents));
484}
485
486
487static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
488 size_t size,
489 dma_addr_t addr)
490{
491 if (!size)
492 return;
493
494 if (walk->nents == 0) {
495 walk->sgl->len0 = cpu_to_be32(size);
496 walk->sgl->addr0 = cpu_to_be64(addr);
497 } else {
498 walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
499 walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
500 walk->pair_idx = !walk->pair_idx;
501 if (!walk->pair_idx)
502 walk->pair++;
503 }
504 walk->nents++;
505}
506
507static void ulptx_walk_add_sg(struct ulptx_walk *walk,
508 struct scatterlist *sg,
509 unsigned int len,
510 unsigned int skip)
511{
512 int small;
513 int skip_len = 0;
514 unsigned int sgmin;
515
516 if (!len)
517 return;
518 while (sg && skip) {
519 if (sg_dma_len(sg) <= skip) {
520 skip -= sg_dma_len(sg);
521 skip_len = 0;
522 sg = sg_next(sg);
523 } else {
524 skip_len = skip;
525 skip = 0;
526 }
527 }
528 WARN(!sg, "SG should not be null here\n");
529 if (sg && (walk->nents == 0)) {
530 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
531 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
532 walk->sgl->len0 = cpu_to_be32(sgmin);
533 walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
534 walk->nents++;
535 len -= sgmin;
536 walk->last_sg = sg;
537 walk->last_sg_len = sgmin + skip_len;
538 skip_len += sgmin;
539 if (sg_dma_len(sg) == skip_len) {
540 sg = sg_next(sg);
541 skip_len = 0;
542 }
543 }
544
545 while (sg && len) {
546 small = min(sg_dma_len(sg) - skip_len, len);
547 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
548 walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
549 walk->pair->addr[walk->pair_idx] =
550 cpu_to_be64(sg_dma_address(sg) + skip_len);
551 walk->pair_idx = !walk->pair_idx;
552 walk->nents++;
553 if (!walk->pair_idx)
554 walk->pair++;
555 len -= sgmin;
556 skip_len += sgmin;
557 walk->last_sg = sg;
558 walk->last_sg_len = skip_len;
559 if (sg_dma_len(sg) == skip_len) {
560 sg = sg_next(sg);
561 skip_len = 0;
562 }
563 }
564}
565
566static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
567{
568 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
569 struct chcr_alg_template *chcr_crypto_alg =
570 container_of(alg, struct chcr_alg_template, alg.skcipher);
571
572 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
573}
574
575static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
576{
577 struct adapter *adap = netdev2adap(dev);
578 struct sge_uld_txq_info *txq_info =
579 adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
580 struct sge_uld_txq *txq;
581 int ret = 0;
582
583 local_bh_disable();
584 txq = &txq_info->uldtxq[idx];
585 spin_lock(&txq->sendq.lock);
586 if (txq->full)
587 ret = -1;
588 spin_unlock(&txq->sendq.lock);
589 local_bh_enable();
590 return ret;
591}
592
593static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
594 struct _key_ctx *key_ctx)
595{
596 if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
597 memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
598 } else {
599 memcpy(key_ctx->key,
600 ablkctx->key + (ablkctx->enckey_len >> 1),
601 ablkctx->enckey_len >> 1);
602 memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
603 ablkctx->rrkey, ablkctx->enckey_len >> 1);
604 }
605 return 0;
606}
607
608static int chcr_hash_ent_in_wr(struct scatterlist *src,
609 unsigned int minsg,
610 unsigned int space,
611 unsigned int srcskip)
612{
613 int srclen = 0;
614 int srcsg = minsg;
615 int soffset = 0, sless;
616
617 if (sg_dma_len(src) == srcskip) {
618 src = sg_next(src);
619 srcskip = 0;
620 }
621 while (src && space > (sgl_ent_len[srcsg + 1])) {
622 sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
623 CHCR_SRC_SG_SIZE);
624 srclen += sless;
625 soffset += sless;
626 srcsg++;
627 if (sg_dma_len(src) == (soffset + srcskip)) {
628 src = sg_next(src);
629 soffset = 0;
630 srcskip = 0;
631 }
632 }
633 return srclen;
634}
635
636static int chcr_sg_ent_in_wr(struct scatterlist *src,
637 struct scatterlist *dst,
638 unsigned int minsg,
639 unsigned int space,
640 unsigned int srcskip,
641 unsigned int dstskip)
642{
643 int srclen = 0, dstlen = 0;
644 int srcsg = minsg, dstsg = minsg;
645 int offset = 0, soffset = 0, less, sless = 0;
646
647 if (sg_dma_len(src) == srcskip) {
648 src = sg_next(src);
649 srcskip = 0;
650 }
651 if (sg_dma_len(dst) == dstskip) {
652 dst = sg_next(dst);
653 dstskip = 0;
654 }
655
656 while (src && dst &&
657 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
658 sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
659 CHCR_SRC_SG_SIZE);
660 srclen += sless;
661 srcsg++;
662 offset = 0;
663 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
664 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
665 if (srclen <= dstlen)
666 break;
667 less = min_t(unsigned int, sg_dma_len(dst) - offset -
668 dstskip, CHCR_DST_SG_SIZE);
669 dstlen += less;
670 offset += less;
671 if ((offset + dstskip) == sg_dma_len(dst)) {
672 dst = sg_next(dst);
673 offset = 0;
674 }
675 dstsg++;
676 dstskip = 0;
677 }
678 soffset += sless;
679 if ((soffset + srcskip) == sg_dma_len(src)) {
680 src = sg_next(src);
681 srcskip = 0;
682 soffset = 0;
683 }
684
685 }
686 return min(srclen, dstlen);
687}
688
689static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
690 struct skcipher_request *req,
691 u8 *iv,
692 unsigned short op_type)
693{
694 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
695 int err;
696
697 skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
698 skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
699 req->base.complete, req->base.data);
700 skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
701 req->cryptlen, iv);
702
703 err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
704 crypto_skcipher_encrypt(&reqctx->fallback_req);
705
706 return err;
707
708}
709
710static inline int get_qidxs(struct crypto_async_request *req,
711 unsigned int *txqidx, unsigned int *rxqidx)
712{
713 struct crypto_tfm *tfm = req->tfm;
714 int ret = 0;
715
716 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
717 case CRYPTO_ALG_TYPE_AEAD:
718 {
719 struct aead_request *aead_req =
720 container_of(req, struct aead_request, base);
721 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
722 *txqidx = reqctx->txqidx;
723 *rxqidx = reqctx->rxqidx;
724 break;
725 }
726 case CRYPTO_ALG_TYPE_SKCIPHER:
727 {
728 struct skcipher_request *sk_req =
729 container_of(req, struct skcipher_request, base);
730 struct chcr_skcipher_req_ctx *reqctx =
731 skcipher_request_ctx(sk_req);
732 *txqidx = reqctx->txqidx;
733 *rxqidx = reqctx->rxqidx;
734 break;
735 }
736 case CRYPTO_ALG_TYPE_AHASH:
737 {
738 struct ahash_request *ahash_req =
739 container_of(req, struct ahash_request, base);
740 struct chcr_ahash_req_ctx *reqctx =
741 ahash_request_ctx(ahash_req);
742 *txqidx = reqctx->txqidx;
743 *rxqidx = reqctx->rxqidx;
744 break;
745 }
746 default:
747 ret = -EINVAL;
748 /* should never get here */
749 BUG();
750 break;
751 }
752 return ret;
753}
754
755static inline void create_wreq(struct chcr_context *ctx,
756 struct chcr_wr *chcr_req,
757 struct crypto_async_request *req,
758 unsigned int imm,
759 int hash_sz,
760 unsigned int len16,
761 unsigned int sc_len,
762 unsigned int lcb)
763{
764 struct uld_ctx *u_ctx = ULD_CTX(ctx);
765 unsigned int tx_channel_id, rx_channel_id;
766 unsigned int txqidx = 0, rxqidx = 0;
767 unsigned int qid, fid, portno;
768
769 get_qidxs(req, &txqidx, &rxqidx);
770 qid = u_ctx->lldi.rxq_ids[rxqidx];
771 fid = u_ctx->lldi.rxq_ids[0];
772 portno = rxqidx / ctx->rxq_perchan;
773 tx_channel_id = txqidx / ctx->txq_perchan;
774 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
775
776
777 chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
778 chcr_req->wreq.pld_size_hash_size =
779 htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
780 chcr_req->wreq.len16_pkd =
781 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
782 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
783 chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
784 !!lcb, txqidx);
785
786 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
787 chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
788 ((sizeof(chcr_req->wreq)) >> 4)));
789 chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
790 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
791 sizeof(chcr_req->key_ctx) + sc_len);
792}
793
794/**
795 * create_cipher_wr - form the WR for cipher operations
796 * @wrparam: Container for create_cipher_wr()'s parameters
797 */
798static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
799{
800 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
801 struct chcr_context *ctx = c_ctx(tfm);
802 struct uld_ctx *u_ctx = ULD_CTX(ctx);
803 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
804 struct sk_buff *skb = NULL;
805 struct chcr_wr *chcr_req;
806 struct cpl_rx_phys_dsgl *phys_cpl;
807 struct ulptx_sgl *ulptx;
808 struct chcr_skcipher_req_ctx *reqctx =
809 skcipher_request_ctx(wrparam->req);
810 unsigned int temp = 0, transhdr_len, dst_size;
811 int error;
812 int nents;
813 unsigned int kctx_len;
814 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
815 GFP_KERNEL : GFP_ATOMIC;
816 struct adapter *adap = padap(ctx->dev);
817 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
818
819 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
820 nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
821 reqctx->dst_ofst);
822 dst_size = get_space_for_phys_dsgl(nents);
823 kctx_len = roundup(ablkctx->enckey_len, 16);
824 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
825 nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
826 CHCR_SRC_SG_SIZE, reqctx->src_ofst);
827 temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
828 (sgl_len(nents) * 8);
829 transhdr_len += temp;
830 transhdr_len = roundup(transhdr_len, 16);
831 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
832 if (!skb) {
833 error = -ENOMEM;
834 goto err;
835 }
836 chcr_req = __skb_put_zero(skb, transhdr_len);
837 chcr_req->sec_cpl.op_ivinsrtofst =
838 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
839
840 chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
841 chcr_req->sec_cpl.aadstart_cipherstop_hi =
842 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
843
844 chcr_req->sec_cpl.cipherstop_lo_authinsert =
845 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
846 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
847 ablkctx->ciph_mode,
848 0, 0, IV >> 1);
849 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
850 0, 1, dst_size);
851
852 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
853 if ((reqctx->op == CHCR_DECRYPT_OP) &&
854 (!(get_cryptoalg_subtype(tfm) ==
855 CRYPTO_ALG_SUB_TYPE_CTR)) &&
856 (!(get_cryptoalg_subtype(tfm) ==
857 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
858 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
859 } else {
860 if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
861 (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
862 memcpy(chcr_req->key_ctx.key, ablkctx->key,
863 ablkctx->enckey_len);
864 } else {
865 memcpy(chcr_req->key_ctx.key, ablkctx->key +
866 (ablkctx->enckey_len >> 1),
867 ablkctx->enckey_len >> 1);
868 memcpy(chcr_req->key_ctx.key +
869 (ablkctx->enckey_len >> 1),
870 ablkctx->key,
871 ablkctx->enckey_len >> 1);
872 }
873 }
874 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
875 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
876 chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
877 chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
878
879 atomic_inc(&adap->chcr_stats.cipher_rqst);
880 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
881 + (reqctx->imm ? (wrparam->bytes) : 0);
882 create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
883 transhdr_len, temp,
884 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
885 reqctx->skb = skb;
886
887 if (reqctx->op && (ablkctx->ciph_mode ==
888 CHCR_SCMD_CIPHER_MODE_AES_CBC))
889 sg_pcopy_to_buffer(wrparam->req->src,
890 sg_nents(wrparam->req->src), wrparam->req->iv, 16,
891 reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
892
893 return skb;
894err:
895 return ERR_PTR(error);
896}
897
898static inline int chcr_keyctx_ck_size(unsigned int keylen)
899{
900 int ck_size = 0;
901
902 if (keylen == AES_KEYSIZE_128)
903 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
904 else if (keylen == AES_KEYSIZE_192)
905 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
906 else if (keylen == AES_KEYSIZE_256)
907 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
908 else
909 ck_size = 0;
910
911 return ck_size;
912}
913static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
914 const u8 *key,
915 unsigned int keylen)
916{
917 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
918
919 crypto_skcipher_clear_flags(ablkctx->sw_cipher,
920 CRYPTO_TFM_REQ_MASK);
921 crypto_skcipher_set_flags(ablkctx->sw_cipher,
922 cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
923 return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
924}
925
926static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
927 const u8 *key,
928 unsigned int keylen)
929{
930 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
931 unsigned int ck_size, context_size;
932 u16 alignment = 0;
933 int err;
934
935 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
936 if (err)
937 goto badkey_err;
938
939 ck_size = chcr_keyctx_ck_size(keylen);
940 alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
941 memcpy(ablkctx->key, key, keylen);
942 ablkctx->enckey_len = keylen;
943 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
944 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
945 keylen + alignment) >> 4;
946
947 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
948 0, 0, context_size);
949 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
950 return 0;
951badkey_err:
952 ablkctx->enckey_len = 0;
953
954 return err;
955}
956
957static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
958 const u8 *key,
959 unsigned int keylen)
960{
961 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
962 unsigned int ck_size, context_size;
963 u16 alignment = 0;
964 int err;
965
966 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
967 if (err)
968 goto badkey_err;
969 ck_size = chcr_keyctx_ck_size(keylen);
970 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
971 memcpy(ablkctx->key, key, keylen);
972 ablkctx->enckey_len = keylen;
973 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
974 keylen + alignment) >> 4;
975
976 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
977 0, 0, context_size);
978 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
979
980 return 0;
981badkey_err:
982 ablkctx->enckey_len = 0;
983
984 return err;
985}
986
987static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
988 const u8 *key,
989 unsigned int keylen)
990{
991 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
992 unsigned int ck_size, context_size;
993 u16 alignment = 0;
994 int err;
995
996 if (keylen < CTR_RFC3686_NONCE_SIZE)
997 return -EINVAL;
998 memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
999 CTR_RFC3686_NONCE_SIZE);
1000
1001 keylen -= CTR_RFC3686_NONCE_SIZE;
1002 err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1003 if (err)
1004 goto badkey_err;
1005
1006 ck_size = chcr_keyctx_ck_size(keylen);
1007 alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1008 memcpy(ablkctx->key, key, keylen);
1009 ablkctx->enckey_len = keylen;
1010 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1011 keylen + alignment) >> 4;
1012
1013 ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1014 0, 0, context_size);
1015 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1016
1017 return 0;
1018badkey_err:
1019 ablkctx->enckey_len = 0;
1020
1021 return err;
1022}
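/* ctr_add_iv() treats the 16-byte IV as a big-endian counter: it copies
 * srciv into dstiv, adds 'add' to the least-significant 32-bit word and
 * propagates any carry into the more-significant words.
 */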
1023static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1024{
1025 unsigned int size = AES_BLOCK_SIZE;
1026 __be32 *b = (__be32 *)(dstiv + size);
1027 u32 c, prev;
1028
1029 memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1030 for (; size >= 4; size -= 4) {
1031 prev = be32_to_cpu(*--b);
1032 c = prev + add;
1033 *b = cpu_to_be32(c);
1034 if (prev < c)
1035 break;
1036 add = 1;
1037 }
1038
1039}
1040
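/* Clamp 'bytes' so the low 32-bit counter word of the CTR IV cannot wrap
 * within this work request; the remainder is sent in a follow-up request
 * with an updated IV.
 */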
1041static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1042{
1043 __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1044 u64 c;
1045 u32 temp = be32_to_cpu(*--b);
1046
1047 temp = ~temp;
1048 c = (u64)temp + 1; // Number of blocks that can be processed without overflow
1049 if ((bytes / AES_BLOCK_SIZE) >= c)
1050 bytes = c * AES_BLOCK_SIZE;
1051 return bytes;
1052}
1053
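/* Recompute the XTS tweak for the next chunk: encrypt reqctx->iv with the
 * tweak key (the second half of the key material), then advance it by one
 * GF(2^128) multiplication per block already processed. For intermediate
 * chunks the result is decrypted again so it can be handed back to the
 * hardware as a plain IV.
 */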
1054static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1055 u32 isfinal)
1056{
1057 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1058 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1059 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1060 struct crypto_aes_ctx aes;
1061 int ret, i;
1062 u8 *key;
1063 unsigned int keylen;
1064 int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1065 int round8 = round / 8;
1066
1067 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1068
1069 keylen = ablkctx->enckey_len / 2;
1070 key = ablkctx->key + keylen;
1071 /* For a 192-bit key, remove the padded zeroes which were
1072 * added in chcr_aes_xts_setkey
1073 */
1074 if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1075 == CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1076 ret = aes_expandkey(&aes, key, keylen - 8);
1077 else
1078 ret = aes_expandkey(&aes, key, keylen);
1079 if (ret)
1080 return ret;
1081 aes_encrypt(&aes, iv, iv);
1082 for (i = 0; i < round8; i++)
1083 gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1084
1085 for (i = 0; i < (round % 8); i++)
1086 gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1087
1088 if (!isfinal)
1089 aes_decrypt(&aes, iv, iv);
1090
1091 memzero_explicit(&aes, sizeof(aes));
1092 return 0;
1093}
1094
1095static int chcr_update_cipher_iv(struct skcipher_request *req,
1096 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1097{
1098 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1099 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1100 int subtype = get_cryptoalg_subtype(tfm);
1101 int ret = 0;
1102
1103 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1104 ctr_add_iv(iv, req->iv, (reqctx->processed /
1105 AES_BLOCK_SIZE));
1106 else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1107 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1108 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1109 AES_BLOCK_SIZE) + 1);
1110 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1111 ret = chcr_update_tweak(req, iv, 0);
1112 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1113 if (reqctx->op)
1114 /*Updated before sending last WR*/
1115 memcpy(iv, req->iv, AES_BLOCK_SIZE);
1116 else
1117 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1118 }
1119
1120 return ret;
1121
1122}
1123
1124 /* We need a separate function for the final IV because in RFC 3686 the
1125 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1126 * remains constant for subsequent update requests
1127 */
1128
1129static int chcr_final_cipher_iv(struct skcipher_request *req,
1130 struct cpl_fw6_pld *fw6_pld, u8 *iv)
1131{
1132 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1133 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1134 int subtype = get_cryptoalg_subtype(tfm);
1135 int ret = 0;
1136
1137 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1138 ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1139 AES_BLOCK_SIZE));
1140 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1141 if (!reqctx->partial_req)
1142 memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1143 else
1144 ret = chcr_update_tweak(req, iv, 1);
1145 }
1146 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1147 /*Already updated for Decrypt*/
1148 if (!reqctx->op)
1149 memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1150
1151 }
1152 return ret;
1153
1154}
1155
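/* Completion handler for cipher work requests: update the IV for the data
 * processed so far and either complete the request, hand the remainder to
 * the software fallback, or issue the next partial work request.
 */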
1156static int chcr_handle_cipher_resp(struct skcipher_request *req,
1157 unsigned char *input, int err)
1158{
1159 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1160 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1161 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1162 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1163 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1164 struct chcr_dev *dev = c_ctx(tfm)->dev;
1165 struct chcr_context *ctx = c_ctx(tfm);
1166 struct adapter *adap = padap(ctx->dev);
1167 struct cipher_wr_param wrparam;
1168 struct sk_buff *skb;
1169 int bytes;
1170
1171 if (err)
1172 goto unmap;
1173 if (req->cryptlen == reqctx->processed) {
1174 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1175 req);
1176 err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1177 goto complete;
1178 }
1179
1180 if (!reqctx->imm) {
1181 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1182 CIP_SPACE_LEFT(ablkctx->enckey_len),
1183 reqctx->src_ofst, reqctx->dst_ofst);
1184 if ((bytes + reqctx->processed) >= req->cryptlen)
1185 bytes = req->cryptlen - reqctx->processed;
1186 else
1187 bytes = rounddown(bytes, 16);
1188 } else {
1189 /*CTR mode counter overflow*/
1190 bytes = req->cryptlen - reqctx->processed;
1191 }
1192 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1193 if (err)
1194 goto unmap;
1195
1196 if (unlikely(bytes == 0)) {
1197 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1198 req);
1199 memcpy(req->iv, reqctx->init_iv, IV);
1200 atomic_inc(&adap->chcr_stats.fallback);
1201 err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1202 reqctx->op);
1203 goto complete;
1204 }
1205
1206 if (get_cryptoalg_subtype(tfm) ==
1207 CRYPTO_ALG_SUB_TYPE_CTR)
1208 bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1209 wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1210 wrparam.req = req;
1211 wrparam.bytes = bytes;
1212 skb = create_cipher_wr(&wrparam);
1213 if (IS_ERR(skb)) {
1214 pr_err("%s : Failed to form WR. No memory\n", __func__);
1215 err = PTR_ERR(skb);
1216 goto unmap;
1217 }
1218 skb->dev = u_ctx->lldi.ports[0];
1219 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1220 chcr_send_wr(skb);
1221 reqctx->last_req_len = bytes;
1222 reqctx->processed += bytes;
1223 if (get_cryptoalg_subtype(tfm) ==
1224 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1225 CRYPTO_TFM_REQ_MAY_SLEEP) {
1226 complete(&ctx->cbc_aes_aio_done);
1227 }
1228 return 0;
1229unmap:
1230 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1231complete:
1232 if (get_cryptoalg_subtype(tfm) ==
1233 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1234 CRYPTO_TFM_REQ_MAY_SLEEP) {
1235 complete(&ctx->cbc_aes_aio_done);
1236 }
1237 chcr_dec_wrcount(dev);
1238 skcipher_request_complete(req, err);
1239 return err;
1240}
1241
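/* Build and validate the first work request for a cipher operation: check
 * key/IV/length constraints, DMA-map the scatterlists, decide between
 * immediate data and SGLs, set up the per-request IV and return the WR skb.
 * Requests the hardware cannot handle are punted to the software fallback.
 */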
1242static int process_cipher(struct skcipher_request *req,
1243 unsigned short qid,
1244 struct sk_buff **skb,
1245 unsigned short op_type)
1246{
1247 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1248 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1249 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1250 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1251 struct adapter *adap = padap(c_ctx(tfm)->dev);
1252 struct cipher_wr_param wrparam;
1253 int bytes, err = -EINVAL;
1254 int subtype;
1255
1256 reqctx->processed = 0;
1257 reqctx->partial_req = 0;
1258 if (!req->iv)
1259 goto error;
1260 subtype = get_cryptoalg_subtype(tfm);
1261 if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1262 (req->cryptlen == 0) ||
1263 (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1264 if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1265 goto fallback;
1266 else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1267 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1268 goto fallback;
1269 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1270 ablkctx->enckey_len, req->cryptlen, ivsize);
1271 goto error;
1272 }
1273
1274 err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1275 if (err)
1276 goto error;
1277 if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1278 AES_MIN_KEY_SIZE +
1279 sizeof(struct cpl_rx_phys_dsgl) +
1280 /*Min dsgl size*/
1281 32))) {
1282 /* Can be sent as Imm*/
1283 unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1284
1285 dnents = sg_nents_xlen(req->dst, req->cryptlen,
1286 CHCR_DST_SG_SIZE, 0);
1287 phys_dsgl = get_space_for_phys_dsgl(dnents);
1288 kctx_len = roundup(ablkctx->enckey_len, 16);
1289 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1290 reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1291 SGE_MAX_WR_LEN;
1292 bytes = IV + req->cryptlen;
1293
1294 } else {
1295 reqctx->imm = 0;
1296 }
1297
1298 if (!reqctx->imm) {
1299 bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1300 CIP_SPACE_LEFT(ablkctx->enckey_len),
1301 0, 0);
1302 if ((bytes + reqctx->processed) >= req->cryptlen)
1303 bytes = req->cryptlen - reqctx->processed;
1304 else
1305 bytes = rounddown(bytes, 16);
1306 } else {
1307 bytes = req->cryptlen;
1308 }
1309 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1310 bytes = adjust_ctr_overflow(req->iv, bytes);
1311 }
1312 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1313 memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1314 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1315 CTR_RFC3686_IV_SIZE);
1316
1317 /* initialize counter portion of counter block */
1318 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1319 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1320 memcpy(reqctx->init_iv, reqctx->iv, IV);
1321
1322 } else {
1323
1324 memcpy(reqctx->iv, req->iv, IV);
1325 memcpy(reqctx->init_iv, req->iv, IV);
1326 }
1327 if (unlikely(bytes == 0)) {
1328 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1329 req);
1330fallback: atomic_inc(&adap->chcr_stats.fallback);
1331 err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1332 subtype ==
1333 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1334 reqctx->iv : req->iv,
1335 op_type);
1336 goto error;
1337 }
1338 reqctx->op = op_type;
1339 reqctx->srcsg = req->src;
1340 reqctx->dstsg = req->dst;
1341 reqctx->src_ofst = 0;
1342 reqctx->dst_ofst = 0;
1343 wrparam.qid = qid;
1344 wrparam.req = req;
1345 wrparam.bytes = bytes;
1346 *skb = create_cipher_wr(&wrparam);
1347 if (IS_ERR(*skb)) {
1348 err = PTR_ERR(*skb);
1349 goto unmap;
1350 }
1351 reqctx->processed = bytes;
1352 reqctx->last_req_len = bytes;
1353 reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1354
1355 return 0;
1356unmap:
1357 chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1358error:
1359 return err;
1360}
1361
1362static int chcr_aes_encrypt(struct skcipher_request *req)
1363{
1364 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1365 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1366 struct chcr_dev *dev = c_ctx(tfm)->dev;
1367 struct sk_buff *skb = NULL;
1368 int err;
1369 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1370 struct chcr_context *ctx = c_ctx(tfm);
1371 unsigned int cpu;
1372
1373 cpu = get_cpu();
1374 reqctx->txqidx = cpu % ctx->ntxq;
1375 reqctx->rxqidx = cpu % ctx->nrxq;
1376 put_cpu();
1377
1378 err = chcr_inc_wrcount(dev);
1379 if (err)
1380 return -ENXIO;
1381 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1382 reqctx->txqidx) &&
1383 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1384 err = -ENOSPC;
1385 goto error;
1386 }
1387
1388 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1389 &skb, CHCR_ENCRYPT_OP);
1390 if (err || !skb)
1391 return err;
1392 skb->dev = u_ctx->lldi.ports[0];
1393 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1394 chcr_send_wr(skb);
1395 if (get_cryptoalg_subtype(tfm) ==
1396 CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1397 CRYPTO_TFM_REQ_MAY_SLEEP) {
1398 reqctx->partial_req = 1;
1399 wait_for_completion(&ctx->cbc_aes_aio_done);
1400 }
1401 return -EINPROGRESS;
1402error:
1403 chcr_dec_wrcount(dev);
1404 return err;
1405}
1406
1407static int chcr_aes_decrypt(struct skcipher_request *req)
1408{
1409 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1410 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1411 struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1412 struct chcr_dev *dev = c_ctx(tfm)->dev;
1413 struct sk_buff *skb = NULL;
1414 int err;
1415 struct chcr_context *ctx = c_ctx(tfm);
1416 unsigned int cpu;
1417
1418 cpu = get_cpu();
1419 reqctx->txqidx = cpu % ctx->ntxq;
1420 reqctx->rxqidx = cpu % ctx->nrxq;
1421 put_cpu();
1422
1423 err = chcr_inc_wrcount(dev);
1424 if (err)
1425 return -ENXIO;
1426
1427 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1428 reqctx->txqidx) &&
1429 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1430 return -ENOSPC;
1431 err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1432 &skb, CHCR_DECRYPT_OP);
1433 if (err || !skb)
1434 return err;
1435 skb->dev = u_ctx->lldi.ports[0];
1436 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1437 chcr_send_wr(skb);
1438 return -EINPROGRESS;
1439}
1440static int chcr_device_init(struct chcr_context *ctx)
1441{
1442 struct uld_ctx *u_ctx = NULL;
1443 int txq_perchan, ntxq;
1444 int err = 0, rxq_perchan;
1445
1446 if (!ctx->dev) {
1447 u_ctx = assign_chcr_device();
1448 if (!u_ctx) {
1449 err = -ENXIO;
1450 pr_err("chcr device assignment fails\n");
1451 goto out;
1452 }
1453 ctx->dev = &u_ctx->dev;
1454 ntxq = u_ctx->lldi.ntxq;
1455 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1456 txq_perchan = ntxq / u_ctx->lldi.nchan;
1457 ctx->ntxq = ntxq;
1458 ctx->nrxq = u_ctx->lldi.nrxq;
1459 ctx->rxq_perchan = rxq_perchan;
1460 ctx->txq_perchan = txq_perchan;
1461 }
1462out:
1463 return err;
1464}
1465
1466static int chcr_init_tfm(struct crypto_skcipher *tfm)
1467{
1468 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1469 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1470 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1471
1472 ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1473 CRYPTO_ALG_NEED_FALLBACK);
1474 if (IS_ERR(ablkctx->sw_cipher)) {
1475 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1476 return PTR_ERR(ablkctx->sw_cipher);
1477 }
1478 init_completion(&ctx->cbc_aes_aio_done);
1479 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1480 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1481
1482 return chcr_device_init(ctx);
1483}
1484
1485static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1486{
1487 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1488 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1489 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1490
1491 /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1492 * cannot be used as the fallback in chcr_handle_cipher_resp
1493 */
1494 ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1495 CRYPTO_ALG_NEED_FALLBACK);
1496 if (IS_ERR(ablkctx->sw_cipher)) {
1497 pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1498 return PTR_ERR(ablkctx->sw_cipher);
1499 }
1500 crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1501 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1502 return chcr_device_init(ctx);
1503}
1504
1505
1506static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1507{
1508 struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1509 struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1510
1511 crypto_free_skcipher(ablkctx->sw_cipher);
1512}
1513
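/* Map a digest size onto the hardware auth-mode parameters. SHA-224 and
 * SHA-384 report the parent digest size (SHA-256/SHA-512) because the
 * partial hash state carried between work requests is the full parent state.
 */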
1514static int get_alg_config(struct algo_param *params,
1515 unsigned int auth_size)
1516{
1517 switch (auth_size) {
1518 case SHA1_DIGEST_SIZE:
1519 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1520 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1521 params->result_size = SHA1_DIGEST_SIZE;
1522 break;
1523 case SHA224_DIGEST_SIZE:
1524 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1525 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1526 params->result_size = SHA256_DIGEST_SIZE;
1527 break;
1528 case SHA256_DIGEST_SIZE:
1529 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1530 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1531 params->result_size = SHA256_DIGEST_SIZE;
1532 break;
1533 case SHA384_DIGEST_SIZE:
1534 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1535 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1536 params->result_size = SHA512_DIGEST_SIZE;
1537 break;
1538 case SHA512_DIGEST_SIZE:
1539 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1540 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1541 params->result_size = SHA512_DIGEST_SIZE;
1542 break;
1543 default:
1544 pr_err("ERROR, unsupported digest size\n");
1545 return -EINVAL;
1546 }
1547 return 0;
1548}
1549
1550static inline void chcr_free_shash(struct crypto_shash *base_hash)
1551{
1552 crypto_free_shash(base_hash);
1553}
1554
1555/**
1556 * create_hash_wr - Create hash work request
1557 * @req: Hash request
1558 * @param: Container for create_hash_wr()'s parameters
1559 */
1560static struct sk_buff *create_hash_wr(struct ahash_request *req,
1561 struct hash_wr_param *param)
1562{
1563 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1564 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1565 struct chcr_context *ctx = h_ctx(tfm);
1566 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1567 struct sk_buff *skb = NULL;
1568 struct uld_ctx *u_ctx = ULD_CTX(ctx);
1569 struct chcr_wr *chcr_req;
1570 struct ulptx_sgl *ulptx;
1571 unsigned int nents = 0, transhdr_len;
1572 unsigned int temp = 0;
1573 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1574 GFP_ATOMIC;
1575 struct adapter *adap = padap(h_ctx(tfm)->dev);
1576 int error = 0;
1577 unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1578
1579 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
1580 transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1581 req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1582 param->sg_len) <= SGE_MAX_WR_LEN;
1583 nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1584 CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1585 nents += param->bfr_len ? 1 : 0;
1586 transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1587 param->sg_len, 16) : (sgl_len(nents) * 8);
1588 transhdr_len = roundup(transhdr_len, 16);
1589
1590 skb = alloc_skb(transhdr_len, flags);
1591 if (!skb)
1592 return ERR_PTR(-ENOMEM);
1593 chcr_req = __skb_put_zero(skb, transhdr_len);
1594
1595 chcr_req->sec_cpl.op_ivinsrtofst =
1596 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1597
1598 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1599
1600 chcr_req->sec_cpl.aadstart_cipherstop_hi =
1601 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1602 chcr_req->sec_cpl.cipherstop_lo_authinsert =
1603 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1604 chcr_req->sec_cpl.seqno_numivs =
1605 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1606 param->opad_needed, 0);
1607
1608 chcr_req->sec_cpl.ivgen_hdrlen =
1609 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1610
1611 memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1612 param->alg_prm.result_size);
1613
1614 if (param->opad_needed)
1615 memcpy(chcr_req->key_ctx.key +
1616 ((param->alg_prm.result_size <= 32) ? 32 :
1617 CHCR_HASH_MAX_DIGEST_SIZE),
1618 hmacctx->opad, param->alg_prm.result_size);
1619
1620 chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1621 param->alg_prm.mk_size, 0,
1622 param->opad_needed,
1623 ((param->kctx_len +
1624 sizeof(chcr_req->key_ctx)) >> 4));
1625 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1626 ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1627 DUMMY_BYTES);
1628 if (param->bfr_len != 0) {
1629 req_ctx->hctx_wr.dma_addr =
1630 dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1631 param->bfr_len, DMA_TO_DEVICE);
1632 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1633 req_ctx->hctx_wr.dma_addr)) {
1634 error = -ENOMEM;
1635 goto err;
1636 }
1637 req_ctx->hctx_wr.dma_len = param->bfr_len;
1638 } else {
1639 req_ctx->hctx_wr.dma_addr = 0;
1640 }
1641 chcr_add_hash_src_ent(req, ulptx, param);
1642 /* Request up to the max WR size */
1643 temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1644 (param->sg_len + param->bfr_len) : 0);
1645 atomic_inc(&adap->chcr_stats.digest_rqst);
1646 create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1647 param->hash_size, transhdr_len,
1648 temp, 0);
1649 req_ctx->hctx_wr.skb = skb;
1650 return skb;
1651err:
1652 kfree_skb(skb);
1653 return ERR_PTR(error);
1654}
1655
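/* .update handler: data is sent to the hardware only in whole blocks;
 * anything smaller than a block is buffered in reqbfr, and the trailing
 * remainder of this request is copied into the spare buffer for the next
 * call.
 */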
1656static int chcr_ahash_update(struct ahash_request *req)
1657{
1658 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1659 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1660 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1661 struct chcr_context *ctx = h_ctx(rtfm);
1662 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1663 struct sk_buff *skb;
1664 u8 remainder = 0, bs;
1665 unsigned int nbytes = req->nbytes;
1666 struct hash_wr_param params;
1667 int error;
1668 unsigned int cpu;
1669
1670 cpu = get_cpu();
1671 req_ctx->txqidx = cpu % ctx->ntxq;
1672 req_ctx->rxqidx = cpu % ctx->nrxq;
1673 put_cpu();
1674
1675 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1676
1677 if (nbytes + req_ctx->reqlen >= bs) {
1678 remainder = (nbytes + req_ctx->reqlen) % bs;
1679 nbytes = nbytes + req_ctx->reqlen - remainder;
1680 } else {
1681 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1682 + req_ctx->reqlen, nbytes, 0);
1683 req_ctx->reqlen += nbytes;
1684 return 0;
1685 }
1686 error = chcr_inc_wrcount(dev);
1687 if (error)
1688 return -ENXIO;
1689 /* Detach state for CHCR means lldi or padap is freed. Increasing the
1690 * in-flight count for dev guarantees that lldi and padap remain valid
1691 */
1692 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1693 req_ctx->txqidx) &&
1694 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1695 error = -ENOSPC;
1696 goto err;
1697 }
1698
1699 chcr_init_hctx_per_wr(req_ctx);
1700 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1701 if (error) {
1702 error = -ENOMEM;
1703 goto err;
1704 }
1705 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1708 HASH_SPACE_LEFT(params.kctx_len), 0);
1709 if (params.sg_len > req->nbytes)
1710 params.sg_len = req->nbytes;
1711 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1712 req_ctx->reqlen;
1713 params.opad_needed = 0;
1714 params.more = 1;
1715 params.last = 0;
1716 params.bfr_len = req_ctx->reqlen;
1717 params.scmd1 = 0;
1718 req_ctx->hctx_wr.srcsg = req->src;
1719
1720 params.hash_size = params.alg_prm.result_size;
1721 req_ctx->data_len += params.sg_len + params.bfr_len;
1722 skb = create_hash_wr(req, &params);
1723 if (IS_ERR(skb)) {
1724 error = PTR_ERR(skb);
1725 goto unmap;
1726 }
1727
1728 req_ctx->hctx_wr.processed += params.sg_len;
1729 if (remainder) {
1730 /* Swap buffers */
1731 swap(req_ctx->reqbfr, req_ctx->skbfr);
1732 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1733 req_ctx->reqbfr, remainder, req->nbytes -
1734 remainder);
1735 }
1736 req_ctx->reqlen = remainder;
1737 skb->dev = u_ctx->lldi.ports[0];
1738 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1739 chcr_send_wr(skb);
1740 return -EINPROGRESS;
1741unmap:
1742 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1743err:
1744 chcr_dec_wrcount(dev);
1745 return error;
1746}
1747
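/* Build the final padding block by hand: a 0x80 terminator followed by the
 * 64-bit message bit-length (scmd1 << 3) at the end of a 64- or 128-byte
 * block.
 */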
1748static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1749{
1750 memset(bfr_ptr, 0, bs);
1751 *bfr_ptr = 0x80;
1752 if (bs == 64)
1753 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1754 else
1755 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1756}
1757
1758static int chcr_ahash_final(struct ahash_request *req)
1759{
1760 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1761 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1762 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1763 struct hash_wr_param params;
1764 struct sk_buff *skb;
1765 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1766 struct chcr_context *ctx = h_ctx(rtfm);
1767 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1768 int error;
1769 unsigned int cpu;
1770
1771 cpu = get_cpu();
1772 req_ctx->txqidx = cpu % ctx->ntxq;
1773 req_ctx->rxqidx = cpu % ctx->nrxq;
1774 put_cpu();
1775
1776 error = chcr_inc_wrcount(dev);
1777 if (error)
1778 return -ENXIO;
1779
1780 chcr_init_hctx_per_wr(req_ctx);
1781 if (is_hmac(crypto_ahash_tfm(rtfm)))
1782 params.opad_needed = 1;
1783 else
1784 params.opad_needed = 0;
1785 params.sg_len = 0;
1786 req_ctx->hctx_wr.isfinal = 1;
1787 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1788 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1789 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1790 params.opad_needed = 1;
1791 params.kctx_len *= 2;
1792 } else {
1793 params.opad_needed = 0;
1794 }
1795
1796 req_ctx->hctx_wr.result = 1;
1797 params.bfr_len = req_ctx->reqlen;
1798 req_ctx->data_len += params.bfr_len + params.sg_len;
1799 req_ctx->hctx_wr.srcsg = req->src;
1800 if (req_ctx->reqlen == 0) {
1801 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1802 params.last = 0;
1803 params.more = 1;
1804 params.scmd1 = 0;
1805 params.bfr_len = bs;
1806
1807 } else {
1808 params.scmd1 = req_ctx->data_len;
1809 params.last = 1;
1810 params.more = 0;
1811 }
1812 params.hash_size = crypto_ahash_digestsize(rtfm);
1813 skb = create_hash_wr(req, &params);
1814 if (IS_ERR(skb)) {
1815 error = PTR_ERR(skb);
1816 goto err;
1817 }
1818 req_ctx->reqlen = 0;
1819 skb->dev = u_ctx->lldi.ports[0];
1820 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1821 chcr_send_wr(skb);
1822 return -EINPROGRESS;
1823err:
1824 chcr_dec_wrcount(dev);
1825 return error;
1826}
1827
1828static int chcr_ahash_finup(struct ahash_request *req)
1829{
1830 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1831 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1832 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1833 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1834 struct chcr_context *ctx = h_ctx(rtfm);
1835 struct sk_buff *skb;
1836 struct hash_wr_param params;
1837 u8 bs;
1838 int error;
1839 unsigned int cpu;
1840
1841 cpu = get_cpu();
1842 req_ctx->txqidx = cpu % ctx->ntxq;
1843 req_ctx->rxqidx = cpu % ctx->nrxq;
1844 put_cpu();
1845
1846 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1847 error = chcr_inc_wrcount(dev);
1848 if (error)
1849 return -ENXIO;
1850
1851 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1852 req_ctx->txqidx) &&
1853 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1854 error = -ENOSPC;
1855 goto err;
1856 }
1857 chcr_init_hctx_per_wr(req_ctx);
1858 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1859 if (error) {
1860 error = -ENOMEM;
1861 goto err;
1862 }
1863
1864 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1865 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1866 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1867 params.kctx_len *= 2;
1868 params.opad_needed = 1;
1869 } else {
1870 params.opad_needed = 0;
1871 }
1872
1873 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1874 HASH_SPACE_LEFT(params.kctx_len), 0);
1875 if (params.sg_len < req->nbytes) {
1876 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1877 params.kctx_len /= 2;
1878 params.opad_needed = 0;
1879 }
1880 params.last = 0;
1881 params.more = 1;
1882 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1883 - req_ctx->reqlen;
1884 params.hash_size = params.alg_prm.result_size;
1885 params.scmd1 = 0;
1886 } else {
1887 params.last = 1;
1888 params.more = 0;
1889 params.sg_len = req->nbytes;
1890 params.hash_size = crypto_ahash_digestsize(rtfm);
1891 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1892 params.sg_len;
1893 }
1894 params.bfr_len = req_ctx->reqlen;
1895 req_ctx->data_len += params.bfr_len + params.sg_len;
1896 req_ctx->hctx_wr.result = 1;
1897 req_ctx->hctx_wr.srcsg = req->src;
1898 if ((req_ctx->reqlen + req->nbytes) == 0) {
1899 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1900 params.last = 0;
1901 params.more = 1;
1902 params.scmd1 = 0;
1903 params.bfr_len = bs;
1904 }
1905 skb = create_hash_wr(req, &params);
1906 if (IS_ERR(skb)) {
1907 error = PTR_ERR(skb);
1908 goto unmap;
1909 }
1910 req_ctx->reqlen = 0;
1911 req_ctx->hctx_wr.processed += params.sg_len;
1912 skb->dev = u_ctx->lldi.ports[0];
1913 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1914 chcr_send_wr(skb);
1915 return -EINPROGRESS;
1916unmap:
1917 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1918err:
1919 chcr_dec_wrcount(dev);
1920 return error;
1921}
1922
1923static int chcr_hmac_init(struct ahash_request *areq);
1924static int chcr_sha_init(struct ahash_request *areq);
1925
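/* .digest handler: performs init + update + final in one request; sources
 * that do not fit in a single work request are continued from the
 * completion path via chcr_ahash_continue().
 */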
1926static int chcr_ahash_digest(struct ahash_request *req)
1927{
1928 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1929 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1930 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1931 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1932 struct chcr_context *ctx = h_ctx(rtfm);
1933 struct sk_buff *skb;
1934 struct hash_wr_param params;
1935 u8 bs;
1936 int error;
1937 unsigned int cpu;
1938
1939 cpu = get_cpu();
1940 req_ctx->txqidx = cpu % ctx->ntxq;
1941 req_ctx->rxqidx = cpu % ctx->nrxq;
1942 put_cpu();
1943
1944 if (is_hmac(crypto_ahash_tfm(rtfm)))
1945 chcr_hmac_init(req);
1946 else
1947 chcr_sha_init(req);
1948
1949 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1950 error = chcr_inc_wrcount(dev);
1951 if (error)
1952 return -ENXIO;
1953
1954 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1955 req_ctx->txqidx) &&
1956 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1957 error = -ENOSPC;
1958 goto err;
1959 }
1960
1961 chcr_init_hctx_per_wr(req_ctx);
1962 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1963 if (error) {
1964 error = -ENOMEM;
1965 goto err;
1966 }
1967
1968 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1969 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1970 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1971 params.kctx_len *= 2;
1972 params.opad_needed = 1;
1973 } else {
1974 params.opad_needed = 0;
1975 }
1976 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1977 HASH_SPACE_LEFT(params.kctx_len), 0);
1978 if (params.sg_len < req->nbytes) {
1979 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1980 params.kctx_len /= 2;
1981 params.opad_needed = 0;
1982 }
1983 params.last = 0;
1984 params.more = 1;
1985 params.scmd1 = 0;
1986 params.sg_len = rounddown(params.sg_len, bs);
1987 params.hash_size = params.alg_prm.result_size;
1988 } else {
1989 params.sg_len = req->nbytes;
1990 params.hash_size = crypto_ahash_digestsize(rtfm);
1991 params.last = 1;
1992 params.more = 0;
1993 params.scmd1 = req->nbytes + req_ctx->data_len;
1994
1995 }
1996 params.bfr_len = 0;
1997 req_ctx->hctx_wr.result = 1;
1998 req_ctx->hctx_wr.srcsg = req->src;
1999 req_ctx->data_len += params.bfr_len + params.sg_len;
2000
2001 if (req->nbytes == 0) {
2002 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2003 params.more = 1;
2004 params.bfr_len = bs;
2005 }
2006
2007 skb = create_hash_wr(req, &params);
2008 if (IS_ERR(skb)) {
2009 error = PTR_ERR(skb);
2010 goto unmap;
2011 }
2012 req_ctx->hctx_wr.processed += params.sg_len;
2013 skb->dev = u_ctx->lldi.ports[0];
2014 set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2015 chcr_send_wr(skb);
2016 return -EINPROGRESS;
2017unmap:
2018 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2019err:
2020 chcr_dec_wrcount(dev);
2021 return error;
2022}
2023
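/* Issue the next hash work request for a transfer that did not fit in a
 * single WR; called from the completion path until req->nbytes has been
 * consumed.
 */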
2024static int chcr_ahash_continue(struct ahash_request *req)
2025{
2026 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2027 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2028 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2029 struct chcr_context *ctx = h_ctx(rtfm);
2030 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2031 struct sk_buff *skb;
2032 struct hash_wr_param params;
2033 u8 bs;
2034 int error;
2035 unsigned int cpu;
2036
2037 cpu = get_cpu();
2038 reqctx->txqidx = cpu % ctx->ntxq;
2039 reqctx->rxqidx = cpu % ctx->nrxq;
2040 put_cpu();
2041
2042 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2043 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2044 params.kctx_len = roundup(params.alg_prm.result_size, 16);
2045 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2046 params.kctx_len *= 2;
2047 params.opad_needed = 1;
2048 } else {
2049 params.opad_needed = 0;
2050 }
2051 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2052 HASH_SPACE_LEFT(params.kctx_len),
2053 hctx_wr->src_ofst);
2054 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2055 params.sg_len = req->nbytes - hctx_wr->processed;
2056 if (!hctx_wr->result ||
2057 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2058 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2059 params.kctx_len /= 2;
2060 params.opad_needed = 0;
2061 }
2062 params.last = 0;
2063 params.more = 1;
2064 params.sg_len = rounddown(params.sg_len, bs);
2065 params.hash_size = params.alg_prm.result_size;
2066 params.scmd1 = 0;
2067 } else {
2068 params.last = 1;
2069 params.more = 0;
2070 params.hash_size = crypto_ahash_digestsize(rtfm);
2071 params.scmd1 = reqctx->data_len + params.sg_len;
2072 }
2073 params.bfr_len = 0;
2074 reqctx->data_len += params.sg_len;
2075 skb = create_hash_wr(req, &params);
2076 if (IS_ERR(skb)) {
2077 error = PTR_ERR(skb);
2078 goto err;
2079 }
2080 hctx_wr->processed += params.sg_len;
2081 skb->dev = u_ctx->lldi.ports[0];
2082 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2083 chcr_send_wr(skb);
2084 return 0;
2085err:
2086 return error;
2087}
2088
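/* Completion handler for hash work requests: copy out the final digest or
 * save the partial hash, then either complete the request or kick off the
 * next chunk.
 */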
2089static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2090 unsigned char *input,
2091 int err)
2092{
2093 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2094 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2095 int digestsize, updated_digestsize;
2096 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2097 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2098 struct chcr_dev *dev = h_ctx(tfm)->dev;
2099
2100 if (input == NULL)
2101 goto out;
2102 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2103 updated_digestsize = digestsize;
2104 if (digestsize == SHA224_DIGEST_SIZE)
2105 updated_digestsize = SHA256_DIGEST_SIZE;
2106 else if (digestsize == SHA384_DIGEST_SIZE)
2107 updated_digestsize = SHA512_DIGEST_SIZE;
2108
2109 if (hctx_wr->dma_addr) {
2110 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2111 hctx_wr->dma_len, DMA_TO_DEVICE);
2112 hctx_wr->dma_addr = 0;
2113 }
2114 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2115 req->nbytes)) {
2116 if (hctx_wr->result == 1) {
2117 hctx_wr->result = 0;
2118 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2119 digestsize);
2120 } else {
2121 memcpy(reqctx->partial_hash,
2122 input + sizeof(struct cpl_fw6_pld),
2123 updated_digestsize);
2124
2125 }
2126 goto unmap;
2127 }
2128 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2129 updated_digestsize);
2130
2131 err = chcr_ahash_continue(req);
2132 if (err)
2133 goto unmap;
2134 return;
2135unmap:
2136 if (hctx_wr->is_sg_map)
2137 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2138
2139
2140out:
2141 chcr_dec_wrcount(dev);
2142 ahash_request_complete(req, err);
2143}
2144
2145/*
2146 * chcr_handle_resp - Dispatch a completed request to its response handler
2147 * @req: crypto request
2148 */
2149int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2150 int err)
2151{
2152 struct crypto_tfm *tfm = req->tfm;
2153 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2154 struct adapter *adap = padap(ctx->dev);
2155
2156 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2157 case CRYPTO_ALG_TYPE_AEAD:
2158 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2159 break;
2160
2161 case CRYPTO_ALG_TYPE_SKCIPHER:
2162 chcr_handle_cipher_resp(skcipher_request_cast(req),
2163 input, err);
2164 break;
2165 case CRYPTO_ALG_TYPE_AHASH:
2166 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2167 }
2168 atomic_inc(&adap->chcr_stats.complete);
2169 return err;
2170}
2171static int chcr_ahash_export(struct ahash_request *areq, void *out)
2172{
2173 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2174 struct chcr_ahash_req_ctx *state = out;
2175
2176 state->reqlen = req_ctx->reqlen;
2177 state->data_len = req_ctx->data_len;
2178 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2179 memcpy(state->partial_hash, req_ctx->partial_hash,
2180 CHCR_HASH_MAX_DIGEST_SIZE);
2181 chcr_init_hctx_per_wr(state);
2182 return 0;
2183}
2184
2185static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2186{
2187 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2188 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2189
2190 req_ctx->reqlen = state->reqlen;
2191 req_ctx->data_len = state->data_len;
2192 req_ctx->reqbfr = req_ctx->bfr1;
2193 req_ctx->skbfr = req_ctx->bfr2;
2194 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2195 memcpy(req_ctx->partial_hash, state->partial_hash,
2196 CHCR_HASH_MAX_DIGEST_SIZE);
2197 chcr_init_hctx_per_wr(req_ctx);
2198 return 0;
2199}
2200
2201static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2202 unsigned int keylen)
2203{
2204 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2205 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2206 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2207 unsigned int i, err = 0, updated_digestsize;
2208
2209 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2210
2211 /* Use the key to calculate the ipad and opad. The ipad will be sent with
2212 * the first request's data and the opad with the final hash result; they
2213 * are kept in hmacctx->ipad and hmacctx->opad respectively
2214 */
2215 shash->tfm = hmacctx->base_hash;
2216 if (keylen > bs) {
2217 err = crypto_shash_digest(shash, key, keylen,
2218 hmacctx->ipad);
2219 if (err)
2220 goto out;
2221 keylen = digestsize;
2222 } else {
2223 memcpy(hmacctx->ipad, key, keylen);
2224 }
2225 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2226 unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
2227 "fortified memcpy causes -Wrestrict warning");
2228
2229 for (i = 0; i < bs / sizeof(int); i++) {
2230 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2231 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2232 }
2233
2234 updated_digestsize = digestsize;
2235 if (digestsize == SHA224_DIGEST_SIZE)
2236 updated_digestsize = SHA256_DIGEST_SIZE;
2237 else if (digestsize == SHA384_DIGEST_SIZE)
2238 updated_digestsize = SHA512_DIGEST_SIZE;
2239 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2240 hmacctx->ipad, digestsize);
2241 if (err)
2242 goto out;
2243 chcr_change_order(hmacctx->ipad, updated_digestsize);
2244
2245 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2246 hmacctx->opad, digestsize);
2247 if (err)
2248 goto out;
2249 chcr_change_order(hmacctx->opad, updated_digestsize);
2250out:
2251 return err;
2252}
2253
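/* XTS setkey: the caller supplies the data key followed by the tweak key.
 * For AES-192-XTS (48-byte keys) each half is zero-padded to 32 bytes so
 * the hardware sees 16-byte-aligned keys.
 */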
2254static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2255 unsigned int key_len)
2256{
2257 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2258 unsigned short context_size = 0;
2259 int err;
2260
2261 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2262 if (err)
2263 goto badkey_err;
2264
2265 memcpy(ablkctx->key, key, key_len);
2266 ablkctx->enckey_len = key_len;
2267 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2268 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2269 /* Both keys for XTS must be aligned to a 16-byte boundary by padding
2270 * with zeros, so each 24-byte key is padded with 8 zero bytes.
2271 */
2272 if (key_len == 48) {
2273 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2274 + 16) >> 4;
2275 memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2276 memset(ablkctx->key + 24, 0, 8);
2277 memset(ablkctx->key + 56, 0, 8);
2278 ablkctx->enckey_len = 64;
2279 ablkctx->key_ctx_hdr =
2280 FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2281 CHCR_KEYCTX_NO_KEY, 1,
2282 0, context_size);
2283 } else {
2284 ablkctx->key_ctx_hdr =
2285 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2286 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2287 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2288 CHCR_KEYCTX_NO_KEY, 1,
2289 0, context_size);
2290 }
2291 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2292 return 0;
2293badkey_err:
2294 ablkctx->enckey_len = 0;
2295
2296 return err;
2297}
2298
2299static int chcr_sha_init(struct ahash_request *areq)
2300{
2301 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2302 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2303 int digestsize = crypto_ahash_digestsize(tfm);
2304
2305 req_ctx->data_len = 0;
2306 req_ctx->reqlen = 0;
2307 req_ctx->reqbfr = req_ctx->bfr1;
2308 req_ctx->skbfr = req_ctx->bfr2;
2309 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2310
2311 return 0;
2312}
2313
2314static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2315{
2316 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2317 sizeof(struct chcr_ahash_req_ctx));
2318 return chcr_device_init(crypto_tfm_ctx(tfm));
2319}
2320
2321static int chcr_hmac_init(struct ahash_request *areq)
2322{
2323 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2324 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2325 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2326 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2327 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2328
2329 chcr_sha_init(areq);
2330 req_ctx->data_len = bs;
2331 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2332 if (digestsize == SHA224_DIGEST_SIZE)
2333 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2334 SHA256_DIGEST_SIZE);
2335 else if (digestsize == SHA384_DIGEST_SIZE)
2336 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2337 SHA512_DIGEST_SIZE);
2338 else
2339 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2340 digestsize);
2341 }
2342 return 0;
2343}
2344
2345static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2346{
2347 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2348 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2349 unsigned int digestsize =
2350 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2351
2352 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2353 sizeof(struct chcr_ahash_req_ctx));
2354 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2355 if (IS_ERR(hmacctx->base_hash))
2356 return PTR_ERR(hmacctx->base_hash);
2357 return chcr_device_init(crypto_tfm_ctx(tfm));
2358}
2359
2360static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2361{
2362 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2363 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2364
2365 if (hmacctx->base_hash) {
2366 chcr_free_shash(hmacctx->base_hash);
2367 hmacctx->base_hash = NULL;
2368 }
2369}
2370
2371inline void chcr_aead_common_exit(struct aead_request *req)
2372{
2373 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2374 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2375 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2376
2377 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2378}
2379
2380static int chcr_aead_common_init(struct aead_request *req)
2381{
2382 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2383 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2384 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2385 unsigned int authsize = crypto_aead_authsize(tfm);
2386 int error = -EINVAL;
2387
2388 /* validate key size */
2389 if (aeadctx->enckey_len == 0)
2390 goto err;
2391 if (reqctx->op && req->cryptlen < authsize)
2392 goto err;
2393 if (reqctx->b0_len)
2394 reqctx->scratch_pad = reqctx->iv + IV;
2395 else
2396 reqctx->scratch_pad = NULL;
2397
2398 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2399 reqctx->op);
2400 if (error) {
2401 error = -ENOMEM;
2402 goto err;
2403 }
2404
2405 return 0;
2406err:
2407 return error;
2408}
2409
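/* Decide whether an AEAD request must use the software fallback: empty
 * payload, too many destination SG entries, AAD larger than the hardware
 * limit, or a work request that would exceed SGE_MAX_WR_LEN.
 */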
2410static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2411 int aadmax, int wrlen,
2412 unsigned short op_type)
2413{
2414 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2415
2416 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2417 dst_nents > MAX_DSGL_ENT ||
2418 (req->assoclen > aadmax) ||
2419 (wrlen > SGE_MAX_WR_LEN))
2420 return 1;
2421 return 0;
2422}
2423
2424static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2425{
2426 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2427 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2428 struct aead_request *subreq = aead_request_ctx_dma(req);
2429
2430 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2431 aead_request_set_callback(subreq, req->base.flags,
2432 req->base.complete, req->base.data);
2433 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2434 req->iv);
2435 aead_request_set_ad(subreq, req->assoclen);
2436 return op_type ? crypto_aead_decrypt(subreq) :
2437 crypto_aead_encrypt(subreq);
2438}
2439
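/* Build the work request for the authenc (cipher plus hash) AEAD subtypes,
 * including the NULL-cipher and CTR variants handled below.
 */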
2440static struct sk_buff *create_authenc_wr(struct aead_request *req,
2441 unsigned short qid,
2442 int size)
2443{
2444 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2445 struct chcr_context *ctx = a_ctx(tfm);
2446 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2447 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2448 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2449 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2450 struct sk_buff *skb = NULL;
2451 struct chcr_wr *chcr_req;
2452 struct cpl_rx_phys_dsgl *phys_cpl;
2453 struct ulptx_sgl *ulptx;
2454 unsigned int transhdr_len;
2455 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2456 unsigned int kctx_len = 0, dnents, snents;
2457 unsigned int authsize = crypto_aead_authsize(tfm);
2458 int error = -EINVAL;
2459 u8 *ivptr;
2460 int null = 0;
2461 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2462 GFP_ATOMIC;
2463 struct adapter *adap = padap(ctx->dev);
2464 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2465
2466 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2467 if (req->cryptlen == 0)
2468 return NULL;
2469
2470 reqctx->b0_len = 0;
2471 error = chcr_aead_common_init(req);
2472 if (error)
2473 return ERR_PTR(error);
2474
2475 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2476 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2477 null = 1;
2478 }
2479 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2480 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2481 dnents += MIN_AUTH_SG; // For IV
2482 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2483 CHCR_SRC_SG_SIZE, 0);
2484 dst_size = get_space_for_phys_dsgl(dnents);
2485 kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2486 - sizeof(chcr_req->key_ctx);
2487 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2488 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2489 SGE_MAX_WR_LEN;
2490 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2491 : (sgl_len(snents) * 8);
2492 transhdr_len += temp;
2493 transhdr_len = roundup(transhdr_len, 16);
2494
2495 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2496 transhdr_len, reqctx->op)) {
2497 atomic_inc(&adap->chcr_stats.fallback);
2498 chcr_aead_common_exit(req);
2499 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2500 }
2501 skb = alloc_skb(transhdr_len, flags);
2502 if (!skb) {
2503 error = -ENOMEM;
2504 goto err;
2505 }
2506
2507 chcr_req = __skb_put_zero(skb, transhdr_len);
2508
2509 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2510
2511 /*
2512 * Input order is AAD, IV and payload, where the IV is included as
2513 * part of the authdata. All other fields are filled according
2514 * to the hardware spec
2515 */
2516 chcr_req->sec_cpl.op_ivinsrtofst =
2517 FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2518 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2519 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2520 null ? 0 : 1 + IV,
2521 null ? 0 : IV + req->assoclen,
2522 req->assoclen + IV + 1,
2523 (temp & 0x1F0) >> 4);
2524 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2525 temp & 0xF,
2526 null ? 0 : req->assoclen + IV + 1,
2527 temp, temp);
2528 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2529 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2530 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2531 else
2532 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2533 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2534 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2535 temp,
2536 actx->auth_mode, aeadctx->hmac_ctrl,
2537 IV >> 1);
2538 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2539 0, 0, dst_size);
2540
2541 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2542 if (reqctx->op == CHCR_ENCRYPT_OP ||
2543 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2544 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2545 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2546 aeadctx->enckey_len);
2547 else
2548 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2549 aeadctx->enckey_len);
2550
2551 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2552 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2553 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2554 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2555 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2556 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2557 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2558 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2559 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2560 CTR_RFC3686_IV_SIZE);
2561 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2562 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2563 } else {
2564 memcpy(ivptr, req->iv, IV);
2565 }
2566 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2567 chcr_add_aead_src_ent(req, ulptx);
2568 atomic_inc(&adap->chcr_stats.cipher_rqst);
2569 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2570 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2571 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2572 transhdr_len, temp, 0);
2573 reqctx->skb = skb;
2574
2575 return skb;
2576err:
2577 chcr_aead_common_exit(req);
2578
2579 return ERR_PTR(error);
2580}
2581
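/* DMA-map the IV (plus the B0 block when present) and the source and
 * destination scatterlists for an AEAD request.
 */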
2582int chcr_aead_dma_map(struct device *dev,
2583 struct aead_request *req,
2584 unsigned short op_type)
2585{
2586 int error;
2587 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2588 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2589 unsigned int authsize = crypto_aead_authsize(tfm);
2590 int src_len, dst_len;
2591
2592 /* calculate and handle src and dst sg lengths separately
2593 * for in-place and out-of-place operations
2594 */
2595 if (req->src == req->dst) {
2596 src_len = req->assoclen + req->cryptlen + (op_type ?
2597 0 : authsize);
2598 dst_len = src_len;
2599 } else {
2600 src_len = req->assoclen + req->cryptlen;
2601 dst_len = req->assoclen + req->cryptlen + (op_type ?
2602 -authsize : authsize);
2603 }
2604
2605 if (!req->cryptlen || !src_len || !dst_len)
2606 return 0;
2607 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2608 DMA_BIDIRECTIONAL);
2609 if (dma_mapping_error(dev, reqctx->iv_dma))
2610 return -ENOMEM;
2611 if (reqctx->b0_len)
2612 reqctx->b0_dma = reqctx->iv_dma + IV;
2613 else
2614 reqctx->b0_dma = 0;
2615 if (req->src == req->dst) {
2616 error = dma_map_sg(dev, req->src,
2617 sg_nents_for_len(req->src, src_len),
2618 DMA_BIDIRECTIONAL);
2619 if (!error)
2620 goto err;
2621 } else {
2622 error = dma_map_sg(dev, req->src,
2623 sg_nents_for_len(req->src, src_len),
2624 DMA_TO_DEVICE);
2625 if (!error)
2626 goto err;
2627 error = dma_map_sg(dev, req->dst,
2628 sg_nents_for_len(req->dst, dst_len),
2629 DMA_FROM_DEVICE);
2630 if (!error) {
2631 dma_unmap_sg(dev, req->src,
2632 sg_nents_for_len(req->src, src_len),
2633 DMA_TO_DEVICE);
2634 goto err;
2635 }
2636 }
2637
2638 return 0;
2639err:
2640 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2641 return -ENOMEM;
2642}
2643
2644void chcr_aead_dma_unmap(struct device *dev,
2645 struct aead_request *req,
2646 unsigned short op_type)
2647{
2648 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2649 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2650 unsigned int authsize = crypto_aead_authsize(tfm);
2651 int src_len, dst_len;
2652
2653 /* calculate and handle src and dst sg lengths separately
2654 * for in-place and out-of-place operations
2655 */
2656 if (req->src == req->dst) {
2657 src_len = req->assoclen + req->cryptlen + (op_type ?
2658 0 : authsize);
2659 dst_len = src_len;
2660 } else {
2661 src_len = req->assoclen + req->cryptlen;
2662 dst_len = req->assoclen + req->cryptlen + (op_type ?
2663 -authsize : authsize);
2664 }
2665
2666 if (!req->cryptlen || !src_len || !dst_len)
2667 return;
2668
2669 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2670 DMA_BIDIRECTIONAL);
2671 if (req->src == req->dst) {
2672 dma_unmap_sg(dev, req->src,
2673 sg_nents_for_len(req->src, src_len),
2674 DMA_BIDIRECTIONAL);
2675 } else {
2676 dma_unmap_sg(dev, req->src,
2677 sg_nents_for_len(req->src, src_len),
2678 DMA_TO_DEVICE);
2679 dma_unmap_sg(dev, req->dst,
2680 sg_nents_for_len(req->dst, dst_len),
2681 DMA_FROM_DEVICE);
2682 }
2683}
2684
2685void chcr_add_aead_src_ent(struct aead_request *req,
2686 struct ulptx_sgl *ulptx)
2687{
2688 struct ulptx_walk ulp_walk;
2689 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2690
2691 if (reqctx->imm) {
2692 u8 *buf = (u8 *)ulptx;
2693
2694 if (reqctx->b0_len) {
2695 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2696 buf += reqctx->b0_len;
2697 }
2698 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2699 buf, req->cryptlen + req->assoclen, 0);
2700 } else {
2701 ulptx_walk_init(&ulp_walk, ulptx);
2702 if (reqctx->b0_len)
2703 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2704 reqctx->b0_dma);
2705 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2706 req->assoclen, 0);
2707 ulptx_walk_end(&ulp_walk);
2708 }
2709}
2710
2711void chcr_add_aead_dst_ent(struct aead_request *req,
2712 struct cpl_rx_phys_dsgl *phys_cpl,
2713 unsigned short qid)
2714{
2715 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2716 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2717 struct dsgl_walk dsgl_walk;
2718 unsigned int authsize = crypto_aead_authsize(tfm);
2719 struct chcr_context *ctx = a_ctx(tfm);
2720 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2721 u32 temp;
2722 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2723
2724 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2725 dsgl_walk_init(&dsgl_walk, phys_cpl);
2726 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2727 temp = req->assoclen + req->cryptlen +
2728 (reqctx->op ? -authsize : authsize);
2729 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2730 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2731}
2732
2733void chcr_add_cipher_src_ent(struct skcipher_request *req,
2734 void *ulptx,
2735 struct cipher_wr_param *wrparam)
2736{
2737 struct ulptx_walk ulp_walk;
2738 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2739 u8 *buf = ulptx;
2740
2741 memcpy(buf, reqctx->iv, IV);
2742 buf += IV;
2743 if (reqctx->imm) {
2744 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2745 buf, wrparam->bytes, reqctx->processed);
2746 } else {
2747 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2748 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2749 reqctx->src_ofst);
2750 reqctx->srcsg = ulp_walk.last_sg;
2751 reqctx->src_ofst = ulp_walk.last_sg_len;
2752 ulptx_walk_end(&ulp_walk);
2753 }
2754}
2755
2756void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2757 struct cpl_rx_phys_dsgl *phys_cpl,
2758 struct cipher_wr_param *wrparam,
2759 unsigned short qid)
2760{
2761 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2762 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2763 struct chcr_context *ctx = c_ctx(tfm);
2764 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2765 struct dsgl_walk dsgl_walk;
2766 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2767
2768 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2769 dsgl_walk_init(&dsgl_walk, phys_cpl);
2770 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2771 reqctx->dst_ofst);
2772 reqctx->dstsg = dsgl_walk.last_sg;
2773 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2774 dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2775}
2776
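/*
 * Add the hash source data: any partially filled request buffer
 * (param->bfr_len) goes first, followed by param->sg_len bytes from the
 * source scatterlist, either copied inline or referenced via a ULPTX SGL.
 */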
2777void chcr_add_hash_src_ent(struct ahash_request *req,
2778 struct ulptx_sgl *ulptx,
2779 struct hash_wr_param *param)
2780{
2781 struct ulptx_walk ulp_walk;
2782 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2783
2784 if (reqctx->hctx_wr.imm) {
2785 u8 *buf = (u8 *)ulptx;
2786
2787 if (param->bfr_len) {
2788 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2789 buf += param->bfr_len;
2790 }
2791
2792 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2793 sg_nents(reqctx->hctx_wr.srcsg), buf,
2794 param->sg_len, 0);
2795 } else {
2796 ulptx_walk_init(&ulp_walk, ulptx);
2797 if (param->bfr_len)
2798 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2799 reqctx->hctx_wr.dma_addr);
2800 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2801 param->sg_len, reqctx->hctx_wr.src_ofst);
2802 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2803 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2804 ulptx_walk_end(&ulp_walk);
2805 }
2806}
2807
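/*
 * Map the hash source scatterlist for DMA.  Note that dma_map_sg()
 * returns the number of mapped entries, so a return value of 0 means the
 * mapping failed.
 */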
2808int chcr_hash_dma_map(struct device *dev,
2809 struct ahash_request *req)
2810{
2811 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2812 int error = 0;
2813
2814 if (!req->nbytes)
2815 return 0;
2816 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2817 DMA_TO_DEVICE);
2818 if (!error)
2819 return -ENOMEM;
2820 req_ctx->hctx_wr.is_sg_map = 1;
2821 return 0;
2822}
2823
2824void chcr_hash_dma_unmap(struct device *dev,
2825 struct ahash_request *req)
2826{
2827 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2828
2829 if (!req->nbytes)
2830 return;
2831
2832 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2833 DMA_TO_DEVICE);
2834 req_ctx->hctx_wr.is_sg_map = 0;
2835
2836}
2837
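/*
 * Map the cipher source and destination scatterlists for DMA; a single
 * bidirectional mapping is used when the request operates in place.
 */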
2838int chcr_cipher_dma_map(struct device *dev,
2839 struct skcipher_request *req)
2840{
2841 int error;
2842
2843 if (req->src == req->dst) {
2844 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2845 DMA_BIDIRECTIONAL);
2846 if (!error)
2847 goto err;
2848 } else {
2849 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2850 DMA_TO_DEVICE);
2851 if (!error)
2852 goto err;
2853 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2854 DMA_FROM_DEVICE);
2855 if (!error) {
2856 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2857 DMA_TO_DEVICE);
2858 goto err;
2859 }
2860 }
2861
2862 return 0;
2863err:
2864 return -ENOMEM;
2865}
2866
2867void chcr_cipher_dma_unmap(struct device *dev,
2868 struct skcipher_request *req)
2869{
2870 if (req->src == req->dst) {
2871 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2872 DMA_BIDIRECTIONAL);
2873 } else {
2874 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2875 DMA_TO_DEVICE);
2876 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2877 DMA_FROM_DEVICE);
2878 }
2879}
2880
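/*
 * Write the CCM message length big-endian into the csize-byte length
 * field at the end of B0 (only the low four bytes are ever needed), as
 * specified by RFC 3610.  For example, with csize == 4 and
 * msglen == 0x1234 the field ends up as 00 00 12 34.
 */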
2881static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2882{
2883 __be32 data;
2884
2885 memset(block, 0, csize);
2886 block += csize;
2887
2888 if (csize >= 4)
2889 csize = 4;
2890 else if (msglen > (unsigned int)(1 << (8 * csize)))
2891 return -EOVERFLOW;
2892
2893 data = cpu_to_be32(msglen);
2894 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2895
2896 return 0;
2897}
2898
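/*
 * Construct the CCM B0 block in the scratch pad from the formatted IV:
 * the flags byte carries L' in bits 0-2, M' = (M - 2) / 2 in bits 3-5 and
 * the Adata bit in bit 6, followed by the nonce and the encoded message
 * length (RFC 3610, A.1).
 */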
2899static int generate_b0(struct aead_request *req, u8 *ivptr,
2900 unsigned short op_type)
2901{
2902 unsigned int l, lp, m;
2903 int rc;
2904 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2905 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2906 u8 *b0 = reqctx->scratch_pad;
2907
2908 m = crypto_aead_authsize(aead);
2909
2910 memcpy(b0, ivptr, 16);
2911
2912 lp = b0[0];
2913 l = lp + 1;
2914
2915 /* set m, bits 3-5 */
2916 *b0 |= (8 * ((m - 2) / 2));
2917
2918 /* set adata, bit 6, if associated data is used */
2919 if (req->assoclen)
2920 *b0 |= 64;
2921 rc = set_msg_len(b0 + 16 - l,
2922 (op_type == CHCR_DECRYPT_OP) ?
2923 req->cryptlen - m : req->cryptlen, l);
2924
2925 return rc;
2926}
2927
2928static inline int crypto_ccm_check_iv(const u8 *iv)
2929{
2930 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2931 if (iv[0] < 1 || iv[0] > 7)
2932 return -EINVAL;
2933
2934 return 0;
2935}
2936
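/*
 * Lay out the 16-byte CCM IV/counter block.  For RFC 4309 the flags byte
 * is fixed to 3 (i.e. L = 4) and the nonce is salt || seq-iv; for plain
 * CCM the caller-supplied IV is used as-is.  The associated-data length
 * is stashed right after B0 in the scratch pad and the counter bytes are
 * zeroed.
 */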
2937static int ccm_format_packet(struct aead_request *req,
2938 u8 *ivptr,
2939 unsigned int sub_type,
2940 unsigned short op_type,
2941 unsigned int assoclen)
2942{
2943 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2944 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2945 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2946 int rc = 0;
2947
2948 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2949 ivptr[0] = 3;
2950 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2951 memcpy(ivptr + 4, req->iv, 8);
2952 memset(ivptr + 12, 0, 4);
2953 } else {
2954 memcpy(ivptr, req->iv, 16);
2955 }
2956 if (assoclen)
2957 put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2958
2959 rc = generate_b0(req, ivptr, op_type);
2960 /* zero the ctr value */
2961 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2962 return rc;
2963}
2964
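/*
 * Fill the CPL_TX_SEC_PDU for CCM: all offsets and the payload length
 * account for the extra CCM bytes (B0 and, when AAD is present, the
 * 2-byte AAD length field) carried ahead of the real associated data.
 */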
2965static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2966 unsigned int dst_size,
2967 struct aead_request *req,
2968 unsigned short op_type)
2969{
2970 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2971 struct chcr_context *ctx = a_ctx(tfm);
2972 struct uld_ctx *u_ctx = ULD_CTX(ctx);
2973 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2974 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
2975 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2976 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2977 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2978 unsigned int ccm_xtra;
2979 unsigned int tag_offset = 0, auth_offset = 0;
2980 unsigned int assoclen;
2981
2982 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2983
2984 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2985 assoclen = req->assoclen - 8;
2986 else
2987 assoclen = req->assoclen;
2988 ccm_xtra = CCM_B0_SIZE +
2989 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2990
2991 auth_offset = req->cryptlen ?
2992 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2993 if (op_type == CHCR_DECRYPT_OP) {
2994 if (crypto_aead_authsize(tfm) != req->cryptlen)
2995 tag_offset = crypto_aead_authsize(tfm);
2996 else
2997 auth_offset = 0;
2998 }
2999
3000 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
3001 sec_cpl->pldlen =
3002 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
3003 /* For CCM there will always be a B0 block, so AAD start is always 1 */
3004 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3005 1 + IV, IV + assoclen + ccm_xtra,
3006 req->assoclen + IV + 1 + ccm_xtra, 0);
3007
3008 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3009 auth_offset, tag_offset,
3010 (op_type == CHCR_ENCRYPT_OP) ? 0 :
3011 crypto_aead_authsize(tfm));
3012 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3013 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3014 cipher_mode, mac_mode,
3015 aeadctx->hmac_ctrl, IV >> 1);
3016
3017 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3018 0, dst_size);
3019}
3020
3021static int aead_ccm_validate_input(unsigned short op_type,
3022 struct aead_request *req,
3023 struct chcr_aead_ctx *aeadctx,
3024 unsigned int sub_type)
3025{
3026 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3027 if (crypto_ccm_check_iv(req->iv)) {
3028 pr_err("CCM: IV check fails\n");
3029 return -EINVAL;
3030 }
3031 } else {
3032 if (req->assoclen != 16 && req->assoclen != 20) {
3033 pr_err("RFC4309: Invalid AAD length %d\n",
3034 req->assoclen);
3035 return -EINVAL;
3036 }
3037 }
3038 return 0;
3039}
3040
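/*
 * Build the CCM/RFC4309 work request.  The source and destination SG
 * budgets reserve extra entries for the IV and B0 block, and requests
 * whose AAD or work-request size exceeds the hardware limits are handed
 * to the software fallback cipher.
 */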
3041static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3042 unsigned short qid,
3043 int size)
3044{
3045 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3046 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3047 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3048 struct sk_buff *skb = NULL;
3049 struct chcr_wr *chcr_req;
3050 struct cpl_rx_phys_dsgl *phys_cpl;
3051 struct ulptx_sgl *ulptx;
3052 unsigned int transhdr_len;
3053 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3054 unsigned int sub_type, assoclen = req->assoclen;
3055 unsigned int authsize = crypto_aead_authsize(tfm);
3056 int error = -EINVAL;
3057 u8 *ivptr;
3058 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3059 GFP_ATOMIC;
3060 struct adapter *adap = padap(a_ctx(tfm)->dev);
3061
3062 sub_type = get_aead_subtype(tfm);
3063 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3064 assoclen -= 8;
3065 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3066 error = chcr_aead_common_init(req);
3067 if (error)
3068 return ERR_PTR(error);
3069
3070 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3071 if (error)
3072 goto err;
3073 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3074 + (reqctx->op ? -authsize : authsize),
3075 CHCR_DST_SG_SIZE, 0);
3076 dnents += MIN_CCM_SG; // For IV and B0
3077 dst_size = get_space_for_phys_dsgl(dnents);
3078 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3079 CHCR_SRC_SG_SIZE, 0);
3080 snents += MIN_CCM_SG; //For B0
3081 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3082 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3083 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3084 reqctx->b0_len) <= SGE_MAX_WR_LEN;
3085 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3086 reqctx->b0_len, 16) :
3087 (sgl_len(snents) * 8);
3088 transhdr_len += temp;
3089 transhdr_len = roundup(transhdr_len, 16);
3090
3091 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3092 reqctx->b0_len, transhdr_len, reqctx->op)) {
3093 atomic_inc(&adap->chcr_stats.fallback);
3094 chcr_aead_common_exit(req);
3095 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3096 }
3097 skb = alloc_skb(transhdr_len, flags);
3098
3099 if (!skb) {
3100 error = -ENOMEM;
3101 goto err;
3102 }
3103
3104 chcr_req = __skb_put_zero(skb, transhdr_len);
3105
3106 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3107
3108 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3109 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3110 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3111 aeadctx->key, aeadctx->enckey_len);
3112
3113 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3114 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3115 ulptx = (struct ulptx_sgl *)(ivptr + IV);
3116 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3117 if (error)
3118 goto dstmap_fail;
3119 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3120 chcr_add_aead_src_ent(req, ulptx);
3121
3122 atomic_inc(&adap->chcr_stats.aead_rqst);
3123 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3124 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3125 reqctx->b0_len) : 0);
3126 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3127 transhdr_len, temp, 0);
3128 reqctx->skb = skb;
3129
3130 return skb;
3131dstmap_fail:
3132 kfree_skb(skb);
3133err:
3134 chcr_aead_common_exit(req);
3135 return ERR_PTR(error);
3136}
3137
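/*
 * Build the GCM/RFC4106 work request: size the source and destination
 * SGLs, fall back to software for oversized AAD or WRs, then emit the
 * SEC_PDU, the key context (cipher key plus the precomputed GHASH H) and
 * the 16-byte initial counter block (salt/IV followed by 0x00000001).
 */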
3138static struct sk_buff *create_gcm_wr(struct aead_request *req,
3139 unsigned short qid,
3140 int size)
3141{
3142 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3143 struct chcr_context *ctx = a_ctx(tfm);
3144 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3145 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3146 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3147 struct sk_buff *skb = NULL;
3148 struct chcr_wr *chcr_req;
3149 struct cpl_rx_phys_dsgl *phys_cpl;
3150 struct ulptx_sgl *ulptx;
3151 unsigned int transhdr_len, dnents = 0, snents;
3152 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3153 unsigned int authsize = crypto_aead_authsize(tfm);
3154 int error = -EINVAL;
3155 u8 *ivptr;
3156 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3157 GFP_ATOMIC;
3158 struct adapter *adap = padap(ctx->dev);
3159 unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3160
3161 rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3162 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3163 assoclen = req->assoclen - 8;
3164
3165 reqctx->b0_len = 0;
3166 error = chcr_aead_common_init(req);
3167 if (error)
3168 return ERR_PTR(error);
3169 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3170 (reqctx->op ? -authsize : authsize),
3171 CHCR_DST_SG_SIZE, 0);
3172 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3173 CHCR_SRC_SG_SIZE, 0);
3174 dnents += MIN_GCM_SG; // For IV
3175 dst_size = get_space_for_phys_dsgl(dnents);
3176 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3177 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3178 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3179 SGE_MAX_WR_LEN;
3180 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3181 (sgl_len(snents) * 8);
3182 transhdr_len += temp;
3183 transhdr_len = roundup(transhdr_len, 16);
3184 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3185 transhdr_len, reqctx->op)) {
3186
3187 atomic_inc(&adap->chcr_stats.fallback);
3188 chcr_aead_common_exit(req);
3189 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3190 }
3191 skb = alloc_skb(transhdr_len, flags);
3192 if (!skb) {
3193 error = -ENOMEM;
3194 goto err;
3195 }
3196
3197 chcr_req = __skb_put_zero(skb, transhdr_len);
3198
3199 //Offset of tag from end
3200 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3201 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3202 rx_channel_id, 2, 1);
3203 chcr_req->sec_cpl.pldlen =
3204 htonl(req->assoclen + IV + req->cryptlen);
3205 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3206 assoclen ? 1 + IV : 0,
3207 assoclen ? IV + assoclen : 0,
3208 req->assoclen + IV + 1, 0);
3209 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3210 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3211 temp, temp);
3212 chcr_req->sec_cpl.seqno_numivs =
3213 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3214 CHCR_ENCRYPT_OP) ? 1 : 0,
3215 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3216 CHCR_SCMD_AUTH_MODE_GHASH,
3217 aeadctx->hmac_ctrl, IV >> 1);
3218 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3219 0, 0, dst_size);
3220 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3221 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3222 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3223 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3224
3225 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3226 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3227 /* prepare a 16 byte iv */
3228 /* S A L T | IV | 0x00000001 */
3229 if (get_aead_subtype(tfm) ==
3230 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3231 memcpy(ivptr, aeadctx->salt, 4);
3232 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3233 } else {
3234 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3235 }
3236 put_unaligned_be32(0x01, &ivptr[12]);
3237 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3238
3239 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3240 chcr_add_aead_src_ent(req, ulptx);
3241 atomic_inc(&adap->chcr_stats.aead_rqst);
3242 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3243 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3244 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3245 transhdr_len, temp, reqctx->verify);
3246 reqctx->skb = skb;
3247 return skb;
3248
3249err:
3250 chcr_aead_common_exit(req);
3251 return ERR_PTR(error);
3252}
3253
3254
3255
3256static int chcr_aead_cra_init(struct crypto_aead *tfm)
3257{
3258 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3259 struct aead_alg *alg = crypto_aead_alg(tfm);
3260
3261 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3262 CRYPTO_ALG_NEED_FALLBACK |
3263 CRYPTO_ALG_ASYNC);
3264 if (IS_ERR(aeadctx->sw_cipher))
3265 return PTR_ERR(aeadctx->sw_cipher);
3266 crypto_aead_set_reqsize_dma(
3267 tfm, max(sizeof(struct chcr_aead_reqctx),
3268 sizeof(struct aead_request) +
3269 crypto_aead_reqsize(aeadctx->sw_cipher)));
3270 return chcr_device_init(a_ctx(tfm));
3271}
3272
3273static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3274{
3275 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3276
3277 crypto_free_aead(aeadctx->sw_cipher);
3278}
3279
3280static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3281 unsigned int authsize)
3282{
3283 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3284
3285 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3286 aeadctx->mayverify = VERIFY_HW;
3287 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3288}
3289static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3290 unsigned int authsize)
3291{
3292 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3293 u32 maxauth = crypto_aead_maxauthsize(tfm);
3294
3295 /* The IPsec authsize for SHA1 is 12, not 10, i.e. maxauthsize / 2 does
3296 * not hold for SHA1, so the authsize == 12 check must come before the
3297 * authsize == (maxauth >> 1) check.
3298 */
3299 if (authsize == ICV_4) {
3300 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3301 aeadctx->mayverify = VERIFY_HW;
3302 } else if (authsize == ICV_6) {
3303 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3304 aeadctx->mayverify = VERIFY_HW;
3305 } else if (authsize == ICV_10) {
3306 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3307 aeadctx->mayverify = VERIFY_HW;
3308 } else if (authsize == ICV_12) {
3309 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3310 aeadctx->mayverify = VERIFY_HW;
3311 } else if (authsize == ICV_14) {
3312 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3313 aeadctx->mayverify = VERIFY_HW;
3314 } else if (authsize == (maxauth >> 1)) {
3315 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3316 aeadctx->mayverify = VERIFY_HW;
3317 } else if (authsize == maxauth) {
3318 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3319 aeadctx->mayverify = VERIFY_HW;
3320 } else {
3321 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3322 aeadctx->mayverify = VERIFY_SW;
3323 }
3324 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3325}
3326
3327
3328static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3329{
3330 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3331
3332 switch (authsize) {
3333 case ICV_4:
3334 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3335 aeadctx->mayverify = VERIFY_HW;
3336 break;
3337 case ICV_8:
3338 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3339 aeadctx->mayverify = VERIFY_HW;
3340 break;
3341 case ICV_12:
3342 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3343 aeadctx->mayverify = VERIFY_HW;
3344 break;
3345 case ICV_14:
3346 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3347 aeadctx->mayverify = VERIFY_HW;
3348 break;
3349 case ICV_16:
3350 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3351 aeadctx->mayverify = VERIFY_HW;
3352 break;
3353 case ICV_13:
3354 case ICV_15:
3355 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3356 aeadctx->mayverify = VERIFY_SW;
3357 break;
3358 default:
3359 return -EINVAL;
3360 }
3361 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3362}
3363
3364static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3365 unsigned int authsize)
3366{
3367 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3368
3369 switch (authsize) {
3370 case ICV_8:
3371 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3372 aeadctx->mayverify = VERIFY_HW;
3373 break;
3374 case ICV_12:
3375 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3376 aeadctx->mayverify = VERIFY_HW;
3377 break;
3378 case ICV_16:
3379 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3380 aeadctx->mayverify = VERIFY_HW;
3381 break;
3382 default:
3383 return -EINVAL;
3384 }
3385 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3386}
3387
3388static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3389 unsigned int authsize)
3390{
3391 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3392
3393 switch (authsize) {
3394 case ICV_4:
3395 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3396 aeadctx->mayverify = VERIFY_HW;
3397 break;
3398 case ICV_6:
3399 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3400 aeadctx->mayverify = VERIFY_HW;
3401 break;
3402 case ICV_8:
3403 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3404 aeadctx->mayverify = VERIFY_HW;
3405 break;
3406 case ICV_10:
3407 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3408 aeadctx->mayverify = VERIFY_HW;
3409 break;
3410 case ICV_12:
3411 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3412 aeadctx->mayverify = VERIFY_HW;
3413 break;
3414 case ICV_14:
3415 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3416 aeadctx->mayverify = VERIFY_HW;
3417 break;
3418 case ICV_16:
3419 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3420 aeadctx->mayverify = VERIFY_HW;
3421 break;
3422 default:
3423 return -EINVAL;
3424 }
3425 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3426}
3427
3428static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3429 const u8 *key,
3430 unsigned int keylen)
3431{
3432 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3433 unsigned char ck_size, mk_size;
3434 int key_ctx_size = 0;
3435
3436 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3437 if (keylen == AES_KEYSIZE_128) {
3438 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3439 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3440 } else if (keylen == AES_KEYSIZE_192) {
3441 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3442 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3443 } else if (keylen == AES_KEYSIZE_256) {
3444 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3445 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3446 } else {
3447 aeadctx->enckey_len = 0;
3448 return -EINVAL;
3449 }
3450 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3451 key_ctx_size >> 4);
3452 memcpy(aeadctx->key, key, keylen);
3453 aeadctx->enckey_len = keylen;
3454
3455 return 0;
3456}
3457
3458static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3459 const u8 *key,
3460 unsigned int keylen)
3461{
3462 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3463 int error;
3464
3465 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3466 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3467 CRYPTO_TFM_REQ_MASK);
3468 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3469 if (error)
3470 return error;
3471 return chcr_ccm_common_setkey(aead, key, keylen);
3472}
3473
3474static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3475 unsigned int keylen)
3476{
3477 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3478 int error;
3479
3480 if (keylen < 3) {
3481 aeadctx->enckey_len = 0;
3482 return -EINVAL;
3483 }
3484 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3485 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3486 CRYPTO_TFM_REQ_MASK);
3487 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3488 if (error)
3489 return error;
3490 keylen -= 3;
3491 memcpy(aeadctx->salt, key + keylen, 3);
3492 return chcr_ccm_common_setkey(aead, key, keylen);
3493}
3494
3495static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3496 unsigned int keylen)
3497{
3498 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3499 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3500 unsigned int ck_size;
3501 int ret = 0, key_ctx_size = 0;
3502 struct crypto_aes_ctx aes;
3503
3504 aeadctx->enckey_len = 0;
3505 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3506 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3507 & CRYPTO_TFM_REQ_MASK);
3508 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3509 if (ret)
3510 goto out;
3511
3512 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3513 keylen > 3) {
3514 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3515 memcpy(aeadctx->salt, key + keylen, 4);
3516 }
3517 if (keylen == AES_KEYSIZE_128) {
3518 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3519 } else if (keylen == AES_KEYSIZE_192) {
3520 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3521 } else if (keylen == AES_KEYSIZE_256) {
3522 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3523 } else {
3524 pr_err("GCM: Invalid key length %d\n", keylen);
3525 ret = -EINVAL;
3526 goto out;
3527 }
3528
3529 memcpy(aeadctx->key, key, keylen);
3530 aeadctx->enckey_len = keylen;
3531 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3532 AEAD_H_SIZE;
3533 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3534 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3535 0, 0,
3536 key_ctx_size >> 4);
3537 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3538 * It will go in key context
3539 */
3540 ret = aes_expandkey(&aes, key, keylen);
3541 if (ret) {
3542 aeadctx->enckey_len = 0;
3543 goto out;
3544 }
3545 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3546 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3547 memzero_explicit(&aes, sizeof(aes));
3548
3549out:
3550 return ret;
3551}
3552
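/*
 * authenc setkey: split the combined key, program the cipher key into the
 * key context and precompute the HMAC inner/outer partial hashes by
 * running one block of key^ipad and key^opad through the base hash, so
 * the hardware can start from those intermediate states.
 */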
3553static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3554 unsigned int keylen)
3555{
3556 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3557 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3558 /* 'key' carries both the authentication and the cipher key */
3559 struct crypto_authenc_keys keys;
3560 unsigned int bs, subtype;
3561 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3562 int err = 0, i, key_ctx_len = 0;
3563 unsigned char ck_size = 0;
3564 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3565 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3566 struct algo_param param;
3567 int align;
3568 u8 *o_ptr = NULL;
3569
3570 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3571 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3572 & CRYPTO_TFM_REQ_MASK);
3573 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3574 if (err)
3575 goto out;
3576
3577 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3578 goto out;
3579
3580 if (get_alg_config(&param, max_authsize)) {
3581 pr_err("Unsupported digest size\n");
3582 goto out;
3583 }
3584 subtype = get_aead_subtype(authenc);
3585 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3586 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3587 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3588 goto out;
3589 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3590 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3591 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3592 }
3593 if (keys.enckeylen == AES_KEYSIZE_128) {
3594 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3595 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3596 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3597 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3598 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3599 } else {
3600 pr_err("Unsupported cipher key\n");
3601 goto out;
3602 }
3603
3604 /* Copy only the encryption key. The auth key is used here to generate
3605 * h(ipad) and h(opad) and is not needed afterwards; authkeylen is the
3606 * size of the hash digest.
3607 */
3608 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3609 aeadctx->enckey_len = keys.enckeylen;
3610 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3611 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3612
3613 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3614 aeadctx->enckey_len << 3);
3615 }
3616 base_hash = chcr_alloc_shash(max_authsize);
3617 if (IS_ERR(base_hash)) {
3618 pr_err("Base driver cannot be loaded\n");
3619 goto out;
3620 }
3621 {
3622 SHASH_DESC_ON_STACK(shash, base_hash);
3623
3624 shash->tfm = base_hash;
3625 bs = crypto_shash_blocksize(base_hash);
3626 align = KEYCTX_ALIGN_PAD(max_authsize);
3627 o_ptr = actx->h_iopad + param.result_size + align;
3628
3629 if (keys.authkeylen > bs) {
3630 err = crypto_shash_digest(shash, keys.authkey,
3631 keys.authkeylen,
3632 o_ptr);
3633 if (err) {
3634 pr_err("Hashing of the auth key failed\n");
3635 goto out;
3636 }
3637 keys.authkeylen = max_authsize;
3638 } else
3639 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3640
3641 /* Compute the ipad-digest*/
3642 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3643 memcpy(pad, o_ptr, keys.authkeylen);
3644 for (i = 0; i < bs >> 2; i++)
3645 *((unsigned int *)pad + i) ^= IPAD_DATA;
3646
3647 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3648 max_authsize))
3649 goto out;
3650 /* Compute the opad-digest */
3651 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3652 memcpy(pad, o_ptr, keys.authkeylen);
3653 for (i = 0; i < bs >> 2; i++)
3654 *((unsigned int *)pad + i) ^= OPAD_DATA;
3655
3656 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3657 goto out;
3658
3659 /* convert the ipad and opad digest to network order */
3660 chcr_change_order(actx->h_iopad, param.result_size);
3661 chcr_change_order(o_ptr, param.result_size);
3662 key_ctx_len = sizeof(struct _key_ctx) +
3663 roundup(keys.enckeylen, 16) +
3664 (param.result_size + align) * 2;
3665 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3666 0, 1, key_ctx_len >> 4);
3667 actx->auth_mode = param.auth_mode;
3668 chcr_free_shash(base_hash);
3669
3670 memzero_explicit(&keys, sizeof(keys));
3671 return 0;
3672 }
3673out:
3674 aeadctx->enckey_len = 0;
3675 memzero_explicit(&keys, sizeof(keys));
3676 if (!IS_ERR(base_hash))
3677 chcr_free_shash(base_hash);
3678 return -EINVAL;
3679}
3680
3681static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3682 const u8 *key, unsigned int keylen)
3683{
3684 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3685 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3686 struct crypto_authenc_keys keys;
3687 int err;
3688 /* 'key' carries both the authentication and the cipher key */
3689 unsigned int subtype;
3690 int key_ctx_len = 0;
3691 unsigned char ck_size = 0;
3692
3693 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3694 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3695 & CRYPTO_TFM_REQ_MASK);
3696 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3697 if (err)
3698 goto out;
3699
3700 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3701 goto out;
3702
3703 subtype = get_aead_subtype(authenc);
3704 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3705 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3706 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3707 goto out;
3708 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3709 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3710 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3711 }
3712 if (keys.enckeylen == AES_KEYSIZE_128) {
3713 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3714 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3715 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3716 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3717 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3718 } else {
3719 pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3720 goto out;
3721 }
3722 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3723 aeadctx->enckey_len = keys.enckeylen;
3724 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3725 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3726 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3727 aeadctx->enckey_len << 3);
3728 }
3729 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3730
3731 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3732 0, key_ctx_len >> 4);
3733 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3734 memzero_explicit(&keys, sizeof(keys));
3735 return 0;
3736out:
3737 aeadctx->enckey_len = 0;
3738 memzero_explicit(&keys, sizeof(keys));
3739 return -EINVAL;
3740}
3741
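/*
 * Common AEAD submission path: take a work-request reference on the
 * device (falling back to the software cipher if the device is
 * detaching), honour queue-full backpressure, build the WR via
 * create_wr_fn and hand it to the LLD.  Returns -EINPROGRESS on
 * successful submission.
 */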
3742static int chcr_aead_op(struct aead_request *req,
3743 int size,
3744 create_wr_t create_wr_fn)
3745{
3746 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3747 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3748 struct chcr_context *ctx = a_ctx(tfm);
3749 struct uld_ctx *u_ctx = ULD_CTX(ctx);
3750 struct sk_buff *skb;
3751 struct chcr_dev *cdev;
3752
3753 cdev = a_ctx(tfm)->dev;
3754 if (!cdev) {
3755 pr_err("%s : No crypto device.\n", __func__);
3756 return -ENXIO;
3757 }
3758
3759 if (chcr_inc_wrcount(cdev)) {
3760 /* Detach state for CHCR means lldi or padap is freed.
3761 * We cannot increment fallback here.
3762 */
3763 return chcr_aead_fallback(req, reqctx->op);
3764 }
3765
3766 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3767 reqctx->txqidx) &&
3768 (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3769 chcr_dec_wrcount(cdev);
3770 return -ENOSPC;
3771 }
3772
3773 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3774 crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3775 pr_err("RFC4106: Invalid value of assoclen %d\n",
3776 req->assoclen);
chcr_dec_wrcount(cdev); /* balance the chcr_inc_wrcount() taken above */
3777 return -EINVAL;
3778 }
3779
3780 /* Form a WR from req */
3781 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3782
3783 if (IS_ERR_OR_NULL(skb)) {
3784 chcr_dec_wrcount(cdev);
3785 return PTR_ERR_OR_ZERO(skb);
3786 }
3787
3788 skb->dev = u_ctx->lldi.ports[0];
3789 set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3790 chcr_send_wr(skb);
3791 return -EINPROGRESS;
3792}
3793
3794static int chcr_aead_encrypt(struct aead_request *req)
3795{
3796 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3797 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3798 struct chcr_context *ctx = a_ctx(tfm);
3799 unsigned int cpu;
3800
3801 cpu = get_cpu();
3802 reqctx->txqidx = cpu % ctx->ntxq;
3803 reqctx->rxqidx = cpu % ctx->nrxq;
3804 put_cpu();
3805
3806 reqctx->verify = VERIFY_HW;
3807 reqctx->op = CHCR_ENCRYPT_OP;
3808
3809 switch (get_aead_subtype(tfm)) {
3810 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3811 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3812 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3813 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3814 return chcr_aead_op(req, 0, create_authenc_wr);
3815 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3816 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3817 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3818 default:
3819 return chcr_aead_op(req, 0, create_gcm_wr);
3820 }
3821}
3822
3823static int chcr_aead_decrypt(struct aead_request *req)
3824{
3825 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3826 struct chcr_context *ctx = a_ctx(tfm);
3827 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3828 struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
3829 int size;
3830 unsigned int cpu;
3831
3832 cpu = get_cpu();
3833 reqctx->txqidx = cpu % ctx->ntxq;
3834 reqctx->rxqidx = cpu % ctx->nrxq;
3835 put_cpu();
3836
3837 if (aeadctx->mayverify == VERIFY_SW) {
3838 size = crypto_aead_maxauthsize(tfm);
3839 reqctx->verify = VERIFY_SW;
3840 } else {
3841 size = 0;
3842 reqctx->verify = VERIFY_HW;
3843 }
3844 reqctx->op = CHCR_DECRYPT_OP;
3845 switch (get_aead_subtype(tfm)) {
3846 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3847 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3848 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3849 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3850 return chcr_aead_op(req, size, create_authenc_wr);
3851 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3852 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3853 return chcr_aead_op(req, size, create_aead_ccm_wr);
3854 default:
3855 return chcr_aead_op(req, size, create_gcm_wr);
3856 }
3857}
3858
3859static struct chcr_alg_template driver_algs[] = {
3860 /* AES-CBC */
3861 {
3862 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3863 .is_registered = 0,
3864 .alg.skcipher = {
3865 .base.cra_name = "cbc(aes)",
3866 .base.cra_driver_name = "cbc-aes-chcr",
3867 .base.cra_blocksize = AES_BLOCK_SIZE,
3868
3869 .init = chcr_init_tfm,
3870 .exit = chcr_exit_tfm,
3871 .min_keysize = AES_MIN_KEY_SIZE,
3872 .max_keysize = AES_MAX_KEY_SIZE,
3873 .ivsize = AES_BLOCK_SIZE,
3874 .setkey = chcr_aes_cbc_setkey,
3875 .encrypt = chcr_aes_encrypt,
3876 .decrypt = chcr_aes_decrypt,
3877 }
3878 },
3879 {
3880 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3881 .is_registered = 0,
3882 .alg.skcipher = {
3883 .base.cra_name = "xts(aes)",
3884 .base.cra_driver_name = "xts-aes-chcr",
3885 .base.cra_blocksize = AES_BLOCK_SIZE,
3886
3887 .init = chcr_init_tfm,
3888 .exit = chcr_exit_tfm,
3889 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3890 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3891 .ivsize = AES_BLOCK_SIZE,
3892 .setkey = chcr_aes_xts_setkey,
3893 .encrypt = chcr_aes_encrypt,
3894 .decrypt = chcr_aes_decrypt,
3895 }
3896 },
3897 {
3898 .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3899 .is_registered = 0,
3900 .alg.skcipher = {
3901 .base.cra_name = "ctr(aes)",
3902 .base.cra_driver_name = "ctr-aes-chcr",
3903 .base.cra_blocksize = 1,
3904
3905 .init = chcr_init_tfm,
3906 .exit = chcr_exit_tfm,
3907 .min_keysize = AES_MIN_KEY_SIZE,
3908 .max_keysize = AES_MAX_KEY_SIZE,
3909 .ivsize = AES_BLOCK_SIZE,
3910 .setkey = chcr_aes_ctr_setkey,
3911 .encrypt = chcr_aes_encrypt,
3912 .decrypt = chcr_aes_decrypt,
3913 }
3914 },
3915 {
3916 .type = CRYPTO_ALG_TYPE_SKCIPHER |
3917 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3918 .is_registered = 0,
3919 .alg.skcipher = {
3920 .base.cra_name = "rfc3686(ctr(aes))",
3921 .base.cra_driver_name = "rfc3686-ctr-aes-chcr",
3922 .base.cra_blocksize = 1,
3923
3924 .init = chcr_rfc3686_init,
3925 .exit = chcr_exit_tfm,
3926 .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3927 .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3928 .ivsize = CTR_RFC3686_IV_SIZE,
3929 .setkey = chcr_aes_rfc3686_setkey,
3930 .encrypt = chcr_aes_encrypt,
3931 .decrypt = chcr_aes_decrypt,
3932 }
3933 },
3934 /* SHA */
3935 {
3936 .type = CRYPTO_ALG_TYPE_AHASH,
3937 .is_registered = 0,
3938 .alg.hash = {
3939 .halg.digestsize = SHA1_DIGEST_SIZE,
3940 .halg.base = {
3941 .cra_name = "sha1",
3942 .cra_driver_name = "sha1-chcr",
3943 .cra_blocksize = SHA1_BLOCK_SIZE,
3944 }
3945 }
3946 },
3947 {
3948 .type = CRYPTO_ALG_TYPE_AHASH,
3949 .is_registered = 0,
3950 .alg.hash = {
3951 .halg.digestsize = SHA256_DIGEST_SIZE,
3952 .halg.base = {
3953 .cra_name = "sha256",
3954 .cra_driver_name = "sha256-chcr",
3955 .cra_blocksize = SHA256_BLOCK_SIZE,
3956 }
3957 }
3958 },
3959 {
3960 .type = CRYPTO_ALG_TYPE_AHASH,
3961 .is_registered = 0,
3962 .alg.hash = {
3963 .halg.digestsize = SHA224_DIGEST_SIZE,
3964 .halg.base = {
3965 .cra_name = "sha224",
3966 .cra_driver_name = "sha224-chcr",
3967 .cra_blocksize = SHA224_BLOCK_SIZE,
3968 }
3969 }
3970 },
3971 {
3972 .type = CRYPTO_ALG_TYPE_AHASH,
3973 .is_registered = 0,
3974 .alg.hash = {
3975 .halg.digestsize = SHA384_DIGEST_SIZE,
3976 .halg.base = {
3977 .cra_name = "sha384",
3978 .cra_driver_name = "sha384-chcr",
3979 .cra_blocksize = SHA384_BLOCK_SIZE,
3980 }
3981 }
3982 },
3983 {
3984 .type = CRYPTO_ALG_TYPE_AHASH,
3985 .is_registered = 0,
3986 .alg.hash = {
3987 .halg.digestsize = SHA512_DIGEST_SIZE,
3988 .halg.base = {
3989 .cra_name = "sha512",
3990 .cra_driver_name = "sha512-chcr",
3991 .cra_blocksize = SHA512_BLOCK_SIZE,
3992 }
3993 }
3994 },
3995 /* HMAC */
3996 {
3997 .type = CRYPTO_ALG_TYPE_HMAC,
3998 .is_registered = 0,
3999 .alg.hash = {
4000 .halg.digestsize = SHA1_DIGEST_SIZE,
4001 .halg.base = {
4002 .cra_name = "hmac(sha1)",
4003 .cra_driver_name = "hmac-sha1-chcr",
4004 .cra_blocksize = SHA1_BLOCK_SIZE,
4005 }
4006 }
4007 },
4008 {
4009 .type = CRYPTO_ALG_TYPE_HMAC,
4010 .is_registered = 0,
4011 .alg.hash = {
4012 .halg.digestsize = SHA224_DIGEST_SIZE,
4013 .halg.base = {
4014 .cra_name = "hmac(sha224)",
4015 .cra_driver_name = "hmac-sha224-chcr",
4016 .cra_blocksize = SHA224_BLOCK_SIZE,
4017 }
4018 }
4019 },
4020 {
4021 .type = CRYPTO_ALG_TYPE_HMAC,
4022 .is_registered = 0,
4023 .alg.hash = {
4024 .halg.digestsize = SHA256_DIGEST_SIZE,
4025 .halg.base = {
4026 .cra_name = "hmac(sha256)",
4027 .cra_driver_name = "hmac-sha256-chcr",
4028 .cra_blocksize = SHA256_BLOCK_SIZE,
4029 }
4030 }
4031 },
4032 {
4033 .type = CRYPTO_ALG_TYPE_HMAC,
4034 .is_registered = 0,
4035 .alg.hash = {
4036 .halg.digestsize = SHA384_DIGEST_SIZE,
4037 .halg.base = {
4038 .cra_name = "hmac(sha384)",
4039 .cra_driver_name = "hmac-sha384-chcr",
4040 .cra_blocksize = SHA384_BLOCK_SIZE,
4041 }
4042 }
4043 },
4044 {
4045 .type = CRYPTO_ALG_TYPE_HMAC,
4046 .is_registered = 0,
4047 .alg.hash = {
4048 .halg.digestsize = SHA512_DIGEST_SIZE,
4049 .halg.base = {
4050 .cra_name = "hmac(sha512)",
4051 .cra_driver_name = "hmac-sha512-chcr",
4052 .cra_blocksize = SHA512_BLOCK_SIZE,
4053 }
4054 }
4055 },
4056 /* Add AEAD Algorithms */
4057 {
4058 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4059 .is_registered = 0,
4060 .alg.aead = {
4061 .base = {
4062 .cra_name = "gcm(aes)",
4063 .cra_driver_name = "gcm-aes-chcr",
4064 .cra_blocksize = 1,
4065 .cra_priority = CHCR_AEAD_PRIORITY,
4066 .cra_ctxsize = sizeof(struct chcr_context) +
4067 sizeof(struct chcr_aead_ctx) +
4068 sizeof(struct chcr_gcm_ctx),
4069 },
4070 .ivsize = GCM_AES_IV_SIZE,
4071 .maxauthsize = GHASH_DIGEST_SIZE,
4072 .setkey = chcr_gcm_setkey,
4073 .setauthsize = chcr_gcm_setauthsize,
4074 }
4075 },
4076 {
4077 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4078 .is_registered = 0,
4079 .alg.aead = {
4080 .base = {
4081 .cra_name = "rfc4106(gcm(aes))",
4082 .cra_driver_name = "rfc4106-gcm-aes-chcr",
4083 .cra_blocksize = 1,
4084 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4085 .cra_ctxsize = sizeof(struct chcr_context) +
4086 sizeof(struct chcr_aead_ctx) +
4087 sizeof(struct chcr_gcm_ctx),
4088
4089 },
4090 .ivsize = GCM_RFC4106_IV_SIZE,
4091 .maxauthsize = GHASH_DIGEST_SIZE,
4092 .setkey = chcr_gcm_setkey,
4093 .setauthsize = chcr_4106_4309_setauthsize,
4094 }
4095 },
4096 {
4097 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4098 .is_registered = 0,
4099 .alg.aead = {
4100 .base = {
4101 .cra_name = "ccm(aes)",
4102 .cra_driver_name = "ccm-aes-chcr",
4103 .cra_blocksize = 1,
4104 .cra_priority = CHCR_AEAD_PRIORITY,
4105 .cra_ctxsize = sizeof(struct chcr_context) +
4106 sizeof(struct chcr_aead_ctx),
4107
4108 },
4109 .ivsize = AES_BLOCK_SIZE,
4110 .maxauthsize = GHASH_DIGEST_SIZE,
4111 .setkey = chcr_aead_ccm_setkey,
4112 .setauthsize = chcr_ccm_setauthsize,
4113 }
4114 },
4115 {
4116 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4117 .is_registered = 0,
4118 .alg.aead = {
4119 .base = {
4120 .cra_name = "rfc4309(ccm(aes))",
4121 .cra_driver_name = "rfc4309-ccm-aes-chcr",
4122 .cra_blocksize = 1,
4123 .cra_priority = CHCR_AEAD_PRIORITY + 1,
4124 .cra_ctxsize = sizeof(struct chcr_context) +
4125 sizeof(struct chcr_aead_ctx),
4126
4127 },
4128 .ivsize = 8,
4129 .maxauthsize = GHASH_DIGEST_SIZE,
4130 .setkey = chcr_aead_rfc4309_setkey,
4131 .setauthsize = chcr_4106_4309_setauthsize,
4132 }
4133 },
4134 {
4135 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4136 .is_registered = 0,
4137 .alg.aead = {
4138 .base = {
4139 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4140 .cra_driver_name =
4141 "authenc-hmac-sha1-cbc-aes-chcr",
4142 .cra_blocksize = AES_BLOCK_SIZE,
4143 .cra_priority = CHCR_AEAD_PRIORITY,
4144 .cra_ctxsize = sizeof(struct chcr_context) +
4145 sizeof(struct chcr_aead_ctx) +
4146 sizeof(struct chcr_authenc_ctx),
4147
4148 },
4149 .ivsize = AES_BLOCK_SIZE,
4150 .maxauthsize = SHA1_DIGEST_SIZE,
4151 .setkey = chcr_authenc_setkey,
4152 .setauthsize = chcr_authenc_setauthsize,
4153 }
4154 },
4155 {
4156 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4157 .is_registered = 0,
4158 .alg.aead = {
4159 .base = {
4160
4161 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4162 .cra_driver_name =
4163 "authenc-hmac-sha256-cbc-aes-chcr",
4164 .cra_blocksize = AES_BLOCK_SIZE,
4165 .cra_priority = CHCR_AEAD_PRIORITY,
4166 .cra_ctxsize = sizeof(struct chcr_context) +
4167 sizeof(struct chcr_aead_ctx) +
4168 sizeof(struct chcr_authenc_ctx),
4169
4170 },
4171 .ivsize = AES_BLOCK_SIZE,
4172 .maxauthsize = SHA256_DIGEST_SIZE,
4173 .setkey = chcr_authenc_setkey,
4174 .setauthsize = chcr_authenc_setauthsize,
4175 }
4176 },
4177 {
4178 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4179 .is_registered = 0,
4180 .alg.aead = {
4181 .base = {
4182 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4183 .cra_driver_name =
4184 "authenc-hmac-sha224-cbc-aes-chcr",
4185 .cra_blocksize = AES_BLOCK_SIZE,
4186 .cra_priority = CHCR_AEAD_PRIORITY,
4187 .cra_ctxsize = sizeof(struct chcr_context) +
4188 sizeof(struct chcr_aead_ctx) +
4189 sizeof(struct chcr_authenc_ctx),
4190 },
4191 .ivsize = AES_BLOCK_SIZE,
4192 .maxauthsize = SHA224_DIGEST_SIZE,
4193 .setkey = chcr_authenc_setkey,
4194 .setauthsize = chcr_authenc_setauthsize,
4195 }
4196 },
4197 {
4198 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4199 .is_registered = 0,
4200 .alg.aead = {
4201 .base = {
4202 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4203 .cra_driver_name =
4204 "authenc-hmac-sha384-cbc-aes-chcr",
4205 .cra_blocksize = AES_BLOCK_SIZE,
4206 .cra_priority = CHCR_AEAD_PRIORITY,
4207 .cra_ctxsize = sizeof(struct chcr_context) +
4208 sizeof(struct chcr_aead_ctx) +
4209 sizeof(struct chcr_authenc_ctx),
4210
4211 },
4212 .ivsize = AES_BLOCK_SIZE,
4213 .maxauthsize = SHA384_DIGEST_SIZE,
4214 .setkey = chcr_authenc_setkey,
4215 .setauthsize = chcr_authenc_setauthsize,
4216 }
4217 },
4218 {
4219 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4220 .is_registered = 0,
4221 .alg.aead = {
4222 .base = {
4223 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4224 .cra_driver_name =
4225 "authenc-hmac-sha512-cbc-aes-chcr",
4226 .cra_blocksize = AES_BLOCK_SIZE,
4227 .cra_priority = CHCR_AEAD_PRIORITY,
4228 .cra_ctxsize = sizeof(struct chcr_context) +
4229 sizeof(struct chcr_aead_ctx) +
4230 sizeof(struct chcr_authenc_ctx),
4231
4232 },
4233 .ivsize = AES_BLOCK_SIZE,
4234 .maxauthsize = SHA512_DIGEST_SIZE,
4235 .setkey = chcr_authenc_setkey,
4236 .setauthsize = chcr_authenc_setauthsize,
4237 }
4238 },
4239 {
4240 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4241 .is_registered = 0,
4242 .alg.aead = {
4243 .base = {
4244 .cra_name = "authenc(digest_null,cbc(aes))",
4245 .cra_driver_name =
4246 "authenc-digest_null-cbc-aes-chcr",
4247 .cra_blocksize = AES_BLOCK_SIZE,
4248 .cra_priority = CHCR_AEAD_PRIORITY,
4249 .cra_ctxsize = sizeof(struct chcr_context) +
4250 sizeof(struct chcr_aead_ctx) +
4251 sizeof(struct chcr_authenc_ctx),
4252
4253 },
4254 .ivsize = AES_BLOCK_SIZE,
4255 .maxauthsize = 0,
4256 .setkey = chcr_aead_digest_null_setkey,
4257 .setauthsize = chcr_authenc_null_setauthsize,
4258 }
4259 },
4260 {
4261 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4262 .is_registered = 0,
4263 .alg.aead = {
4264 .base = {
4265 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4266 .cra_driver_name =
4267 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4268 .cra_blocksize = 1,
4269 .cra_priority = CHCR_AEAD_PRIORITY,
4270 .cra_ctxsize = sizeof(struct chcr_context) +
4271 sizeof(struct chcr_aead_ctx) +
4272 sizeof(struct chcr_authenc_ctx),
4273
4274 },
4275 .ivsize = CTR_RFC3686_IV_SIZE,
4276 .maxauthsize = SHA1_DIGEST_SIZE,
4277 .setkey = chcr_authenc_setkey,
4278 .setauthsize = chcr_authenc_setauthsize,
4279 }
4280 },
4281 {
4282 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4283 .is_registered = 0,
4284 .alg.aead = {
4285 .base = {
4286
4287 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4288 .cra_driver_name =
4289 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4290 .cra_blocksize = 1,
4291 .cra_priority = CHCR_AEAD_PRIORITY,
4292 .cra_ctxsize = sizeof(struct chcr_context) +
4293 sizeof(struct chcr_aead_ctx) +
4294 sizeof(struct chcr_authenc_ctx),
4295
4296 },
4297 .ivsize = CTR_RFC3686_IV_SIZE,
4298 .maxauthsize = SHA256_DIGEST_SIZE,
4299 .setkey = chcr_authenc_setkey,
4300 .setauthsize = chcr_authenc_setauthsize,
4301 }
4302 },
4303 {
4304 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4305 .is_registered = 0,
4306 .alg.aead = {
4307 .base = {
4308 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4309 .cra_driver_name =
4310 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4311 .cra_blocksize = 1,
4312 .cra_priority = CHCR_AEAD_PRIORITY,
4313 .cra_ctxsize = sizeof(struct chcr_context) +
4314 sizeof(struct chcr_aead_ctx) +
4315 sizeof(struct chcr_authenc_ctx),
4316 },
4317 .ivsize = CTR_RFC3686_IV_SIZE,
4318 .maxauthsize = SHA224_DIGEST_SIZE,
4319 .setkey = chcr_authenc_setkey,
4320 .setauthsize = chcr_authenc_setauthsize,
4321 }
4322 },
4323 {
4324 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4325 .is_registered = 0,
4326 .alg.aead = {
4327 .base = {
4328 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4329 .cra_driver_name =
4330 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4331 .cra_blocksize = 1,
4332 .cra_priority = CHCR_AEAD_PRIORITY,
4333 .cra_ctxsize = sizeof(struct chcr_context) +
4334 sizeof(struct chcr_aead_ctx) +
4335 sizeof(struct chcr_authenc_ctx),
4336
4337 },
4338 .ivsize = CTR_RFC3686_IV_SIZE,
4339 .maxauthsize = SHA384_DIGEST_SIZE,
4340 .setkey = chcr_authenc_setkey,
4341 .setauthsize = chcr_authenc_setauthsize,
4342 }
4343 },
4344 {
4345 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4346 .is_registered = 0,
4347 .alg.aead = {
4348 .base = {
4349 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4350 .cra_driver_name =
4351 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4352 .cra_blocksize = 1,
4353 .cra_priority = CHCR_AEAD_PRIORITY,
4354 .cra_ctxsize = sizeof(struct chcr_context) +
4355 sizeof(struct chcr_aead_ctx) +
4356 sizeof(struct chcr_authenc_ctx),
4357
4358 },
4359 .ivsize = CTR_RFC3686_IV_SIZE,
4360 .maxauthsize = SHA512_DIGEST_SIZE,
4361 .setkey = chcr_authenc_setkey,
4362 .setauthsize = chcr_authenc_setauthsize,
4363 }
4364 },
4365 {
4366 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4367 .is_registered = 0,
4368 .alg.aead = {
4369 .base = {
4370 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4371 .cra_driver_name =
4372 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4373 .cra_blocksize = 1,
4374 .cra_priority = CHCR_AEAD_PRIORITY,
4375 .cra_ctxsize = sizeof(struct chcr_context) +
4376 sizeof(struct chcr_aead_ctx) +
4377 sizeof(struct chcr_authenc_ctx),
4378
4379 },
4380 .ivsize = CTR_RFC3686_IV_SIZE,
4381 .maxauthsize = 0,
4382 .setkey = chcr_aead_digest_null_setkey,
4383 .setauthsize = chcr_authenc_null_setauthsize,
4384 }
4385 },
4386};
4387
4388/*
4389 * chcr_unregister_alg - Deregister crypto algorithms from the
4390 * kernel crypto framework.
4391 */
4392static int chcr_unregister_alg(void)
4393{
4394 int i;
4395
4396 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4397 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4398 case CRYPTO_ALG_TYPE_SKCIPHER:
4399 if (driver_algs[i].is_registered && refcount_read(
4400 &driver_algs[i].alg.skcipher.base.cra_refcnt)
4401 == 1) {
4402 crypto_unregister_skcipher(
4403 &driver_algs[i].alg.skcipher);
4404 driver_algs[i].is_registered = 0;
4405 }
4406 break;
4407 case CRYPTO_ALG_TYPE_AEAD:
4408 if (driver_algs[i].is_registered && refcount_read(
4409 &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4410 crypto_unregister_aead(
4411 &driver_algs[i].alg.aead);
4412 driver_algs[i].is_registered = 0;
4413 }
4414 break;
4415 case CRYPTO_ALG_TYPE_AHASH:
4416 if (driver_algs[i].is_registered && refcount_read(
4417 &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4418 == 1) {
4419 crypto_unregister_ahash(
4420 &driver_algs[i].alg.hash);
4421 driver_algs[i].is_registered = 0;
4422 }
4423 break;
4424 }
4425 }
4426 return 0;
4427}
4428
4429#define SZ_AHASH_CTX sizeof(struct chcr_context)
4430#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4431#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4432
4433/*
4434 * chcr_register_alg - Register crypto algorithms with the kernel crypto framework.
4435 */
4436static int chcr_register_alg(void)
4437{
4438 struct crypto_alg ai;
4439 struct ahash_alg *a_hash;
4440 int err = 0, i;
4441 char *name = NULL;
4442
4443 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4444 if (driver_algs[i].is_registered)
4445 continue;
4446 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4447 case CRYPTO_ALG_TYPE_SKCIPHER:
4448 driver_algs[i].alg.skcipher.base.cra_priority =
4449 CHCR_CRA_PRIORITY;
4450 driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4451 driver_algs[i].alg.skcipher.base.cra_flags =
4452 CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4453 CRYPTO_ALG_ALLOCATES_MEMORY |
4454 CRYPTO_ALG_NEED_FALLBACK;
4455 driver_algs[i].alg.skcipher.base.cra_ctxsize =
4456 sizeof(struct chcr_context) +
4457 sizeof(struct ablk_ctx);
4458 driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4459
4460 err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4461 name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4462 break;
4463 case CRYPTO_ALG_TYPE_AEAD:
4464 driver_algs[i].alg.aead.base.cra_flags =
4465 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4466 CRYPTO_ALG_ALLOCATES_MEMORY;
4467 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4468 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4469 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4470 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4471 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4472 err = crypto_register_aead(&driver_algs[i].alg.aead);
4473 name = driver_algs[i].alg.aead.base.cra_driver_name;
4474 break;
4475 case CRYPTO_ALG_TYPE_AHASH:
4476 a_hash = &driver_algs[i].alg.hash;
4477 a_hash->update = chcr_ahash_update;
4478 a_hash->final = chcr_ahash_final;
4479 a_hash->finup = chcr_ahash_finup;
4480 a_hash->digest = chcr_ahash_digest;
4481 a_hash->export = chcr_ahash_export;
4482 a_hash->import = chcr_ahash_import;
4483 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4484 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4485 a_hash->halg.base.cra_module = THIS_MODULE;
4486 a_hash->halg.base.cra_flags =
4487 CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4488 a_hash->halg.base.cra_alignmask = 0;
4489 a_hash->halg.base.cra_exit = NULL;
4490
4491 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4492 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4493 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4494 a_hash->init = chcr_hmac_init;
4495 a_hash->setkey = chcr_ahash_setkey;
4496 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4497 } else {
4498 a_hash->init = chcr_sha_init;
4499 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4500 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4501 }
4502 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4503 ai = driver_algs[i].alg.hash.halg.base;
4504 name = ai.cra_driver_name;
4505 break;
4506 }
4507 if (err) {
4508 pr_err("%s : Algorithm registration failed\n", name);
4509 goto register_err;
4510 } else {
4511 driver_algs[i].is_registered = 1;
4512 }
4513 }
4514 return 0;
4515
4516register_err:
4517 chcr_unregister_alg();
4518 return err;
4519}
4520
4521/*
4522 * start_crypto - Register the crypto algorithms.
4523 * This should be called once when the first device comes up. After this
4524 * the kernel will start calling driver APIs for crypto operations.
4525 */
4526int start_crypto(void)
4527{
4528 return chcr_register_alg();
4529}
4530
4531/*
4532 * stop_crypto - Deregister all the crypto algorithms from the kernel.
4533 * This should be called once when the last device goes down. After this
4534 * the kernel will not call the driver API for crypto operations.
4535 */
4536int stop_crypto(void)
4537{
4538 chcr_unregister_alg();
4539 return 0;
4540}