/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
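/* Explicit IV carried in each AES-GCM ESP packet (RFC 4106): 8 bytes */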
#define GCM_ESP_IV_SIZE 8

static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);

static const struct xfrmdev_ops chcr_xfrmdev_ops = {
        .xdo_dev_state_add         = chcr_xfrm_add_state,
        .xdo_dev_state_delete      = chcr_xfrm_del_state,
        .xdo_dev_state_free        = chcr_xfrm_free_state,
        .xdo_dev_offload_ok        = chcr_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};

/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
        struct net_device *netdev = NULL;
        int i;

        for (i = 0; i < lld->nports; i++) {
                netdev = lld->ports[i];
                if (!netdev)
                        continue;
                netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
                netdev->hw_enc_features |= NETIF_F_HW_ESP;
                netdev->features |= NETIF_F_HW_ESP;
                /* netdev_change_features() must be called under RTNL */
                rtnl_lock();
                netdev_change_features(netdev);
                rtnl_unlock();
        }
}

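/*
 * Translate the ESP ICV (authentication tag) length configured on the
 * xfrm state into the hardware HMAC-control encoding. Only 8-, 12- and
 * 16-byte tags are representable.
 */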
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
                                         struct ipsec_sa_entry *sa_entry)
{
        int hmac_ctrl;
        int authsize = x->aead->alg_icv_len / 8;

        sa_entry->authsize = authsize;

        switch (authsize) {
        case ICV_8:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
                break;
        case ICV_12:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
                break;
        case ICV_16:
                hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
                break;
        default:
                return -EINVAL;
        }
        return hmac_ctrl;
}

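/*
 * Unpack the RFC 4106 GCM key blob (AES key followed by a 4-byte
 * nonce/salt) into the SA entry and build the hardware key context:
 * the cipher key followed by the GHASH subkey H = AES_K(0^128).
 */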
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
                                    struct ipsec_sa_entry *sa_entry)
{
        int keylen = (x->aead->alg_key_len + 7) / 8;
        unsigned char *key = x->aead->alg_key;
        int ck_size, key_ctx_size = 0;
        unsigned char ghash_h[AEAD_H_SIZE];
        struct crypto_aes_ctx aes;
        int ret = 0;

        if (keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(sa_entry->salt, key + keylen, 4);
        }

        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        } else if (keylen == AES_KEYSIZE_256) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        } else {
                pr_err("GCM: Invalid key length %d\n", keylen);
                ret = -EINVAL;
                goto out;
        }

        memcpy(sa_entry->key, key, keylen);
        sa_entry->enckey_len = keylen;
        key_ctx_size = sizeof(struct _key_ctx) +
                       ((DIV_ROUND_UP(keylen, 16)) << 4) +
                       AEAD_H_SIZE;

        sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
                                                 0, 0,
                                                 key_ctx_size >> 4);

        /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
         * key context.
         */
        ret = aes_expandkey(&aes, key, keylen);
        if (ret) {
                sa_entry->enckey_len = 0;
                goto out;
        }
        memset(ghash_h, 0, AEAD_H_SIZE);
        aes_encrypt(&aes, ghash_h, ghash_h);
        memzero_explicit(&aes, sizeof(aes));

        memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
               16), ghash_h, AEAD_H_SIZE);
        sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
                             AEAD_H_SIZE;
out:
        return ret;
}

/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;
        int res = 0;

        if (x->props.aalgo != SADB_AALG_NONE) {
                pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                pr_debug("CHCR: Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                pr_debug("CHCR: Only ESP xfrm state offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                pr_debug("CHCR: Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128 &&
            x->aead->alg_icv_len != 96) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                res = -ENOMEM;
                goto out;
        }

        sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
        if (x->props.flags & XFRM_STATE_ESN)
                sa_entry->esn = 1;
        chcr_ipsec_setkey(x, sa_entry);
        x->xso.offload_handle = (unsigned long)sa_entry;
        try_module_get(THIS_MODULE);
out:
        return res;
}

static void chcr_xfrm_del_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
        struct ipsec_sa_entry *sa_entry;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
        kfree(sa_entry);
        module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IP options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }
        /* Inline a single pdu only; GSO packets are not offloaded */
        if (skb_shinfo(skb)->gso_size)
                return false;
        return true;
}

static void chcr_advance_esn_state(struct xfrm_state *x)
{
        /* do nothing */
        if (!x->xso.offload_handle)
                return;
}

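/*
 * If the whole packet (WR header, CPLs, key context and payload) fits
 * within MAX_IMM_TX_PKT_LEN, it can be sent as immediate data inside
 * the work request. Returns the header length to reserve in that case,
 * or 0 if a scatter-gather list is required.
 */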
static inline int is_eth_imm(const struct sk_buff *skb,
                             struct ipsec_sa_entry *sa_entry)
{
        unsigned int kctx_len;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = sizeof(struct fw_ulptx_wr) +
                 sizeof(struct chcr_ipsec_req) + kctx_len;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (sa_entry->esn)
                hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
                           << 4);
        if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;
        return 0;
}

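/*
 * Compute the number of flits (8-byte units) the work request for this
 * packet occupies in the Tx descriptor ring.
 */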
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
                                             struct ipsec_sa_entry *sa_entry,
                                             bool *immediate)
{
        unsigned int kctx_len;
        unsigned int flits;
        int aadivlen;
        int hdrlen;

        kctx_len = sa_entry->kctx_len;
        hdrlen = is_eth_imm(skb, sa_entry);
        aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                                16) : 0;
        aadivlen <<= 4;

        /* If the skb is small enough, we can pump it out as a work request
         * with only immediate data. In that case we just have to have the
         * TX Packet header plus the skb data in the Work Request.
         */
        if (hdrlen) {
                *immediate = true;
                return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
        }

        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

        /* Otherwise, we're going to have to construct a Scatter gather list
         * of the skb body and fragments. We also include the flits necessary
         * for the TX Packet Work Request and CPL. We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits += (sizeof(struct fw_ulptx_wr) +
                  sizeof(struct chcr_ipsec_req) +
                  kctx_len +
                  sizeof(struct cpl_tx_pkt_core) +
                  aadivlen) / sizeof(__be64);
        return flits;
}

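/*
 * For ESN states the hardware needs the AAD and IV material up front:
 * write a zero-padded chcr_ipsec_aadiv block (SPI, 64-bit extended
 * sequence number, explicit IV) at the current ring position.
 */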
inline void *copy_esn_pktxt(struct sk_buff *skb,
                            struct net_device *dev,
                            void *pos,
                            struct ipsec_sa_entry *sa_entry)
{
        struct chcr_ipsec_aadiv *aadiv;
        struct ulptx_idata *sc_imm;
        struct ip_esp_hdr *esphdr;
        struct xfrm_offload *xo;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        __be64 seqno;
        u32 qidx;
        u32 seqlo;
        u8 *iv;
        int eoq;
        int len;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        if (!eoq)
                pos = q->q.desc;

        len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
        memset(pos, 0, len);
        aadiv = (struct chcr_ipsec_aadiv *)pos;
        esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
        iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
        xo = xfrm_offload(skb);

        aadiv->spi = esphdr->spi;
        seqlo = ntohl(esphdr->seq_no);
        seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
        memcpy(aadiv->seq_no, &seqno, 8);
        memcpy(aadiv->iv, iv, 8);

        if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
                sc_imm = (struct ulptx_idata *)(pos +
                         (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
                                       sizeof(__be64)) << 3));
                sc_imm->cmd_more = FILL_CMD_MORE(0);
                sc_imm->len = cpu_to_be32(skb->len);
        }
        pos += len;
        return pos;
}

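/*
 * Emit the CPL_TX_PKT_XT command for the packet. Checksum insertion is
 * disabled since the ESP packet arrives fully formed; VLAN insertion is
 * requested when a tag is present on the skb.
 */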
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              struct ipsec_sa_entry *sa_entry)
{
        struct cpl_tx_pkt_core *cpl;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        u32 ctrl0, qidx;
        u64 cntrl = 0;
        int left;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        left = (void *)q->q.stat - pos;
        if (!left)
                pos = q->q.desc;

        cpl = (struct cpl_tx_pkt_core *)pos;

        cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
                TXPKT_PF_V(adap->pf);
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
                cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }

        cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        pos += sizeof(struct cpl_tx_pkt_core);
        /* Copy ESN info for HW */
        if (sa_entry->esn)
                pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
        return pos;
}

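/*
 * Copy the key context (header, salt, key material and GHASH subkey)
 * into the ring, handling the wrap from the end of the descriptor ring
 * (q->q.stat) back to its base; each descriptor is 64 bytes, hence the
 * 64 * q->q.size bound.
 */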
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
                                  struct net_device *dev,
                                  void *pos,
                                  struct ipsec_sa_entry *sa_entry)
{
        struct _key_ctx *key_ctx;
        int left, eoq, key_len;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        unsigned int qidx;

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
        key_len = sa_entry->kctx_len;

        /* end of queue, reset pos to start of queue */
        eoq = (void *)q->q.stat - pos;
        left = eoq;
        if (!eoq) {
                pos = q->q.desc;
                left = 64 * q->q.size;
        }

        /* Copy the Key context header */
        key_ctx = (struct _key_ctx *)pos;
        key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
        memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
        pos += sizeof(struct _key_ctx);
        left -= sizeof(struct _key_ctx);

        if (likely(key_len <= left)) {
                memcpy(key_ctx->key, sa_entry->key, key_len);
                pos += key_len;
        } else {
                memcpy(pos, sa_entry->key, left);
                memcpy(q->q.desc, sa_entry->key + left,
                       key_len - left);
                pos = (u8 *)q->q.desc + (key_len - left);
        }
        /* Copy CPL TX PKT XT */
        pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

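/*
 * Build the crypto work request ahead of the packet data:
 * FW_ULPTX_WR | ULP_TXPKT | ULPTX_IDATA | CPL_TX_SEC_PDU | key context |
 * CPL_TX_PKT_XT [| AAD/IV block for ESN], followed by the payload as
 * immediate data or an SGL.
 */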
inline void *chcr_crypto_wreq(struct sk_buff *skb,
                              struct net_device *dev,
                              void *pos,
                              int credits,
                              struct ipsec_sa_entry *sa_entry)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        unsigned int ivsize = GCM_ESP_IV_SIZE;
        struct chcr_ipsec_wr *wr;
        bool immediate = false;
        u16 immdatalen = 0;
        unsigned int flits;
        u32 ivinoffset;
        u32 aadstart;
        u32 aadstop;
        u32 ciphstart;
        u16 sc_more = 0;
        u32 ivdrop = 0;
        u32 esnlen = 0;
        u32 wr_mid;
        u16 ndesc;
        int qidx = skb_get_queue_mapping(skb);
        struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
        unsigned int kctx_len = sa_entry->kctx_len;
        int qid = q->q.cntxt_id;

        atomic_inc(&adap->chcr_stats.ipsec_cnt);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = DIV_ROUND_UP(flits, 2);
        if (sa_entry->esn)
                ivdrop = 1;

        if (immediate)
                immdatalen = skb->len;

        if (sa_entry->esn) {
                esnlen = sizeof(struct chcr_ipsec_aadiv);
                if (!skb_is_nonlinear(skb))
                        sc_more = 1;
        }

        /* WR Header */
        wr = (struct chcr_ipsec_wr *)pos;
        wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
        wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
                netif_tx_stop_queue(q->txq);
                q->q.stops++;
                if (!q->dbqt)
                        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
        wr_mid |= FW_ULPTX_WR_DATA_F;
        wr->wreq.flowid_len16 = htonl(wr_mid);

        /* ULPTX */
        wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
        wr->req.ulptx.len = htonl(ndesc - 1);

        /* Sub-command */
        wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
        wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                         sizeof(wr->req.key_ctx) +
                                         kctx_len +
                                         sizeof(struct cpl_tx_pkt_core) +
                                         esnlen +
                                         (esnlen ? 0 : immdatalen));

        /* CPL_SEC_PDU */
        ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
                                     (skb_transport_offset(skb) +
                                      sizeof(struct ip_esp_hdr) + 1);
        wr->req.sec_cpl.op_ivinsrtofst = htonl(
                                CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
                                CPL_TX_SEC_PDU_CPLLEN_V(2) |
                                CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
                                CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

        wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
        aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
        aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
                                  (skb_transport_offset(skb) +
                                   sizeof(struct ip_esp_hdr));
        ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
                    GCM_ESP_IV_SIZE + 1;
        ciphstart += sa_entry->esn ? esnlen : 0;

        wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
                                                        aadstart,
                                                        aadstop,
                                                        ciphstart, 0);

        wr->req.sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
                                        sa_entry->authsize,
                                        sa_entry->authsize);
        wr->req.sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
                                         CHCR_SCMD_AUTH_MODE_GHASH,
                                         sa_entry->hmac_ctrl,
                                         ivsize >> 1);
        wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
                                                                 0, ivdrop, 0);

        pos += sizeof(struct fw_ulptx_wr) +
               sizeof(struct ulp_txpkt) +
               sizeof(struct ulptx_idata) +
               sizeof(struct cpl_tx_sec_pdu);

        pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

        return pos;
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        WARN_ON(n > SGE_MAX_WR_LEN / 8);
        return DIV_ROUND_UP(n, 8);
}

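/* One slot is kept unused so a full ring can be told apart from an
 * empty one.
 */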
static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
        netif_tx_stop_queue(q->txq);
        q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

/*
 * chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xfrm_state *x = xfrm_input_state(skb);
        unsigned int last_desc, ndesc, flits = 0;
        struct ipsec_sa_entry *sa_entry;
        u64 *pos, *end, *before, *sgl;
        struct tx_sw_desc *sgl_sdesc;
        int qidx, left, credits;
        bool immediate = false;
        struct sge_eth_txq *q;
        struct adapter *adap;
        struct port_info *pi;
        struct sec_path *sp;

        if (!x->xso.offload_handle)
                return NETDEV_TX_BUSY;

        sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

        sp = skb_sec_path(skb);
        if (sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb->queue_mapping;
        q = &adap->sge.ethtxq[qidx + pi->first_qset];

        cxgb4_reclaim_completed_tx(adap, &q->q, true);

        flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&q->q) - ndesc;

        if (unlikely(credits < 0)) {
                eth_txq_stop(q);
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
                        dev->name, qidx, credits, ndesc, txq_avail(&q->q),
                        flits);
                return NETDEV_TX_BUSY;
        }

        last_desc = q->q.pidx + ndesc - 1;
        if (last_desc >= q->q.size)
                last_desc -= q->q.size;
        sgl_sdesc = &q->q.sdesc[last_desc];

        if (!immediate &&
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
                goto out_free;
        }

        pos = (u64 *)&q->q.desc[q->q.pidx];
        before = (u64 *)pos;
        end = (u64 *)pos + flits;
        /* Setup IPSec CPL */
        pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
                                       credits, sa_entry);
        if (before > (u64 *)pos) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
        }
        if (pos == (u64 *)q->q.stat) {
                left = (u8 *)end - (u8 *)q->q.stat;
                end = (void *)q->q.desc + left;
                pos = (void *)q->q.desc;
        }

        sgl = (void *)pos;
        if (immediate) {
                cxgb4_inline_tx_skb(skb, &q->q, sgl);
                dev_consume_skb_any(skb);
        } else {
                cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
                                0, sgl_sdesc->addr);
                skb_orphan(skb);
                sgl_sdesc->skb = skb;
        }
        txq_advance(&q->q, ndesc);

        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
}