// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/debugfs.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>

static DEFINE_IDA(mana_adev_ida);

static int mana_adev_idx_alloc(void)
{
	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}

static void mana_adev_idx_free(int idx)
{
	ida_free(&mana_adev_ida, idx);
}

static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
			       loff_t *pos)
{
	struct gdma_queue *gdma_q = filp->private_data;

	return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
				       gdma_q->queue_size);
}

static const struct file_operations mana_dbg_q_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= mana_dbg_q_read,
};
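
/* These fops back the per-queue debugfs dump files created later in this
 * file (e.g. "eq_dump", "txq_dump", "cq_dump"): each file's private_data is
 * a struct gdma_queue, and a read simply returns the raw queue memory so
 * the ring contents can be inspected from user space.
 */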

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}
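
/* The smp_wmb() above pairs with the smp_rmb() in mana_poll_tx_cq(): the
 * writer orders the port_is_up store before waking the tx queues, so a CQ
 * poller that reads the txq stopped state and then port_is_up sees a
 * consistent pair when deciding whether to re-wake a queue.
 */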

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
{
	ash->dma_handle[sg_i] = da;
	ash->size[sg_i] = sge_len;

	tp->wqe_req.sgl[sg_i].address = da;
	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
	tp->wqe_req.sgl[sg_i].size = sge_len;
}

static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp, int gso_hs)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	int hsg = 1; /* num of SGEs of linear part */
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int skb_hlen = skb_headlen(skb);
	int sge0_len, sge1_len = 0;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int sg_i;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;

	if (gso_hs && gso_hs < skb_hlen) {
		sge0_len = gso_hs;
		sge1_len = skb_hlen - gso_hs;
	} else {
		sge0_len = skb_hlen;
	}

	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);

	if (sge1_len) {
		sg_i = 1;
		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
				    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
		hsg = 2;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg_i = hsg + i;

		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
			     gd->gpa_mkey);
	}

	return 0;

frag_err:
	for (i = sg_i - 1; i >= hsg; i--)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);

	for (i = hsg - 1; i >= 0; i--)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	return -ENOMEM;
}
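
/* Note the unwind order in frag_err above: fragment SGEs were mapped with
 * skb_frag_dma_map() and are released with dma_unmap_page(), while the one
 * or two linear-part SGEs (hsg of them) were mapped with dma_map_single()
 * and are released with dma_unmap_single().
 */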

/* Handle the case when the linear length of a GSO SKB is too large.
 * The MANA NIC requires GSO packets to put only the packet header in SGE 0,
 * so we need 2 SGEs when the skb linear part contains more than the header.
 * Return a positive value for the number of SGEs, or a negative value
 * for an error.
 */
static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
			     int gso_hs)
{
	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
	int skb_hlen = skb_headlen(skb);

	if (gso_hs < skb_hlen) {
		num_sge++;
	} else if (gso_hs > skb_hlen) {
		if (net_ratelimit())
			netdev_err(ndev,
				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
				   gso_hs, skb_hlen);

		return -EINVAL;
	}

	return num_sge;
}
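
/* As an illustration, a TSO skb whose linear part holds, say, a 54-byte
 * Ethernet/IPv4/TCP header plus some payload would be laid out by
 * mana_map_skb() as: SGE 0 = the gso_hs header bytes, SGE 1 = the rest of
 * the linear data, and SGE 2..N = the page fragments.
 */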

/* Get the GSO packet's header size */
static int mana_get_gso_hs(struct sk_buff *skb)
{
	int gso_hs;

	if (skb->encapsulation) {
		gso_hs = skb_inner_tcp_all_headers(skb);
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			gso_hs = skb_transport_offset(skb) +
				 sizeof(struct udphdr);
		} else {
			gso_hs = skb_tcp_all_headers(skb);
		}
	}

	return gso_hs;
}

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	int gso_hs = 0; /* zero for non-GSO pkts */
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;
	tx_stats = &txq->stats;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	if (skb_vlan_tag_present(skb)) {
		pkt_fmt = MANA_LONG_PKT_FMT;
		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->short_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->long_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	}

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		int num_sge;

		gso_hs = mana_get_gso_hs(skb);

		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
		if (num_sge > 0)
			pkg.wqe_req.num_sge = num_sge;
		else
			goto tx_drop_count;

		u64_stats_update_begin(&tx_stats->syncp);
		if (skb->encapsulation) {
			tx_stats->tso_inner_packets++;
			tx_stats->tso_inner_bytes += skb->len - gso_hs;
		} else {
			tx_stats->tso_packets++;
			tx_stats->tso_bytes += skb->len - gso_hs;
		}
		u64_stats_update_end(&tx_stats->syncp);

		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->csum_partial++;
		u64_stats_update_end(&tx_stats->syncp);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto tx_drop_count;
		}
	}

	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->mana_map_err++;
		u64_stats_update_end(&tx_stats->syncp);
		goto free_sgl_ptr;
	}

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}
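
/* Caching the chosen queue via sk_tx_queue_set() means the indirection-table
 * lookup above only runs when the socket has no valid cached mapping, rather
 * than on every packet; the cache is set only for full sockets that hold a
 * dst entry, which is when the core treats the cached value as usable.
 */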

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

/* Release pre-allocated RX buffers */
void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	if (!mpc->rxbufs_pre)
		goto out1;

	if (!mpc->das_pre)
		goto out2;

	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}

/* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding errors */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the arrays after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}

/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
			       u32 *headroom)
{
	if (mtu > MANA_XDP_MTU_MAX)
		*headroom = 0; /* no support for XDP */
	else
		*headroom = XDP_PACKET_HEADROOM;

	*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);

	/* Using page pool in this case, so alloc_size is PAGE_SIZE */
	if (*alloc_size < PAGE_SIZE)
		*alloc_size = PAGE_SIZE;

	*datasize = mtu + ETH_HLEN;
}
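
/* A worked example on a 4K-page system: for the default 1500-byte MTU the
 * padded, aligned allocation size stays below PAGE_SIZE, so alloc_size is
 * rounded up to PAGE_SIZE and the buffers come from the page pool, while
 * datasize is 1500 + ETH_HLEN = 1514 bytes. Jumbo MTUs push alloc_size past
 * PAGE_SIZE, and those buffers are allocated as page frags instead (see
 * mana_get_rxfrag() below).
 */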

int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	num_rxb = num_queues * mpc->rx_queue_size;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
			if (!va)
				goto error;

			page = virt_to_head_page(va);
			/* Check if the frag falls back to a single page */
			if (compound_order(page) <
			    get_order(mpc->rxbpre_alloc_size)) {
				put_page(page);
				goto error;
			}
		} else {
			page = dev_alloc_page();
			if (!page)
				goto error;

			va = page_to_virt(page);
		}

		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, da)) {
			put_page(virt_to_head_page(va));
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}

static int mana_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	unsigned int old_mtu = ndev->mtu;
	int err;

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new MTU\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		WRITE_ONCE(ndev->mtu, old_mtu);
	}

out:
	mana_pre_dealloc_rxbufs(mpc);
	return err;
}
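
/* The pre-allocation above makes the MTU change effectively all-or-nothing:
 * if memory for the new buffer size cannot be reserved up front, the old
 * MTU stays in place and the port is never torn down. Any pre-allocated
 * buffers left unused by mana_attach() are released at the end.
 */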

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
	.ndo_xdp_xmit		= mana_xdp_xmit,
	.ndo_change_mtu		= mana_change_mtu,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	/* At this point, all directories and files under the vport directory
	 * have already been cleaned up, so removing apc->mana_port_debugfs
	 * cannot cause any freed-memory access issues.
	 */
	debugfs_remove(apc->mana_port_debugfs);
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static void mana_cleanup_indir_table(struct mana_port_context *apc)
{
	apc->indir_table_sz = 0;
	kfree(apc->indir_table);
	kfree(apc->rxobj_table);
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}

static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}

static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));

	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;

	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
		gc->adapter_mtu = resp.adapter_mtu;
	else
		gc->adapter_mtu = ETH_FRAME_LEN;

	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	if (resp.num_indirection_ent > 0 &&
	    resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
	    is_power_of_2(resp.num_indirection_ent)) {
		*num_indir_entry = resp.num_indirection_ent;
	} else {
		netdev_warn(apc->ndev,
			    "Setting indirection table size to default %d for vPort %d\n",
			    MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
		*num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
	}

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

void mana_uncfg_vport(struct mana_port_context *apc)
{
	mutex_lock(&apc->vport_mutex);
	apc->vport_use_count--;
	WARN_ON(apc->vport_use_count < 0);
	mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to the kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create a RAW QP on a port if this port is already
	 * in use by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	mutex_lock(&apc->vport_mutex);
	if (apc->vport_use_count > 0) {
		mutex_unlock(&apc->vport_mutex);
		return -EBUSY;
	}
	apc->vport_use_count++;
	mutex_unlock(&apc->vport_mutex);

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
		    apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");

static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	u32 req_buf_size;
	int err;

	req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = apc->port_handle;
	req->num_indir_entries = apc->indir_table_sz;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;
	req->cqe_coalescing_enable = 0;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab)
		memcpy(req->indir_tab, apc->rxobj_table,
		       flex_array_size(req, indir_tab, req->num_indir_entries));

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}

	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
		    apc->port_handle, apc->indir_table_sz);
out:
	kfree(req);
	return err;
}

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	debugfs_remove_recursive(ac->mana_eqs_debugfs);

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static void mana_create_eq_debugfs(struct mana_context *ac, int i)
{
	struct mana_eq eq = ac->eqs[i];
	char eqnum[32];

	sprintf(eqnum, "eq%d", i);
	eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
	debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
	debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
	debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);

	for (i = 0; i < gc->max_num_queues; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
		mana_create_eq_debugfs(ac, i);
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* If the fence request failed, fall back to sleeping for a
		 * while to give the hardware time to quiesce the RQ.
		 */
		if (err)
			msleep(100);
	}
}

static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}
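
/* head and tail are free-running u32 counters, so the subtractions above are
 * intentionally done in unsigned arithmetic: they yield the correct used
 * space even after the counters wrap around.
 */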

static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int hsg, i;

	/* Number of SGEs of linear part */
	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;

	for (i = 0; i < hsg; i++)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}
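
/* This mirrors mana_map_skb(): the number of linear-part SGEs (hsg) is
 * recomputed from the skb itself, relying on the fact that mana_map_skb()
 * stored the first SGE's length in ash->size[0] (the GSO header size when
 * the linear part was split in two).
 */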

static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			if (net_ratelimit())
				netdev_err(ndev, "TX: CQE error %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			apc->eth_stats.tx_cqe_err++;
			break;

		default:
			/* If the CQE type is unknown, log an error,
			 * and still free the SKB, update tail, etc.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "TX: unknown CQE type %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			apc->eth_stats.tx_cqe_unknown_type++;
			break;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
					&recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
				      uint pkt_len, struct xdp_buff *xdp)
{
	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
		return skb;
	}

	skb_reserve(skb, rxq->headroom);
	skb_put(skb, pkt_len);

	return skb;
}
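
/* When an XDP program ran on this buffer, xdp->data/data_end may have been
 * moved by the program (e.g. after a bpf_xdp_adjust_head() call), so the skb
 * above is built from the xdp_buff offsets rather than from the original
 * headroom and packet length.
 */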

static void mana_rx_skb(void *buf_va, bool from_pool,
			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act == XDP_REDIRECT && !rxq->xdp_rc)
		return;

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	if (from_pool)
		skb_mark_for_recycle(skb);

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	if (cqe->rx_vlantag_present) {
		u16 vlan_tci = cqe->rx_vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	if (from_pool) {
		page_pool_recycle_direct(rxq->page_pool,
					 virt_to_head_page(buf_va));
	} else {
		WARN_ON_ONCE(rxq->xdp_save_va);
		/* Save for reuse */
		rxq->xdp_save_va = buf_va;
	}

	++ndev->stats.rx_dropped;
}

static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
			     dma_addr_t *da, bool *from_pool, bool is_napi)
{
	struct page *page;
	void *va;

	*from_pool = false;

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_va) {
		va = rxq->xdp_save_va;
		rxq->xdp_save_va = NULL;
	} else if (rxq->alloc_size > PAGE_SIZE) {
		if (is_napi)
			va = napi_alloc_frag(rxq->alloc_size);
		else
			va = netdev_alloc_frag(rxq->alloc_size);

		if (!va)
			return NULL;

		page = virt_to_head_page(va);
		/* Check if the frag falls back to a single page */
		if (compound_order(page) < get_order(rxq->alloc_size)) {
			put_page(page);
			return NULL;
		}
	} else {
		page = page_pool_dev_alloc_pages(rxq->page_pool);
		if (!page)
			return NULL;

		*from_pool = true;
		va = page_to_virt(page);
	}

	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		if (*from_pool)
			page_pool_put_full_page(rxq->page_pool, page, false);
		else
			put_page(virt_to_head_page(va));

		return NULL;
	}

	return va;
}
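
/* RX buffers therefore come from one of three places, in order of
 * preference: a buffer saved from an XDP drop (reused directly), a page frag
 * for larger-than-page allocations, or the per-rxq page pool for the common
 * single-page case.
 */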

/* Allocate a frag for the rx buffer, and save the old buffer */
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
			       struct mana_recv_buf_oob *rxoob, void **old_buf,
			       bool *old_fp)
{
	bool from_pool;
	dma_addr_t da;
	void *va;

	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
	if (!va)
		return;

	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
			 DMA_FROM_DEVICE);
	*old_buf = rxoob->buf_va;
	*old_fp = rxoob->from_pool;

	rxoob->buf_va = va;
	rxoob->sgl[0].address = da;
	rxoob->from_pool = from_pool;
}

static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct mana_port_context *apc;
	struct device *dev = gc->dev;
	void *old_buf = NULL;
	u32 curr, pktlen;
	bool old_fp;

	apc = netdev_priv(ndev);

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		apc->eth_stats.rx_coalesced_err++;
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		apc->eth_stats.rx_cqe_unknown_type++;
		return;
	}

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);

	/* Unsuccessful refill will have old_buf == NULL.
	 * In this case, mana_rx_skb() will drop the packet.
	 */
	mana_rx_skb(old_buf, old_fp, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	rxq->xdp_flush = false;

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(rxq, cq, &comp[i]);
	}

	if (comp_read > 0) {
		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;

		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
	}

	if (rxq->xdp_flush)
		xdp_do_flush();
}

static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	int w;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	w = cq->work_done;
	cq->work_done_since_doorbell += w;

	if (w < cq->budget) {
		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
		cq->work_done_since_doorbell = 0;
		napi_complete_done(&cq->napi, w);
	} else if (cq->work_done_since_doorbell >
		   cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
		/* MANA hardware requires at least one doorbell ring every 8
		 * wraparounds of the CQ even if there is no need to arm the
		 * CQ. This driver rings the doorbell as soon as we have
		 * exceeded 4 wraparounds.
		 */
		mana_gd_ring_cq(gdma_queue, 0);
		cq->work_done_since_doorbell = 0;
	}

	return w;
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	int w;

	cq->work_done = 0;
	cq->budget = budget;

	w = mana_cq_handler(cq, cq->gdma_cq);

	return min(w, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	struct napi_struct *napi;
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);

		napi = &apc->tx_qp[i].tx_cq.napi;
		if (apc->tx_qp[i].txq.napi_initialized) {
			napi_synchronize(napi);
			napi_disable(napi);
			netif_napi_del(napi);
			apc->tx_qp[i].txq.napi_initialized = false;
		}
		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}
1935
1936static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
1937{
1938 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
1939 char qnum[32];
1940
1941 sprintf(qnum, "TX-%d", idx);
1942 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
1943 debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
1944 &tx_qp->txq.gdma_sq->head);
1945 debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
1946 &tx_qp->txq.gdma_sq->tail);
1947 debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
1948 &tx_qp->txq.pending_skbs.qlen);
1949 debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
1950 &tx_qp->tx_cq.gdma_cq->head);
1951 debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
1952 &tx_qp->tx_cq.gdma_cq->tail);
1953 debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
1954 &tx_qp->tx_cq.budget);
1955 debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
1956 tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
1957 debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
1958 tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
1959}
1960
1961static int mana_create_txq(struct mana_port_context *apc,
1962 struct net_device *net)
1963{
1964 struct mana_context *ac = apc->ac;
1965 struct gdma_dev *gd = ac->gdma_dev;
1966 struct mana_obj_spec wq_spec;
1967 struct mana_obj_spec cq_spec;
1968 struct gdma_queue_spec spec;
1969 struct gdma_context *gc;
1970 struct mana_txq *txq;
1971 struct mana_cq *cq;
1972 u32 txq_size;
1973 u32 cq_size;
1974 int err;
1975 int i;
1976
1977 apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1978 GFP_KERNEL);
1979 if (!apc->tx_qp)
1980 return -ENOMEM;
1981
1982 /* The minimum size of the WQE is 32 bytes, hence
1983 * apc->tx_queue_size represents the maximum number of WQEs
1984 * the SQ can store. This value is then used to size other queues
1985 * to prevent overflow.
1986 * Also note that the txq_size is always going to be MANA_PAGE_ALIGNED,
1987 * as min val of apc->tx_queue_size is 128 and that would make
1988 * txq_size 128*32 = 4096 and the other higher values of apc->tx_queue_size
1989 * are always power of two
1990 */
1991 txq_size = apc->tx_queue_size * 32;
1992
1993 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
1994
1995 gc = gd->gdma_context;
1996
1997 for (i = 0; i < apc->num_queues; i++) {
1998 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1999
2000 /* Create SQ */
2001 txq = &apc->tx_qp[i].txq;
2002
2003 u64_stats_init(&txq->stats.syncp);
2004 txq->ndev = net;
2005 txq->net_txq = netdev_get_tx_queue(net, i);
2006 txq->vp_offset = apc->tx_vp_offset;
2007 txq->napi_initialized = false;
2008 skb_queue_head_init(&txq->pending_skbs);
2009
2010 memset(&spec, 0, sizeof(spec));
2011 spec.type = GDMA_SQ;
2012 spec.monitor_avl_buf = true;
2013 spec.queue_size = txq_size;
2014 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2015 if (err)
2016 goto out;
2017
2018 /* Create SQ's CQ */
2019 cq = &apc->tx_qp[i].tx_cq;
2020 cq->type = MANA_CQ_TYPE_TX;
2021
2022 cq->txq = txq;
2023
2024 memset(&spec, 0, sizeof(spec));
2025 spec.type = GDMA_CQ;
2026 spec.monitor_avl_buf = false;
2027 spec.queue_size = cq_size;
2028 spec.cq.callback = mana_schedule_napi;
2029 spec.cq.parent_eq = ac->eqs[i].eq;
2030 spec.cq.context = cq;
2031 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2032 if (err)
2033 goto out;
2034
2035 memset(&wq_spec, 0, sizeof(wq_spec));
2036 memset(&cq_spec, 0, sizeof(cq_spec));
2037
2038 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2039 wq_spec.queue_size = txq->gdma_sq->queue_size;
2040
2041 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2042 cq_spec.queue_size = cq->gdma_cq->queue_size;
2043 cq_spec.modr_ctx_id = 0;
2044 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2045
		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
					 &wq_spec, &cq_spec,
					 &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

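		/* The regions are now owned by the hardware WQ object;
		 * invalidate the local handles so queue teardown does not
		 * try to free them again.
		 */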
		txq->gdma_sq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		mana_create_txq_debugfs(apc, i);

		netif_napi_add_tx(net, &cq->napi, mana_poll);
		napi_enable(&cq->napi);
		txq->napi_initialized = true;

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void mana_destroy_rxq(struct mana_port_context *apc,
			     struct mana_rxq *rxq, bool napi_initialized)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct napi_struct *napi;
	struct page *page;
	int i;

	if (!rxq)
		return;

	debugfs_remove_recursive(rxq->mana_rx_debugfs);

	napi = &rxq->rx_cq.napi;

	if (napi_initialized) {
		napi_synchronize(napi);

		napi_disable(napi);

		netif_napi_del(napi);
	}
	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

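	/* Release the buffer saved aside for XDP processing, if any */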
	if (rxq->xdp_save_va)
		put_page(virt_to_head_page(rxq->xdp_save_va));

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (!rx_oob->buf_va)
			continue;

		dma_unmap_single(dev, rx_oob->sgl[0].address,
				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);

		page = virt_to_head_page(rx_oob->buf_va);

		if (rx_oob->from_pool)
			page_pool_put_full_page(rxq->page_pool, page, false);
		else
			put_page(page);

		rx_oob->buf_va = NULL;
	}

	page_pool_destroy(rxq->page_pool);

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	kfree(rxq);
}

static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
			    struct mana_rxq *rxq, struct device *dev)
{
	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
	bool from_pool = false;
	dma_addr_t da;
	void *va;

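	/* Use a pre-allocated RX buffer if one is available; otherwise
	 * allocate a fresh fragment, possibly from the page pool.
	 */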
	if (mpc->rxbufs_pre)
		va = mana_get_rxbuf_pre(rxq, &da);
	else
		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);

	if (!va)
		return -ENOMEM;

	rx_oob->buf_va = va;
	rx_oob->from_pool = from_pool;

	rx_oob->sgl[0].address = da;
	rx_oob->sgl[0].size = rxq->datasize;
	rx_oob->sgl[0].mem_key = mem_key;

	return 0;
}

#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16

static int mana_alloc_rx_wqe(struct mana_port_context *apc,
			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	u32 buf_idx;
	int ret;

	WARN_ON(rxq->datasize == 0);

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		rx_oob->num_sge = 1;

		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
				       dev);
		if (ret)
			return ret;

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

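		/* Account for one 32-byte-aligned WQE and one completion
		 * entry per RX buffer.
		 */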
		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

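/* Post one receive WQE per RX buffer and ring the RQ doorbell */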
static int mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	u32 buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
					    &rx_oob->wqe_inf);
		if (err)
			return -ENOSPC;
	}

	return 0;
}

static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
{
	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
	struct page_pool_params pprm = {};
	int ret;

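	/* One pool per RXQ, sized to the queue depth; binding this queue's
	 * NAPI instance lets pages be recycled directly in NAPI context.
	 */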
	pprm.pool_size = mpc->rx_queue_size;
	pprm.nid = gc->numa_node;
	pprm.napi = &rxq->rx_cq.napi;
	pprm.netdev = rxq->ndev;

	rxq->page_pool = page_pool_create(&pprm);

	if (IS_ERR(rxq->page_pool)) {
		ret = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return ret;
	}

	return 0;
}

static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
					u32 rxq_idx, struct mana_eq *eq,
					struct net_device *ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	struct gdma_context *gc;
	u32 cq_size, rq_size;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
		      GFP_KERNEL);
	if (!rxq)
		return NULL;

	rxq->ndev = ndev;
	rxq->num_rx_buf = apc->rx_queue_size;
	rxq->rxq_idx = rxq_idx;
	rxq->rxobj = INVALID_MANA_HANDLE;

	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
			   &rxq->headroom);

	/* Create page pool for RX queue */
	err = mana_create_page_pool(rxq, gc);
	if (err) {
		netdev_err(ndev, "Create page pool err:%d\n", err);
		goto out;
	}

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	rq_size = MANA_PAGE_ALIGN(rq_size);
	cq_size = MANA_PAGE_ALIGN(cq_size);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_napi;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
				 &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
		err = -EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);

	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
				 cq->napi.napi_id));
	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					   rxq->page_pool));

	napi_enable(&cq->napi);

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);

	mana_destroy_rxq(apc, rxq, false);

	if (cq)
		mana_deinit_cq(apc, cq);

	return NULL;
}

static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
{
	struct mana_rxq *rxq;
	char qnum[32];

	rxq = apc->rxqs[idx];

	sprintf(qnum, "RX-%d", idx);
	rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
	debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
	debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
	debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
	debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
			   &rxq->rx_cq.gdma_cq->head);
	debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
			   &rxq->rx_cq.gdma_cq->tail);
	debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
	debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
	debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
			    &mana_dbg_q_fops);
}

static int mana_add_rx_queues(struct mana_port_context *apc,
			      struct net_device *ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int err = 0;
	int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
		if (!rxq) {
			err = -ENOMEM;
			goto out;
		}

		u64_stats_init(&rxq->stats.syncp);

		apc->rxqs[i] = rxq;

		mana_create_rxq_debugfs(apc, i);
	}

	apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
	return err;
}

static void mana_destroy_vport(struct mana_port_context *apc)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_rxq *rxq;
	u32 rxq_idx;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		if (!rxq)
			continue;

		mana_destroy_rxq(apc, rxq, true);
		apc->rxqs[rxq_idx] = NULL;
	}

	mana_destroy_txq(apc);
	mana_uncfg_vport(apc);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_hw_vport(apc);
}

static int mana_create_vport(struct mana_port_context *apc,
			     struct net_device *net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_hw_vport(apc);
		if (err)
			return err;
	}

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err)
		return err;

	return mana_create_txq(apc, net);
}

static int mana_rss_table_alloc(struct mana_port_context *apc)
{
	if (!apc->indir_table_sz) {
		netdev_err(apc->ndev,
			   "Indirection table size not set for vPort %d\n",
			   apc->port_idx);
		return -EINVAL;
	}

	apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!apc->indir_table)
		return -ENOMEM;

	apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
	if (!apc->rxobj_table) {
		kfree(apc->indir_table);
		return -ENOMEM;
	}

	return 0;
}

static void mana_rss_table_init(struct mana_port_context *apc)
{
	int i;

	for (i = 0; i < apc->indir_table_sz; i++)
		apc->indir_table[i] =
			ethtool_rxfh_indir_default(i, apc->num_queues);
}

int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab)
{
	u32 queue_idx;
	int err;
	int i;

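	/* Translate the queue-index indirection table into RX object
	 * handles before pushing it to the hardware.
	 */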
	if (update_tab) {
		for (i = 0; i < apc->indir_table_sz; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}

void mana_query_gf_stats(struct mana_port_context *apc)
{
	struct mana_query_gf_stat_resp resp = {};
	struct mana_query_gf_stat_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
			     sizeof(req), sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
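	/* Request every hardware counter this driver consumes */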
	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
			STATISTICS_FLAGS_HC_RX_BYTES |
			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
			STATISTICS_FLAGS_HC_TX_BYTES |
			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
		return;
	}
	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
			   resp.hdr.status);
		return;
	}

	apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
	apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
	apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
	apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
	apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
	apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
	apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
	apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
	apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
	apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
	apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
	apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
		resp.tx_err_inval_vport_offset_pkt;
	apc->eth_stats.hc_tx_err_vlan_enforcement =
		resp.tx_err_vlan_enforcement;
	apc->eth_stats.hc_tx_err_eth_type_enforcement =
		resp.tx_err_ethtype_enforcement;
	apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
	apc->eth_stats.hc_tx_err_sqpdid_enforcement =
		resp.tx_err_SQPDID_enforcement;
	apc->eth_stats.hc_tx_err_cqpdid_enforcement =
		resp.tx_err_CQPDID_enforcement;
	apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
	apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}

static int mana_init_port(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	u32 max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	struct gdma_context *gc;
	char vport[32];
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	gc = gd->gdma_context;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
				   &apc->indir_table_sz);
	if (err) {
		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
		goto reset_apc;
	}

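	/* Clamp the configured queue counts to what the vPort supports */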
	max_queues = min_t(u32, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	eth_hw_addr_set(ndev, apc->mac_addr);
	sprintf(vport, "vport%d", port_idx);
	apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
	return 0;

reset_apc:
	mana_cleanup_port_context(apc);
	return err;
}

int mana_alloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_filter(apc);
		if (err)
			goto destroy_vport;
	}

	mana_chn_setxdp(apc, mana_xdp_get(apc));

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}

int mana_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	err = mana_init_port(ndev);
	if (err)
		return err;

	if (apc->port_st_save) {
		err = mana_alloc_queues(ndev);
		if (err) {
			mana_cleanup_port_context(apc);
			return err;
		}
	}

	apc->port_is_up = apc->port_st_save;

	/* Ensure port state updated before txq state */
	smp_wmb();

	if (apc->port_is_up)
		netif_carrier_on(ndev);

	netif_device_attach(ndev);

	return 0;
}

static int mana_dealloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned long timeout = jiffies + 120 * HZ;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_txq *txq;
	struct sk_buff *skb;
	int i, err;
	u32 tsleep;

	if (apc->port_is_up)
		return -EINVAL;

	mana_chn_setxdp(apc, NULL);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_filter(apc);

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not see apc->port_is_up being cleared to
	 * false in time, but that doesn't matter since mana_start_xmit()
	 * drops any new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets, with a total timeout of
	 * 120 seconds across all the queues. The loop below breaks out
	 * early if the hardware stops responding; 120 seconds was chosen
	 * with the maximum number of queues in mind.
	 */

	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		tsleep = 1000;
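		/* Poll for completion with exponential backoff,
		 * starting at 1 ms.
		 */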
		while (atomic_read(&txq->pending_sends) > 0 &&
		       time_before(jiffies, timeout)) {
			usleep_range(tsleep, tsleep + 1000);
			tsleep <<= 1;
		}
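		/* If the queue failed to drain in time, the hardware is
		 * likely unresponsive: issue a PCIe function-level reset.
		 */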
		if (atomic_read(&txq->pending_sends)) {
			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
			if (err) {
				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
					   err, atomic_read(&txq->pending_sends),
					   txq->gdma_txq_id);
			}
			break;
		}
	}

	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;
		while ((skb = skb_dequeue(&txq->pending_skbs))) {
			mana_unmap_skb(skb, apc);
			dev_kfree_skb_any(skb);
		}
		atomic_set(&txq->pending_sends, 0);
	}
	/* The queues can no longer be woken up, because mana_poll_tx_cq()
	 * cannot be running at this point.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}

int mana_detach(struct net_device *ndev, bool from_close)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	if (apc->port_st_save) {
		err = mana_dealloc_queues(ndev);
		if (err)
			return err;
	}

	if (!from_close) {
		netif_device_detach(ndev);
		mana_cleanup_port_context(apc);
	}

	return 0;
}

static int mana_probe_port(struct mana_context *ac, int port_idx,
			   struct net_device **ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
				 gc->max_num_queues);
	if (!ndev)
		return -ENOMEM;

	*ndev_storage = ndev;

	apc = netdev_priv(ndev);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = gc->max_num_queues;
	apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
	apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->pf_filter_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;

	mutex_init(&apc->vport_mutex);
	apc->vport_use_count = 0;

	ndev->netdev_ops = &mana_devops;
	ndev->ethtool_ops = &mana_ethtool_ops;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->needed_headroom = MANA_HEADROOM;
	ndev->dev_port = port_idx;
	SET_NETDEV_DEV(ndev, gc->dev);

	netif_carrier_off(ndev);

	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto free_net;

	err = mana_rss_table_alloc(apc);
	if (err)
		goto reset_apc;

	netdev_lockdep_set_classes(ndev);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->hw_features |= NETIF_F_RXHASH;
	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	ndev->vlan_features = ndev->features;
	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
			      NETDEV_XDP_ACT_REDIRECT |
			      NETDEV_XDP_ACT_NDO_XMIT);

	err = register_netdev(ndev);
	if (err) {
		netdev_err(ndev, "Unable to register netdev.\n");
		goto free_indir;
	}

	return 0;

free_indir:
	mana_cleanup_indir_table(apc);
reset_apc:
	mana_cleanup_port_context(apc);
free_net:
	*ndev_storage = NULL;
	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	free_netdev(ndev);
	return err;
}

static void adev_release(struct device *dev)
{
	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);

	kfree(madev);
}

static void remove_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev = gd->adev;
	int id = adev->id;

	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);

	mana_adev_idx_free(id);
	gd->adev = NULL;
}

static int add_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev;
	struct mana_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return -ENOMEM;

	adev = &madev->adev;
	ret = mana_adev_idx_alloc();
	if (ret < 0)
		goto idx_fail;
	adev->id = ret;

	adev->name = "rdma";
	adev->dev.parent = gd->gdma_context->dev;
	adev->dev.release = adev_release;
	madev->mdev = gd;

	ret = auxiliary_device_init(adev);
	if (ret)
		goto init_fail;

	/* madev is owned by the auxiliary device */
	madev = NULL;
	ret = auxiliary_device_add(adev);
	if (ret)
		goto add_fail;

	gd->adev = adev;
	return 0;

add_fail:
	auxiliary_device_uninit(adev);

init_fail:
	mana_adev_idx_free(adev->id);

idx_fail:
	kfree(madev);

	return ret;
}

int mana_probe(struct gdma_dev *gd, bool resuming)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	u16 num_ports = 0;
	int err;
	int i;

	dev_info(dev,
		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

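	/* On the first probe, allocate the adapter context; on resume,
	 * reuse the one saved in gd->driver_data.
	 */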
	if (!resuming) {
		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
		if (!ac)
			return -ENOMEM;

		ac->gdma_dev = gd;
		gd->driver_data = ac;
	}

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
				    MANA_MICRO_VERSION, &num_ports);
	if (err)
		goto out;

	if (!resuming) {
		ac->num_ports = num_ports;
	} else {
		if (ac->num_ports != num_ports) {
			dev_err(dev, "The number of vPorts changed: %d->%d\n",
				ac->num_ports, num_ports);
			err = -EPROTO;
			goto out;
		}
	}

	if (ac->num_ports == 0)
		dev_err(dev, "Failed to detect any vPort\n");

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	if (!resuming) {
		for (i = 0; i < ac->num_ports; i++) {
			err = mana_probe_port(ac, i, &ac->ports[i]);
			/* Log the port for which the probe failed and stop
			 * probing subsequent ports. Ports whose probes
			 * succeeded keep running, unless add_adev() fails
			 * too.
			 */
			if (err) {
				dev_err(dev, "Probe Failed for port %d\n", i);
				break;
			}
		}
	} else {
		for (i = 0; i < ac->num_ports; i++) {
			rtnl_lock();
			err = mana_attach(ac->ports[i]);
			rtnl_unlock();
			/* Log the port for which the attach failed and stop
			 * attaching subsequent ports. Ports whose attaches
			 * succeeded keep running, unless add_adev() fails
			 * too.
			 */
			if (err) {
				dev_err(dev, "Attach Failed for port %d\n", i);
				break;
			}
		}
	}

	err = add_adev(gd);
out:
	if (err)
		mana_remove(gd, false);

	return err;
}

void mana_remove(struct gdma_dev *gd, bool suspending)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct mana_port_context *apc;
	struct device *dev = gc->dev;
	struct net_device *ndev;
	int err;
	int i;

	/* adev currently doesn't support suspending, always remove it */
	if (gd->adev)
		remove_adev(gd);

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				dev_err(dev, "No net device to remove\n");
			goto out;
		}
		apc = netdev_priv(ndev);

		/* All cleanup actions should stay after rtnl_lock(), otherwise
		 * other functions may access partially cleaned up data.
		 */
		rtnl_lock();

		err = mana_detach(ndev, false);
		if (err)
			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
				   i, err);

		if (suspending) {
			/* No need to unregister the ndev. */
			rtnl_unlock();
			continue;
		}

		unregister_netdevice(ndev);
		mana_cleanup_indir_table(apc);

		rtnl_unlock();

		free_netdev(ndev);
	}

	mana_destroy_eq(ac);
out:
	mana_gd_deregister_device(gd);

	if (suspending)
		return;

	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	kfree(ac);
}

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
{
	struct net_device *ndev;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "Taking primary netdev without holding the RCU read lock");
	if (port_index >= ac->num_ports)
		return NULL;

	/* When mana is used in netvsc, the upper netdevice should be returned. */
	if (ac->ports[port_index]->flags & IFF_SLAVE)
		ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
	else
		ndev = ac->ports[port_index];

	return ndev;
}
EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, "NET_MANA");
1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/* Copyright (c) 2021, Microsoft Corporation. */
3
4#include <uapi/linux/bpf.h>
5
6#include <linux/inetdevice.h>
7#include <linux/etherdevice.h>
8#include <linux/ethtool.h>
9#include <linux/filter.h>
10#include <linux/mm.h>
11
12#include <net/checksum.h>
13#include <net/ip6_checksum.h>
14
15#include <net/mana/mana.h>
16#include <net/mana/mana_auxiliary.h>
17
18static DEFINE_IDA(mana_adev_ida);
19
20static int mana_adev_idx_alloc(void)
21{
22 return ida_alloc(&mana_adev_ida, GFP_KERNEL);
23}
24
25static void mana_adev_idx_free(int idx)
26{
27 ida_free(&mana_adev_ida, idx);
28}
29
30/* Microsoft Azure Network Adapter (MANA) functions */
31
32static int mana_open(struct net_device *ndev)
33{
34 struct mana_port_context *apc = netdev_priv(ndev);
35 int err;
36
37 err = mana_alloc_queues(ndev);
38 if (err)
39 return err;
40
41 apc->port_is_up = true;
42
43 /* Ensure port state updated before txq state */
44 smp_wmb();
45
46 netif_carrier_on(ndev);
47 netif_tx_wake_all_queues(ndev);
48
49 return 0;
50}
51
52static int mana_close(struct net_device *ndev)
53{
54 struct mana_port_context *apc = netdev_priv(ndev);
55
56 if (!apc->port_is_up)
57 return 0;
58
59 return mana_detach(ndev, true);
60}
61
62static bool mana_can_tx(struct gdma_queue *wq)
63{
64 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
65}
66
67static unsigned int mana_checksum_info(struct sk_buff *skb)
68{
69 if (skb->protocol == htons(ETH_P_IP)) {
70 struct iphdr *ip = ip_hdr(skb);
71
72 if (ip->protocol == IPPROTO_TCP)
73 return IPPROTO_TCP;
74
75 if (ip->protocol == IPPROTO_UDP)
76 return IPPROTO_UDP;
77 } else if (skb->protocol == htons(ETH_P_IPV6)) {
78 struct ipv6hdr *ip6 = ipv6_hdr(skb);
79
80 if (ip6->nexthdr == IPPROTO_TCP)
81 return IPPROTO_TCP;
82
83 if (ip6->nexthdr == IPPROTO_UDP)
84 return IPPROTO_UDP;
85 }
86
87 /* No csum offloading */
88 return 0;
89}
90
91static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
92 struct mana_tx_package *tp)
93{
94 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
95 struct gdma_dev *gd = apc->ac->gdma_dev;
96 struct gdma_context *gc;
97 struct device *dev;
98 skb_frag_t *frag;
99 dma_addr_t da;
100 int i;
101
102 gc = gd->gdma_context;
103 dev = gc->dev;
104 da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
105
106 if (dma_mapping_error(dev, da))
107 return -ENOMEM;
108
109 ash->dma_handle[0] = da;
110 ash->size[0] = skb_headlen(skb);
111
112 tp->wqe_req.sgl[0].address = ash->dma_handle[0];
113 tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
114 tp->wqe_req.sgl[0].size = ash->size[0];
115
116 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
117 frag = &skb_shinfo(skb)->frags[i];
118 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
119 DMA_TO_DEVICE);
120
121 if (dma_mapping_error(dev, da))
122 goto frag_err;
123
124 ash->dma_handle[i + 1] = da;
125 ash->size[i + 1] = skb_frag_size(frag);
126
127 tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
128 tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
129 tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
130 }
131
132 return 0;
133
134frag_err:
135 for (i = i - 1; i >= 0; i--)
136 dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
137 DMA_TO_DEVICE);
138
139 dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
140
141 return -ENOMEM;
142}
143
144netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
145{
146 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
147 struct mana_port_context *apc = netdev_priv(ndev);
148 u16 txq_idx = skb_get_queue_mapping(skb);
149 struct gdma_dev *gd = apc->ac->gdma_dev;
150 bool ipv4 = false, ipv6 = false;
151 struct mana_tx_package pkg = {};
152 struct netdev_queue *net_txq;
153 struct mana_stats_tx *tx_stats;
154 struct gdma_queue *gdma_sq;
155 unsigned int csum_type;
156 struct mana_txq *txq;
157 struct mana_cq *cq;
158 int err, len;
159
160 if (unlikely(!apc->port_is_up))
161 goto tx_drop;
162
163 if (skb_cow_head(skb, MANA_HEADROOM))
164 goto tx_drop_count;
165
166 txq = &apc->tx_qp[txq_idx].txq;
167 gdma_sq = txq->gdma_sq;
168 cq = &apc->tx_qp[txq_idx].tx_cq;
169
170 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
171 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
172
173 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
174 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
175 pkt_fmt = MANA_LONG_PKT_FMT;
176 } else {
177 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
178 }
179
180 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
181
182 if (pkt_fmt == MANA_SHORT_PKT_FMT)
183 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
184 else
185 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
186
187 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
188 pkg.wqe_req.flags = 0;
189 pkg.wqe_req.client_data_unit = 0;
190
191 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
192 WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
193
194 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
195 pkg.wqe_req.sgl = pkg.sgl_array;
196 } else {
197 pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
198 sizeof(struct gdma_sge),
199 GFP_ATOMIC);
200 if (!pkg.sgl_ptr)
201 goto tx_drop_count;
202
203 pkg.wqe_req.sgl = pkg.sgl_ptr;
204 }
205
206 if (skb->protocol == htons(ETH_P_IP))
207 ipv4 = true;
208 else if (skb->protocol == htons(ETH_P_IPV6))
209 ipv6 = true;
210
211 if (skb_is_gso(skb)) {
212 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
213 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
214
215 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
216 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
217 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
218
219 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
220 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
221 if (ipv4) {
222 ip_hdr(skb)->tot_len = 0;
223 ip_hdr(skb)->check = 0;
224 tcp_hdr(skb)->check =
225 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
226 ip_hdr(skb)->daddr, 0,
227 IPPROTO_TCP, 0);
228 } else {
229 ipv6_hdr(skb)->payload_len = 0;
230 tcp_hdr(skb)->check =
231 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
232 &ipv6_hdr(skb)->daddr, 0,
233 IPPROTO_TCP, 0);
234 }
235 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
236 csum_type = mana_checksum_info(skb);
237
238 if (csum_type == IPPROTO_TCP) {
239 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
240 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
241
242 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
243 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
244
245 } else if (csum_type == IPPROTO_UDP) {
246 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
247 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
248
249 pkg.tx_oob.s_oob.comp_udp_csum = 1;
250 } else {
251 /* Can't do offload of this type of checksum */
252 if (skb_checksum_help(skb))
253 goto free_sgl_ptr;
254 }
255 }
256
257 if (mana_map_skb(skb, apc, &pkg))
258 goto free_sgl_ptr;
259
260 skb_queue_tail(&txq->pending_skbs, skb);
261
262 len = skb->len;
263 net_txq = netdev_get_tx_queue(ndev, txq_idx);
264
265 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
266 (struct gdma_posted_wqe_info *)skb->cb);
267 if (!mana_can_tx(gdma_sq)) {
268 netif_tx_stop_queue(net_txq);
269 apc->eth_stats.stop_queue++;
270 }
271
272 if (err) {
273 (void)skb_dequeue_tail(&txq->pending_skbs);
274 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
275 err = NETDEV_TX_BUSY;
276 goto tx_busy;
277 }
278
279 err = NETDEV_TX_OK;
280 atomic_inc(&txq->pending_sends);
281
282 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
283
284 /* skb may be freed after mana_gd_post_work_request. Do not use it. */
285 skb = NULL;
286
287 tx_stats = &txq->stats;
288 u64_stats_update_begin(&tx_stats->syncp);
289 tx_stats->packets++;
290 tx_stats->bytes += len;
291 u64_stats_update_end(&tx_stats->syncp);
292
293tx_busy:
294 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
295 netif_tx_wake_queue(net_txq);
296 apc->eth_stats.wake_queue++;
297 }
298
299 kfree(pkg.sgl_ptr);
300 return err;
301
302free_sgl_ptr:
303 kfree(pkg.sgl_ptr);
304tx_drop_count:
305 ndev->stats.tx_dropped++;
306tx_drop:
307 dev_kfree_skb_any(skb);
308 return NETDEV_TX_OK;
309}
310
311static void mana_get_stats64(struct net_device *ndev,
312 struct rtnl_link_stats64 *st)
313{
314 struct mana_port_context *apc = netdev_priv(ndev);
315 unsigned int num_queues = apc->num_queues;
316 struct mana_stats_rx *rx_stats;
317 struct mana_stats_tx *tx_stats;
318 unsigned int start;
319 u64 packets, bytes;
320 int q;
321
322 if (!apc->port_is_up)
323 return;
324
325 netdev_stats_to_stats64(st, &ndev->stats);
326
327 for (q = 0; q < num_queues; q++) {
328 rx_stats = &apc->rxqs[q]->stats;
329
330 do {
331 start = u64_stats_fetch_begin(&rx_stats->syncp);
332 packets = rx_stats->packets;
333 bytes = rx_stats->bytes;
334 } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
335
336 st->rx_packets += packets;
337 st->rx_bytes += bytes;
338 }
339
340 for (q = 0; q < num_queues; q++) {
341 tx_stats = &apc->tx_qp[q].txq.stats;
342
343 do {
344 start = u64_stats_fetch_begin(&tx_stats->syncp);
345 packets = tx_stats->packets;
346 bytes = tx_stats->bytes;
347 } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
348
349 st->tx_packets += packets;
350 st->tx_bytes += bytes;
351 }
352}
353
354static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
355 int old_q)
356{
357 struct mana_port_context *apc = netdev_priv(ndev);
358 u32 hash = skb_get_hash(skb);
359 struct sock *sk = skb->sk;
360 int txq;
361
362 txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
363
364 if (txq != old_q && sk && sk_fullsock(sk) &&
365 rcu_access_pointer(sk->sk_dst_cache))
366 sk_tx_queue_set(sk, txq);
367
368 return txq;
369}
370
371static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
372 struct net_device *sb_dev)
373{
374 int txq;
375
376 if (ndev->real_num_tx_queues == 1)
377 return 0;
378
379 txq = sk_tx_queue_get(skb->sk);
380
381 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
382 if (skb_rx_queue_recorded(skb))
383 txq = skb_get_rx_queue(skb);
384 else
385 txq = mana_get_tx_queue(ndev, skb, txq);
386 }
387
388 return txq;
389}
390
391static const struct net_device_ops mana_devops = {
392 .ndo_open = mana_open,
393 .ndo_stop = mana_close,
394 .ndo_select_queue = mana_select_queue,
395 .ndo_start_xmit = mana_start_xmit,
396 .ndo_validate_addr = eth_validate_addr,
397 .ndo_get_stats64 = mana_get_stats64,
398 .ndo_bpf = mana_bpf,
399 .ndo_xdp_xmit = mana_xdp_xmit,
400};
401
402static void mana_cleanup_port_context(struct mana_port_context *apc)
403{
404 kfree(apc->rxqs);
405 apc->rxqs = NULL;
406}
407
408static int mana_init_port_context(struct mana_port_context *apc)
409{
410 apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
411 GFP_KERNEL);
412
413 return !apc->rxqs ? -ENOMEM : 0;
414}
415
416static int mana_send_request(struct mana_context *ac, void *in_buf,
417 u32 in_len, void *out_buf, u32 out_len)
418{
419 struct gdma_context *gc = ac->gdma_dev->gdma_context;
420 struct gdma_resp_hdr *resp = out_buf;
421 struct gdma_req_hdr *req = in_buf;
422 struct device *dev = gc->dev;
423 static atomic_t activity_id;
424 int err;
425
426 req->dev_id = gc->mana.dev_id;
427 req->activity_id = atomic_inc_return(&activity_id);
428
429 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
430 out_buf);
431 if (err || resp->status) {
432 dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
433 err, resp->status);
434 return err ? err : -EPROTO;
435 }
436
437 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
438 req->activity_id != resp->activity_id) {
439 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
440 req->dev_id.as_uint32, resp->dev_id.as_uint32,
441 req->activity_id, resp->activity_id);
442 return -EPROTO;
443 }
444
445 return 0;
446}
447
448static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
449 const enum mana_command_code expected_code,
450 const u32 min_size)
451{
452 if (resp_hdr->response.msg_type != expected_code)
453 return -EPROTO;
454
455 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
456 return -EPROTO;
457
458 if (resp_hdr->response.msg_size < min_size)
459 return -EPROTO;
460
461 return 0;
462}
463
464static int mana_pf_register_hw_vport(struct mana_port_context *apc)
465{
466 struct mana_register_hw_vport_resp resp = {};
467 struct mana_register_hw_vport_req req = {};
468 int err;
469
470 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
471 sizeof(req), sizeof(resp));
472 req.attached_gfid = 1;
473 req.is_pf_default_vport = 1;
474 req.allow_all_ether_types = 1;
475
476 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
477 sizeof(resp));
478 if (err) {
479 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
480 return err;
481 }
482
483 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
484 sizeof(resp));
485 if (err || resp.hdr.status) {
486 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
487 err, resp.hdr.status);
488 return err ? err : -EPROTO;
489 }
490
491 apc->port_handle = resp.hw_vport_handle;
492 return 0;
493}
494
495static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
496{
497 struct mana_deregister_hw_vport_resp resp = {};
498 struct mana_deregister_hw_vport_req req = {};
499 int err;
500
501 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
502 sizeof(req), sizeof(resp));
503 req.hw_vport_handle = apc->port_handle;
504
505 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
506 sizeof(resp));
507 if (err) {
508 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
509 err);
510 return;
511 }
512
513 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
514 sizeof(resp));
515 if (err || resp.hdr.status)
516 netdev_err(apc->ndev,
517 "Failed to deregister hw vPort: %d, 0x%x\n",
518 err, resp.hdr.status);
519}
520
521static int mana_pf_register_filter(struct mana_port_context *apc)
522{
523 struct mana_register_filter_resp resp = {};
524 struct mana_register_filter_req req = {};
525 int err;
526
527 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
528 sizeof(req), sizeof(resp));
529 req.vport = apc->port_handle;
530 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
531
532 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
533 sizeof(resp));
534 if (err) {
535 netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
536 return err;
537 }
538
539 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
540 sizeof(resp));
541 if (err || resp.hdr.status) {
542 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
543 err, resp.hdr.status);
544 return err ? err : -EPROTO;
545 }
546
547 apc->pf_filter_handle = resp.filter_handle;
548 return 0;
549}
550
551static void mana_pf_deregister_filter(struct mana_port_context *apc)
552{
553 struct mana_deregister_filter_resp resp = {};
554 struct mana_deregister_filter_req req = {};
555 int err;
556
557 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
558 sizeof(req), sizeof(resp));
559 req.filter_handle = apc->pf_filter_handle;
560
561 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
562 sizeof(resp));
563 if (err) {
564 netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
565 err);
566 return;
567 }
568
569 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
570 sizeof(resp));
571 if (err || resp.hdr.status)
572 netdev_err(apc->ndev,
573 "Failed to deregister filter: %d, 0x%x\n",
574 err, resp.hdr.status);
575}
576
577static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
578 u32 proto_minor_ver, u32 proto_micro_ver,
579 u16 *max_num_vports)
580{
581 struct gdma_context *gc = ac->gdma_dev->gdma_context;
582 struct mana_query_device_cfg_resp resp = {};
583 struct mana_query_device_cfg_req req = {};
584 struct device *dev = gc->dev;
585 int err = 0;
586
587 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
588 sizeof(req), sizeof(resp));
589 req.proto_major_ver = proto_major_ver;
590 req.proto_minor_ver = proto_minor_ver;
591 req.proto_micro_ver = proto_micro_ver;
592
593 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
594 if (err) {
595 dev_err(dev, "Failed to query config: %d", err);
596 return err;
597 }
598
599 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
600 sizeof(resp));
601 if (err || resp.hdr.status) {
602 dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
603 resp.hdr.status);
604 if (!err)
605 err = -EPROTO;
606 return err;
607 }
608
609 *max_num_vports = resp.max_num_vports;
610
611 return 0;
612}
613
614static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
615 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
616{
617 struct mana_query_vport_cfg_resp resp = {};
618 struct mana_query_vport_cfg_req req = {};
619 int err;
620
621 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
622 sizeof(req), sizeof(resp));
623
624 req.vport_index = vport_index;
625
626 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
627 sizeof(resp));
628 if (err)
629 return err;
630
631 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
632 sizeof(resp));
633 if (err)
634 return err;
635
636 if (resp.hdr.status)
637 return -EPROTO;
638
639 *max_sq = resp.max_num_sq;
640 *max_rq = resp.max_num_rq;
641 *num_indir_entry = resp.num_indirection_ent;
642
643 apc->port_handle = resp.vport;
644 ether_addr_copy(apc->mac_addr, resp.mac_addr);
645
646 return 0;
647}
648
649void mana_uncfg_vport(struct mana_port_context *apc)
650{
651 mutex_lock(&apc->vport_mutex);
652 apc->vport_use_count--;
653 WARN_ON(apc->vport_use_count < 0);
654 mutex_unlock(&apc->vport_mutex);
655}
656EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
657
658int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
659 u32 doorbell_pg_id)
660{
661 struct mana_config_vport_resp resp = {};
662 struct mana_config_vport_req req = {};
663 int err;
664
665 /* This function is used to program the Ethernet port in the hardware
666 * table. It can be called from the Ethernet driver or the RDMA driver.
667 *
668 * For Ethernet usage, the hardware supports only one active user on a
669 * physical port. The driver checks on the port usage before programming
670 * the hardware when creating the RAW QP (RDMA driver) or exposing the
671 * device to kernel NET layer (Ethernet driver).
672 *
673 * Because the RDMA driver doesn't know in advance which QP type the
674 * user will create, it exposes the device with all its ports. The user
675 * may not be able to create RAW QP on a port if this port is already
676 * in used by the Ethernet driver from the kernel.
677 *
678 * This physical port limitation only applies to the RAW QP. For RC QP,
679 * the hardware doesn't have this limitation. The user can create RC
680 * QPs on a physical port up to the hardware limits independent of the
681 * Ethernet usage on the same port.
682 */
683 mutex_lock(&apc->vport_mutex);
684 if (apc->vport_use_count > 0) {
685 mutex_unlock(&apc->vport_mutex);
686 return -EBUSY;
687 }
688 apc->vport_use_count++;
689 mutex_unlock(&apc->vport_mutex);
690
691 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
692 sizeof(req), sizeof(resp));
693 req.vport = apc->port_handle;
694 req.pdid = protection_dom_id;
695 req.doorbell_pageid = doorbell_pg_id;
696
697 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
698 sizeof(resp));
699 if (err) {
700 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
701 goto out;
702 }
703
704 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
705 sizeof(resp));
706 if (err || resp.hdr.status) {
707 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
708 err, resp.hdr.status);
709 if (!err)
710 err = -EPROTO;
711
712 goto out;
713 }
714
715 apc->tx_shortform_allowed = resp.short_form_allowed;
716 apc->tx_vp_offset = resp.tx_vport_offset;
717
718 netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
719 apc->port_handle, protection_dom_id, doorbell_pg_id);
720out:
721 if (err)
722 mana_uncfg_vport(apc);
723
724 return err;
725}
726EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
727
728static int mana_cfg_vport_steering(struct mana_port_context *apc,
729 enum TRI_STATE rx,
730 bool update_default_rxobj, bool update_key,
731 bool update_tab)
732{
733 u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
734 struct mana_cfg_rx_steer_req *req = NULL;
735 struct mana_cfg_rx_steer_resp resp = {};
736 struct net_device *ndev = apc->ndev;
737 mana_handle_t *req_indir_tab;
738 u32 req_buf_size;
739 int err;
740
741 req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
742 req = kzalloc(req_buf_size, GFP_KERNEL);
743 if (!req)
744 return -ENOMEM;
745
746 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
747 sizeof(resp));
748
749 req->vport = apc->port_handle;
750 req->num_indir_entries = num_entries;
751 req->indir_tab_offset = sizeof(*req);
752 req->rx_enable = rx;
753 req->rss_enable = apc->rss_state;
754 req->update_default_rxobj = update_default_rxobj;
755 req->update_hashkey = update_key;
756 req->update_indir_tab = update_tab;
757 req->default_rxobj = apc->default_rxobj;
758
759 if (update_key)
760 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
761
762 if (update_tab) {
763 req_indir_tab = (mana_handle_t *)(req + 1);
764 memcpy(req_indir_tab, apc->rxobj_table,
765 req->num_indir_entries * sizeof(mana_handle_t));
766 }
767
768 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
769 sizeof(resp));
770 if (err) {
771 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
772 goto out;
773 }
774
775 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
776 sizeof(resp));
777 if (err) {
778 netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
779 goto out;
780 }
781
782 if (resp.hdr.status) {
783 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
784 resp.hdr.status);
785 err = -EPROTO;
786 }
787
788 netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
789 apc->port_handle, num_entries);
790out:
791 kfree(req);
792 return err;
793}
794
795int mana_create_wq_obj(struct mana_port_context *apc,
796 mana_handle_t vport,
797 u32 wq_type, struct mana_obj_spec *wq_spec,
798 struct mana_obj_spec *cq_spec,
799 mana_handle_t *wq_obj)
800{
801 struct mana_create_wqobj_resp resp = {};
802 struct mana_create_wqobj_req req = {};
803 struct net_device *ndev = apc->ndev;
804 int err;
805
806 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
807 sizeof(req), sizeof(resp));
808 req.vport = vport;
809 req.wq_type = wq_type;
810 req.wq_gdma_region = wq_spec->gdma_region;
811 req.cq_gdma_region = cq_spec->gdma_region;
812 req.wq_size = wq_spec->queue_size;
813 req.cq_size = cq_spec->queue_size;
814 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
815 req.cq_parent_qid = cq_spec->attached_eq;
816
817 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
818 sizeof(resp));
819 if (err) {
820 netdev_err(ndev, "Failed to create WQ object: %d\n", err);
821 goto out;
822 }
823
824 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
825 sizeof(resp));
826 if (err || resp.hdr.status) {
827 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
828 resp.hdr.status);
829 if (!err)
830 err = -EPROTO;
831 goto out;
832 }
833
834 if (resp.wq_obj == INVALID_MANA_HANDLE) {
835 netdev_err(ndev, "Got an invalid WQ object handle\n");
836 err = -EPROTO;
837 goto out;
838 }
839
840 *wq_obj = resp.wq_obj;
841 wq_spec->queue_index = resp.wq_id;
842 cq_spec->queue_index = resp.cq_id;
843
844 return 0;
845out:
846 return err;
847}
848EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
849
850void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
851 mana_handle_t wq_obj)
852{
853 struct mana_destroy_wqobj_resp resp = {};
854 struct mana_destroy_wqobj_req req = {};
855 struct net_device *ndev = apc->ndev;
856 int err;
857
858 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
859 sizeof(req), sizeof(resp));
860 req.wq_type = wq_type;
861 req.wq_obj_handle = wq_obj;
862
863 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
864 sizeof(resp));
865 if (err) {
866 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
867 return;
868 }
869
870 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
871 sizeof(resp));
872 if (err || resp.hdr.status)
873 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
874 resp.hdr.status);
875}
876EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
877
878static void mana_destroy_eq(struct mana_context *ac)
879{
880 struct gdma_context *gc = ac->gdma_dev->gdma_context;
881 struct gdma_queue *eq;
882 int i;
883
884 if (!ac->eqs)
885 return;
886
887 for (i = 0; i < gc->max_num_queues; i++) {
888 eq = ac->eqs[i].eq;
889 if (!eq)
890 continue;
891
892 mana_gd_destroy_queue(gc, eq);
893 }
894
895 kfree(ac->eqs);
896 ac->eqs = NULL;
897}
898
899static int mana_create_eq(struct mana_context *ac)
900{
901 struct gdma_dev *gd = ac->gdma_dev;
902 struct gdma_context *gc = gd->gdma_context;
903 struct gdma_queue_spec spec = {};
904 int err;
905 int i;
906
907 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
908 GFP_KERNEL);
909 if (!ac->eqs)
910 return -ENOMEM;
911
912 spec.type = GDMA_EQ;
913 spec.monitor_avl_buf = false;
914 spec.queue_size = EQ_SIZE;
915 spec.eq.callback = NULL;
916 spec.eq.context = ac->eqs;
917 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
918
919 for (i = 0; i < gc->max_num_queues; i++) {
920 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
921 if (err)
922 goto out;
923 }
924
925 return 0;
926out:
927 mana_destroy_eq(ac);
928 return err;
929}
930
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* If the fence failed, fall back to a fixed delay to give
		 * in-flight RX work a chance to drain.
		 */
		if (err)
			msleep(100);
	}
}

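/* Advance the WQ tail past num_units completed work queue units. The
 * used-space check relies on unsigned wraparound arithmetic, so it remains
 * correct when head and tail wrap around U32_MAX.
 */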
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

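/* Undo the DMA mappings created by mana_map_skb(): the first hsg entries
 * cover the linear part (mapped with dma_map_single()), the remaining ones
 * cover the page fragments.
 */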
static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int hsg, i;

	/* Number of SGEs of the linear part, mirroring mana_map_skb(): the
	 * linear part was split into two SGEs when the GSO header size is
	 * smaller than the linear length.
	 */
	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;

	for (i = 0; i < hsg; i++)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}

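/* Process TX completions: unmap and free the transmitted skbs, move the SQ
 * tail past the completed WQEs, and wake the netdev TX queue if it was
 * stopped and enough WQ space is available again.
 */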
static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
				  cqe_oob->cqe_hdr.cqe_type);
			break;

		default:
			/* If the CQE type is unexpected, log an error, assert,
			 * and go through the error path.
			 */
			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
				  cqe_oob->cqe_hdr.cqe_type);
			return;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

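/* Repost the receive WQE at the current buffer index and advance the index,
 * wrapping around at num_rx_buf.
 */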
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
				    &recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
				      struct xdp_buff *xdp)
{
	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
	} else {
		skb_reserve(skb, XDP_PACKET_HEADROOM);
		skb_put(skb, pkt_len);
	}

	return skb;
}

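/* Deliver one received packet: run it through XDP first, then build an skb
 * and pass it up the stack with the checksum and hash results from the CQE
 * applied.
 */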
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
			struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act == XDP_REDIRECT && !rxq->xdp_rc)
		return;

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	WARN_ON_ONCE(rxq->xdp_save_page);
	rxq->xdp_save_page = virt_to_page(buf_va);

	++ndev->stats.rx_dropped;
}

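/* Handle a single RX CQE: set up a freshly mapped replacement page (reusing
 * an XDP-dropped page when one is saved), hand the old buffer to
 * mana_rx_skb(), and repost the receive WQE. The packet is dropped if no
 * replacement buffer can be prepared.
 */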
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct device *dev = gc->dev;
	void *new_buf, *old_buf;
	struct page *new_page;
	u32 curr, pktlen;
	dma_addr_t da;

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		return;
	}

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_page) {
		new_page = rxq->xdp_save_page;
		rxq->xdp_save_page = NULL;
	} else {
		new_page = alloc_page(GFP_ATOMIC);
	}

	if (new_page) {
		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(new_page);
			new_page = NULL;
		}
	}

	new_buf = new_page ? page_to_virt(new_page) : NULL;

	if (new_buf) {
		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		old_buf = rxbuf_oob->buf_va;

		/* Refresh the rxbuf_oob with the new page */
		rxbuf_oob->buf_va = new_buf;
		rxbuf_oob->buf_dma_addr = da;
		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
	} else {
		old_buf = NULL; /* drop the packet if no memory */
	}

	mana_rx_skb(old_buf, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	rxq->xdp_flush = false;

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* Verify the recv CQE references the right RXQ */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(rxq, cq, &comp[i]);
	}

	if (rxq->xdp_flush)
		xdp_do_flush();
}

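/* Poll a CQ and ring its doorbell. The arm bit is set only when the NAPI
 * poll completes under budget, so a new interrupt fires for the next
 * completion.
 */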
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	u8 arm_bit;
	int w;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	w = cq->work_done;

	if (w < cq->budget &&
	    napi_complete_done(&cq->napi, w)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	mana_gd_ring_cq(gdma_queue, arm_bit);

	return w;
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	int w;

	cq->work_done = 0;
	cq->budget = budget;

	w = mana_cq_handler(cq, cq->gdma_cq);

	return min(w, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	struct napi_struct *napi;
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		napi = &apc->tx_qp[i].tx_cq.napi;
		napi_synchronize(napi);
		napi_disable(napi);
		netif_napi_del(napi);

		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}

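/* Create the per-queue TX resources: for each queue, an SQ and its CQ on the
 * i-th EQ, bound together through a WQ object on the vPort. The SQ is sized
 * to hold MAX_SEND_BUFFERS_PER_QUEUE minimum-size (32-byte) WQEs.
 */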
static int mana_create_txq(struct mana_port_context *apc,
			   struct net_device *net)
{
	struct mana_context *ac = apc->ac;
	struct gdma_dev *gd = ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct gdma_context *gc;
	struct mana_txq *txq;
	struct mana_cq *cq;
	u32 txq_size;
	u32 cq_size;
	int err;
	int i;

	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
			     GFP_KERNEL);
	if (!apc->tx_qp)
		return -ENOMEM;

	/* The minimum size of the WQE is 32 bytes, hence
	 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
	 * the SQ can store. This value is then used to size other queues
	 * to prevent overflow.
	 */
	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));

	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
	cq_size = PAGE_ALIGN(cq_size);

	gc = gd->gdma_context;

	for (i = 0; i < apc->num_queues; i++) {
		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

		/* Create SQ */
		txq = &apc->tx_qp[i].txq;

		u64_stats_init(&txq->stats.syncp);
		txq->ndev = net;
		txq->net_txq = netdev_get_tx_queue(net, i);
		txq->vp_offset = apc->tx_vp_offset;
		skb_queue_head_init(&txq->pending_skbs);

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_SQ;
		spec.monitor_avl_buf = true;
		spec.queue_size = txq_size;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
		if (err)
			goto out;

		/* Create SQ's CQ */
		cq = &apc->tx_qp[i].tx_cq;
		cq->type = MANA_CQ_TYPE_TX;

		cq->txq = txq;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_CQ;
		spec.monitor_avl_buf = false;
		spec.queue_size = cq_size;
		spec.cq.callback = mana_schedule_napi;
		spec.cq.parent_eq = ac->eqs[i].eq;
		spec.cq.context = cq;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
		if (err)
			goto out;

		memset(&wq_spec, 0, sizeof(wq_spec));
		memset(&cq_spec, 0, sizeof(cq_spec));

		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
		wq_spec.queue_size = txq->gdma_sq->queue_size;

		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
		cq_spec.queue_size = cq->gdma_cq->queue_size;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
					 &wq_spec, &cq_spec,
					 &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

		txq->gdma_sq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		netif_napi_add_tx(net, &cq->napi, mana_poll);
		napi_enable(&cq->napi);

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void mana_destroy_rxq(struct mana_port_context *apc,
			     struct mana_rxq *rxq, bool validate_state)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct napi_struct *napi;
	int i;

	if (!rxq)
		return;

	napi = &rxq->rx_cq.napi;

	if (validate_state)
		napi_synchronize(napi);

	napi_disable(napi);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	netif_napi_del(napi);

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	if (rxq->xdp_save_page)
		__free_page(rxq->xdp_save_page);

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (!rx_oob->buf_va)
			continue;

		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		free_page((unsigned long)rx_oob->buf_va);
		rx_oob->buf_va = NULL;
	}

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	kfree(rxq);
}

#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16

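/* Allocate and DMA-map one page per receive buffer, fill in the WQE requests
 * referencing them, and report the RQ and CQ sizes these buffers require.
 */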
static int mana_alloc_rx_wqe(struct mana_port_context *apc,
			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct page *page;
	dma_addr_t da;
	u32 buf_idx;

	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(page);
			return -ENOMEM;
		}

		rx_oob->buf_va = page_to_virt(page);
		rx_oob->buf_dma_addr = da;

		rx_oob->num_sge = 1;
		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
		rx_oob->sgl[0].size = rxq->datasize;
		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

static int mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	u32 buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
					    &rx_oob->wqe_inf);
		if (err)
			return -ENOSPC;
	}

	return 0;
}

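/* Create one RX queue: allocate the receive buffers, create the RQ and its
 * CQ on the given EQ, bind them through a WQ object, post the initial
 * receive WQEs, and register the queue with XDP and NAPI.
 */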
static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
					u32 rxq_idx, struct mana_eq *eq,
					struct net_device *ndev)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct mana_cq *cq = NULL;
	struct gdma_context *gc;
	u32 cq_size, rq_size;
	struct mana_rxq *rxq;
	int err;

	gc = gd->gdma_context;

	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
		      GFP_KERNEL);
	if (!rxq)
		return NULL;

	rxq->ndev = ndev;
	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
	rxq->rxq_idx = rxq_idx;
	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
	rxq->rxobj = INVALID_MANA_HANDLE;

	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
	if (err)
		goto out;

	rq_size = PAGE_ALIGN(rq_size);
	cq_size = PAGE_ALIGN(cq_size);

	/* Create RQ */
	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_RQ;
	spec.monitor_avl_buf = true;
	spec.queue_size = rq_size;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
	if (err)
		goto out;

	/* Create RQ's CQ */
	cq = &rxq->rx_cq;
	cq->type = MANA_CQ_TYPE_RX;
	cq->rxq = rxq;

	memset(&spec, 0, sizeof(spec));
	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = cq_size;
	spec.cq.callback = mana_schedule_napi;
	spec.cq.parent_eq = eq->eq;
	spec.cq.context = cq;
	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
	if (err)
		goto out;

	memset(&wq_spec, 0, sizeof(wq_spec));
	memset(&cq_spec, 0, sizeof(cq_spec));
	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
	wq_spec.queue_size = rxq->gdma_rq->queue_size;

	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
	cq_spec.queue_size = cq->gdma_cq->queue_size;
	cq_spec.modr_ctx_id = 0;
	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
				 &wq_spec, &cq_spec, &rxq->rxobj);
	if (err)
		goto out;

	rxq->gdma_rq->id = wq_spec.queue_index;
	cq->gdma_cq->id = cq_spec.queue_index;

	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

	rxq->gdma_id = rxq->gdma_rq->id;
	cq->gdma_id = cq->gdma_cq->id;

	err = mana_push_wqe(rxq);
	if (err)
		goto out;

	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
		err = -EINVAL;
		goto out;
	}

	gc->cq_table[cq->gdma_id] = cq->gdma_cq;

	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);

	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
				 cq->napi.napi_id));
	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					   MEM_TYPE_PAGE_SHARED, NULL));

	napi_enable(&cq->napi);

	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
	if (!err)
		return rxq;

	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);

	mana_destroy_rxq(apc, rxq, false);

	if (cq)
		mana_deinit_cq(apc, cq);

	return NULL;
}

static int mana_add_rx_queues(struct mana_port_context *apc,
			      struct net_device *ndev)
{
	struct mana_context *ac = apc->ac;
	struct mana_rxq *rxq;
	int err = 0;
	int i;

	for (i = 0; i < apc->num_queues; i++) {
		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
		if (!rxq) {
			err = -ENOMEM;
			goto out;
		}

		u64_stats_init(&rxq->stats.syncp);

		apc->rxqs[i] = rxq;
	}

	apc->default_rxobj = apc->rxqs[0]->rxobj;
out:
	return err;
}

static void mana_destroy_vport(struct mana_port_context *apc)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_rxq *rxq;
	u32 rxq_idx;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		if (!rxq)
			continue;

		mana_destroy_rxq(apc, rxq, true);
		apc->rxqs[rxq_idx] = NULL;
	}

	mana_destroy_txq(apc);
	mana_uncfg_vport(apc);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_hw_vport(apc);
}

static int mana_create_vport(struct mana_port_context *apc,
			     struct net_device *net)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	apc->default_rxobj = INVALID_MANA_HANDLE;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_hw_vport(apc);
		if (err)
			return err;
	}

	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
	if (err)
		return err;

	return mana_create_txq(apc, net);
}

static void mana_rss_table_init(struct mana_port_context *apc)
{
	int i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		apc->indir_table[i] =
			ethtool_rxfh_indir_default(i, apc->num_queues);
}

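/* Apply the RSS configuration to the vPort, then fence all RQs so that RX
 * work steered by the old indirection table has drained before returning.
 */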
int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
		    bool update_hash, bool update_tab)
{
	u32 queue_idx;
	int err;
	int i;

	if (update_tab) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			queue_idx = apc->indir_table[i];
			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
		}
	}

	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
	if (err)
		return err;

	mana_fence_rqs(apc);

	return 0;
}

static int mana_init_port(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 max_txq, max_rxq, max_queues;
	int port_idx = apc->port_idx;
	u32 num_indirect_entries;
	int err;

	err = mana_init_port_context(apc);
	if (err)
		return err;

	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
				   &num_indirect_entries);
	if (err) {
		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
		goto reset_apc;
	}

	max_queues = min_t(u32, max_txq, max_rxq);
	if (apc->max_queues > max_queues)
		apc->max_queues = max_queues;

	if (apc->num_queues > apc->max_queues)
		apc->num_queues = apc->max_queues;

	eth_hw_addr_set(ndev, apc->mac_addr);

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
	return err;
}

int mana_alloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int err;

	err = mana_create_vport(apc, ndev);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	err = mana_add_rx_queues(apc, ndev);
	if (err)
		goto destroy_vport;

	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;

	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
	if (err)
		goto destroy_vport;

	mana_rss_table_init(apc);

	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
	if (err)
		goto destroy_vport;

	if (gd->gdma_context->is_pf) {
		err = mana_pf_register_filter(apc);
		if (err)
			goto destroy_vport;
	}

	mana_chn_setxdp(apc, mana_xdp_get(apc));

	return 0;

destroy_vport:
	mana_destroy_vport(apc);
	return err;
}

int mana_attach(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	err = mana_init_port(ndev);
	if (err)
		return err;

	if (apc->port_st_save) {
		err = mana_alloc_queues(ndev);
		if (err) {
			mana_cleanup_port_context(apc);
			return err;
		}
	}

	apc->port_is_up = apc->port_st_save;

	/* Ensure port state updated before txq state */
	smp_wmb();

	if (apc->port_is_up)
		netif_carrier_on(ndev);

	netif_device_attach(ndev);

	return 0;
}

static int mana_dealloc_queues(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct mana_txq *txq;
	int i, err;

	if (apc->port_is_up)
		return -EINVAL;

	mana_chn_setxdp(apc, NULL);

	if (gd->gdma_context->is_pf)
		mana_pf_deregister_filter(apc);

	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
	 * a txq because it may not timely see apc->port_is_up being cleared
	 * to false, but it doesn't matter since mana_start_xmit() drops any
	 * new packets due to apc->port_is_up being false.
	 *
	 * Drain all the in-flight TX packets.
	 */
	for (i = 0; i < apc->num_queues; i++) {
		txq = &apc->tx_qp[i].txq;

		while (atomic_read(&txq->pending_sends) > 0)
			usleep_range(1000, 2000);
	}

	/* At this point the queues can no longer be woken up, because all
	 * pending sends have drained, so mana_poll_tx_cq() can no longer be
	 * running.
	 */

	apc->rss_state = TRI_STATE_FALSE;
	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
	if (err) {
		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
		return err;
	}

	mana_destroy_vport(apc);

	return 0;
}

int mana_detach(struct net_device *ndev, bool from_close)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	ASSERT_RTNL();

	apc->port_st_save = apc->port_is_up;
	apc->port_is_up = false;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	if (apc->port_st_save) {
		err = mana_dealloc_queues(ndev);
		if (err)
			return err;
	}

	if (!from_close) {
		netif_device_detach(ndev);
		mana_cleanup_port_context(apc);
	}

	return 0;
}

static int mana_probe_port(struct mana_context *ac, int port_idx,
			   struct net_device **ndev_storage)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_port_context *apc;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
				 gc->max_num_queues);
	if (!ndev)
		return -ENOMEM;

	*ndev_storage = ndev;

	apc = netdev_priv(ndev);
	apc->ac = ac;
	apc->ndev = ndev;
	apc->max_queues = gc->max_num_queues;
	apc->num_queues = gc->max_num_queues;
	apc->port_handle = INVALID_MANA_HANDLE;
	apc->pf_filter_handle = INVALID_MANA_HANDLE;
	apc->port_idx = port_idx;

	mutex_init(&apc->vport_mutex);
	apc->vport_use_count = 0;

	ndev->netdev_ops = &mana_devops;
	ndev->ethtool_ops = &mana_ethtool_ops;
	ndev->mtu = ETH_DATA_LEN;
	ndev->max_mtu = ndev->mtu;
	ndev->min_mtu = ndev->mtu;
	ndev->needed_headroom = MANA_HEADROOM;
	ndev->dev_port = port_idx;
	SET_NETDEV_DEV(ndev, gc->dev);

	netif_carrier_off(ndev);

	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);

	err = mana_init_port(ndev);
	if (err)
		goto free_net;

	netdev_lockdep_set_classes(ndev);

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features |= NETIF_F_RXCSUM;
	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	ndev->hw_features |= NETIF_F_RXHASH;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = 0;

	err = register_netdev(ndev);
	if (err) {
		netdev_err(ndev, "Unable to register netdev.\n");
		goto reset_apc;
	}

	return 0;

reset_apc:
	kfree(apc->rxqs);
	apc->rxqs = NULL;
free_net:
	*ndev_storage = NULL;
	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
	free_netdev(ndev);
	return err;
}

static void adev_release(struct device *dev)
{
	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);

	kfree(madev);
}

static void remove_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev = gd->adev;
	int id = adev->id;

	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);

	mana_adev_idx_free(id);
	gd->adev = NULL;
}

static int add_adev(struct gdma_dev *gd)
{
	struct auxiliary_device *adev;
	struct mana_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return -ENOMEM;

	adev = &madev->adev;
	ret = mana_adev_idx_alloc();
	if (ret < 0)
		goto idx_fail;
	adev->id = ret;

	adev->name = "rdma";
	adev->dev.parent = gd->gdma_context->dev;
	adev->dev.release = adev_release;
	madev->mdev = gd;

	ret = auxiliary_device_init(adev);
	if (ret)
		goto init_fail;

	ret = auxiliary_device_add(adev);
	if (ret)
		goto add_fail;

	gd->adev = adev;
	return 0;

add_fail:
	auxiliary_device_uninit(adev);

init_fail:
	mana_adev_idx_free(adev->id);

idx_fail:
	kfree(madev);

	return ret;
}

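/* Probe (or resume) the MANA device: create the EQs, query the device
 * configuration, probe (or re-attach) one net device per vPort, and expose
 * the auxiliary device used by the RDMA driver.
 */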
int mana_probe(struct gdma_dev *gd, bool resuming)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	u16 num_ports = 0;
	int err;
	int i;

	dev_info(dev,
		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);

	err = mana_gd_register_device(gd);
	if (err)
		return err;

	if (!resuming) {
		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
		if (!ac)
			return -ENOMEM;

		ac->gdma_dev = gd;
		gd->driver_data = ac;
	}

	err = mana_create_eq(ac);
	if (err)
		goto out;

	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
				    MANA_MICRO_VERSION, &num_ports);
	if (err)
		goto out;

	if (!resuming) {
		ac->num_ports = num_ports;
	} else {
		if (ac->num_ports != num_ports) {
			dev_err(dev, "The number of vPorts changed: %d->%d\n",
				ac->num_ports, num_ports);
			err = -EPROTO;
			goto out;
		}
	}

	if (ac->num_ports == 0)
		dev_err(dev, "Failed to detect any vPort\n");

	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
		ac->num_ports = MAX_PORTS_IN_MANA_DEV;

	if (!resuming) {
		for (i = 0; i < ac->num_ports; i++) {
			err = mana_probe_port(ac, i, &ac->ports[i]);
			if (err)
				break;
		}
	} else {
		for (i = 0; i < ac->num_ports; i++) {
			rtnl_lock();
			err = mana_attach(ac->ports[i]);
			rtnl_unlock();
			if (err)
				break;
		}
	}

	err = add_adev(gd);
out:
	if (err)
		mana_remove(gd, false);

	return err;
}

void mana_remove(struct gdma_dev *gd, bool suspending)
{
	struct gdma_context *gc = gd->gdma_context;
	struct mana_context *ac = gd->driver_data;
	struct device *dev = gc->dev;
	struct net_device *ndev;
	int err;
	int i;

	/* The adev currently doesn't support suspending, so always remove it. */
	if (gd->adev)
		remove_adev(gd);

	for (i = 0; i < ac->num_ports; i++) {
		ndev = ac->ports[i];
		if (!ndev) {
			if (i == 0)
				dev_err(dev, "No net device to remove\n");
			goto out;
		}

		/* All cleanup actions should stay after rtnl_lock(), otherwise
		 * other functions may access partially cleaned up data.
		 */
		rtnl_lock();

		err = mana_detach(ndev, false);
		if (err)
			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
				   i, err);

		if (suspending) {
			/* No need to unregister the ndev. */
			rtnl_unlock();
			continue;
		}

		unregister_netdevice(ndev);

		rtnl_unlock();

		free_netdev(ndev);
	}

	mana_destroy_eq(ac);
out:
	mana_gd_deregister_device(gd);

	if (suspending)
		return;

	gd->driver_data = NULL;
	gd->gdma_context = NULL;
	kfree(ac);
}