   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright (c) 2021, Microsoft Corporation. */
   3
   4#include <uapi/linux/bpf.h>
   5
   6#include <linux/debugfs.h>
   7#include <linux/inetdevice.h>
   8#include <linux/etherdevice.h>
   9#include <linux/ethtool.h>
  10#include <linux/filter.h>
  11#include <linux/mm.h>
  12#include <linux/pci.h>
  13
  14#include <net/checksum.h>
  15#include <net/ip6_checksum.h>
  16#include <net/page_pool/helpers.h>
  17#include <net/xdp.h>
  18
  19#include <net/mana/mana.h>
  20#include <net/mana/mana_auxiliary.h>
  21
  22static DEFINE_IDA(mana_adev_ida);
  23
  24static int mana_adev_idx_alloc(void)
  25{
  26	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
  27}
  28
  29static void mana_adev_idx_free(int idx)
  30{
  31	ida_free(&mana_adev_ida, idx);
  32}
  33
  34static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
  35			       loff_t *pos)
  36{
  37	struct gdma_queue *gdma_q = filp->private_data;
  38
  39	return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
  40				       gdma_q->queue_size);
  41}
  42
  43static const struct file_operations mana_dbg_q_fops = {
  44	.owner  = THIS_MODULE,
  45	.open   = simple_open,
  46	.read   = mana_dbg_q_read,
  47};
  48
  49/* Microsoft Azure Network Adapter (MANA) functions */
  50
  51static int mana_open(struct net_device *ndev)
  52{
  53	struct mana_port_context *apc = netdev_priv(ndev);
  54	int err;
  55
  56	err = mana_alloc_queues(ndev);
  57	if (err)
  58		return err;
  59
  60	apc->port_is_up = true;
  61
  62	/* Ensure port state updated before txq state */
  63	smp_wmb();
  64
  65	netif_carrier_on(ndev);
  66	netif_tx_wake_all_queues(ndev);
  67
  68	return 0;
  69}
  70
  71static int mana_close(struct net_device *ndev)
  72{
  73	struct mana_port_context *apc = netdev_priv(ndev);
  74
  75	if (!apc->port_is_up)
  76		return 0;
  77
  78	return mana_detach(ndev, true);
  79}
  80
  81static bool mana_can_tx(struct gdma_queue *wq)
  82{
  83	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
  84}
  85
  86static unsigned int mana_checksum_info(struct sk_buff *skb)
  87{
  88	if (skb->protocol == htons(ETH_P_IP)) {
  89		struct iphdr *ip = ip_hdr(skb);
  90
  91		if (ip->protocol == IPPROTO_TCP)
  92			return IPPROTO_TCP;
  93
  94		if (ip->protocol == IPPROTO_UDP)
  95			return IPPROTO_UDP;
  96	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  97		struct ipv6hdr *ip6 = ipv6_hdr(skb);
  98
  99		if (ip6->nexthdr == IPPROTO_TCP)
 100			return IPPROTO_TCP;
 101
 102		if (ip6->nexthdr == IPPROTO_UDP)
 103			return IPPROTO_UDP;
 104	}
 105
 106	/* No csum offloading */
 107	return 0;
 108}
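/* The value returned above is used by mana_start_xmit() to pick which
 * checksum-offload bits to set in the TX OOB; a return of 0 makes the
 * transmit path fall back to skb_checksum_help() for software checksumming.
 */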
 109
 110static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
 111			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
 112{
 113	ash->dma_handle[sg_i] = da;
 114	ash->size[sg_i] = sge_len;
 115
 116	tp->wqe_req.sgl[sg_i].address = da;
 117	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
 118	tp->wqe_req.sgl[sg_i].size = sge_len;
 119}
 120
 121static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
 122			struct mana_tx_package *tp, int gso_hs)
 123{
 124	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 125	int hsg = 1; /* num of SGEs of linear part */
 126	struct gdma_dev *gd = apc->ac->gdma_dev;
 127	int skb_hlen = skb_headlen(skb);
 128	int sge0_len, sge1_len = 0;
 129	struct gdma_context *gc;
 130	struct device *dev;
 131	skb_frag_t *frag;
 132	dma_addr_t da;
 133	int sg_i;
 134	int i;
 135
 136	gc = gd->gdma_context;
 137	dev = gc->dev;
 138
 139	if (gso_hs && gso_hs < skb_hlen) {
 140		sge0_len = gso_hs;
 141		sge1_len = skb_hlen - gso_hs;
 142	} else {
 143		sge0_len = skb_hlen;
 144	}
 145
 146	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
 147	if (dma_mapping_error(dev, da))
 148		return -ENOMEM;
 149
 150	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
 151
 152	if (sge1_len) {
 153		sg_i = 1;
 154		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
 155				    DMA_TO_DEVICE);
 156		if (dma_mapping_error(dev, da))
 157			goto frag_err;
 158
 159		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
 160		hsg = 2;
 161	}
 162
 163	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 164		sg_i = hsg + i;
 165
 166		frag = &skb_shinfo(skb)->frags[i];
 167		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 168				      DMA_TO_DEVICE);
 169		if (dma_mapping_error(dev, da))
 170			goto frag_err;
 171
 172		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
 173			     gd->gpa_mkey);
 174	}
 175
 176	return 0;
 177
 178frag_err:
 179	for (i = sg_i - 1; i >= hsg; i--)
 180		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
 181			       DMA_TO_DEVICE);
 182
 183	for (i = hsg - 1; i >= 0; i--)
 184		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
 185				 DMA_TO_DEVICE);
 186
 187	return -ENOMEM;
 188}
 189
  190/* Handle the case when the GSO SKB linear length is too large.
  191 * The MANA NIC requires GSO packets to carry only the packet headers in
  192 * SGE0, so 2 SGEs are needed for the skb linear part when it contains more
  193 * than the headers.
  194 * Return a positive value for the number of SGEs, or a negative value
  195 * for an error.
  196 */
 197static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
 198			     int gso_hs)
 199{
 200	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
 201	int skb_hlen = skb_headlen(skb);
 202
 203	if (gso_hs < skb_hlen) {
 204		num_sge++;
 205	} else if (gso_hs > skb_hlen) {
 206		if (net_ratelimit())
 207			netdev_err(ndev,
 208				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
 209				   gso_hs, skb_hlen);
 210
 211		return -EINVAL;
 212	}
 213
 214	return num_sge;
 215}
 216
 217/* Get the GSO packet's header size */
 218static int mana_get_gso_hs(struct sk_buff *skb)
 219{
 220	int gso_hs;
 221
 222	if (skb->encapsulation) {
 223		gso_hs = skb_inner_tcp_all_headers(skb);
 224	} else {
 225		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
 226			gso_hs = skb_transport_offset(skb) +
 227				 sizeof(struct udphdr);
 228		} else {
 229			gso_hs = skb_tcp_all_headers(skb);
 230		}
 231	}
 232
 233	return gso_hs;
 234}
 235
 236netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 237{
 238	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
 239	struct mana_port_context *apc = netdev_priv(ndev);
 240	int gso_hs = 0; /* zero for non-GSO pkts */
 241	u16 txq_idx = skb_get_queue_mapping(skb);
 242	struct gdma_dev *gd = apc->ac->gdma_dev;
 243	bool ipv4 = false, ipv6 = false;
 244	struct mana_tx_package pkg = {};
 245	struct netdev_queue *net_txq;
 246	struct mana_stats_tx *tx_stats;
 247	struct gdma_queue *gdma_sq;
 248	unsigned int csum_type;
 249	struct mana_txq *txq;
 250	struct mana_cq *cq;
 251	int err, len;
 252
 253	if (unlikely(!apc->port_is_up))
 254		goto tx_drop;
 255
 256	if (skb_cow_head(skb, MANA_HEADROOM))
 257		goto tx_drop_count;
 258
 259	txq = &apc->tx_qp[txq_idx].txq;
 260	gdma_sq = txq->gdma_sq;
 261	cq = &apc->tx_qp[txq_idx].tx_cq;
 262	tx_stats = &txq->stats;
 263
 264	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 265	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 266
 267	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
 268		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
 269		pkt_fmt = MANA_LONG_PKT_FMT;
 270	} else {
 271		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 272	}
 273
 274	if (skb_vlan_tag_present(skb)) {
 275		pkt_fmt = MANA_LONG_PKT_FMT;
 276		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
 277		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
 278		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
 279		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
 280	}
 281
 282	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 283
 284	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
 285		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
 286		u64_stats_update_begin(&tx_stats->syncp);
 287		tx_stats->short_pkt_fmt++;
 288		u64_stats_update_end(&tx_stats->syncp);
 289	} else {
 290		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
 291		u64_stats_update_begin(&tx_stats->syncp);
 292		tx_stats->long_pkt_fmt++;
 293		u64_stats_update_end(&tx_stats->syncp);
 294	}
 295
 296	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
 297	pkg.wqe_req.flags = 0;
 298	pkg.wqe_req.client_data_unit = 0;
 299
 300	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
 301
 302	if (skb->protocol == htons(ETH_P_IP))
 303		ipv4 = true;
 304	else if (skb->protocol == htons(ETH_P_IPV6))
 305		ipv6 = true;
 306
 307	if (skb_is_gso(skb)) {
 308		int num_sge;
 309
 310		gso_hs = mana_get_gso_hs(skb);
 311
 312		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
 313		if (num_sge > 0)
 314			pkg.wqe_req.num_sge = num_sge;
 315		else
 316			goto tx_drop_count;
 317
 318		u64_stats_update_begin(&tx_stats->syncp);
 319		if (skb->encapsulation) {
 320			tx_stats->tso_inner_packets++;
 321			tx_stats->tso_inner_bytes += skb->len - gso_hs;
 322		} else {
 323			tx_stats->tso_packets++;
 324			tx_stats->tso_bytes += skb->len - gso_hs;
 325		}
 326		u64_stats_update_end(&tx_stats->syncp);
 327
 328		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 329		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 330
 331		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
 332		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 333		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 334
 335		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
 336		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
 337		if (ipv4) {
 338			ip_hdr(skb)->tot_len = 0;
 339			ip_hdr(skb)->check = 0;
 340			tcp_hdr(skb)->check =
 341				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 342						   ip_hdr(skb)->daddr, 0,
 343						   IPPROTO_TCP, 0);
 344		} else {
 345			ipv6_hdr(skb)->payload_len = 0;
 346			tcp_hdr(skb)->check =
 347				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 348						 &ipv6_hdr(skb)->daddr, 0,
 349						 IPPROTO_TCP, 0);
 350		}
 351	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 352		csum_type = mana_checksum_info(skb);
 353
 354		u64_stats_update_begin(&tx_stats->syncp);
 355		tx_stats->csum_partial++;
 356		u64_stats_update_end(&tx_stats->syncp);
 357
 358		if (csum_type == IPPROTO_TCP) {
 359			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 360			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 361
 362			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 363			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 364
 365		} else if (csum_type == IPPROTO_UDP) {
 366			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 367			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 368
 369			pkg.tx_oob.s_oob.comp_udp_csum = 1;
 370		} else {
 371			/* Can't do offload of this type of checksum */
 372			if (skb_checksum_help(skb))
 373				goto tx_drop_count;
 374		}
 375	}
 376
 377	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 378
 379	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 380		pkg.wqe_req.sgl = pkg.sgl_array;
 381	} else {
 382		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
 383					    sizeof(struct gdma_sge),
 384					    GFP_ATOMIC);
 385		if (!pkg.sgl_ptr)
 386			goto tx_drop_count;
 387
 388		pkg.wqe_req.sgl = pkg.sgl_ptr;
 389	}
 390
 391	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
 392		u64_stats_update_begin(&tx_stats->syncp);
 393		tx_stats->mana_map_err++;
 394		u64_stats_update_end(&tx_stats->syncp);
 395		goto free_sgl_ptr;
 396	}
 397
 398	skb_queue_tail(&txq->pending_skbs, skb);
 399
 400	len = skb->len;
 401	net_txq = netdev_get_tx_queue(ndev, txq_idx);
 402
 403	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
 404					(struct gdma_posted_wqe_info *)skb->cb);
 405	if (!mana_can_tx(gdma_sq)) {
 406		netif_tx_stop_queue(net_txq);
 407		apc->eth_stats.stop_queue++;
 408	}
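	/* Note: the matching wake-up for the stop above happens either under the
	 * tx_busy label below (if space frees up before this function returns) or
	 * in mana_poll_tx_cq() once TX completions move the SQ tail forward.
	 */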
 409
 410	if (err) {
 411		(void)skb_dequeue_tail(&txq->pending_skbs);
 412		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
 413		err = NETDEV_TX_BUSY;
 414		goto tx_busy;
 415	}
 416
 417	err = NETDEV_TX_OK;
 418	atomic_inc(&txq->pending_sends);
 419
 420	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
 421
 422	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
 423	skb = NULL;
 424
 425	tx_stats = &txq->stats;
 426	u64_stats_update_begin(&tx_stats->syncp);
 427	tx_stats->packets++;
 428	tx_stats->bytes += len;
 429	u64_stats_update_end(&tx_stats->syncp);
 430
 431tx_busy:
 432	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 433		netif_tx_wake_queue(net_txq);
 434		apc->eth_stats.wake_queue++;
 435	}
 436
 437	kfree(pkg.sgl_ptr);
 438	return err;
 439
 440free_sgl_ptr:
 441	kfree(pkg.sgl_ptr);
 442tx_drop_count:
 443	ndev->stats.tx_dropped++;
 444tx_drop:
 445	dev_kfree_skb_any(skb);
 446	return NETDEV_TX_OK;
 447}
 448
 449static void mana_get_stats64(struct net_device *ndev,
 450			     struct rtnl_link_stats64 *st)
 451{
 452	struct mana_port_context *apc = netdev_priv(ndev);
 453	unsigned int num_queues = apc->num_queues;
 454	struct mana_stats_rx *rx_stats;
 455	struct mana_stats_tx *tx_stats;
 456	unsigned int start;
 457	u64 packets, bytes;
 458	int q;
 459
 460	if (!apc->port_is_up)
 461		return;
 462
 463	netdev_stats_to_stats64(st, &ndev->stats);
 464
 465	for (q = 0; q < num_queues; q++) {
 466		rx_stats = &apc->rxqs[q]->stats;
 467
 468		do {
 469			start = u64_stats_fetch_begin(&rx_stats->syncp);
 470			packets = rx_stats->packets;
 471			bytes = rx_stats->bytes;
 472		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
 473
 474		st->rx_packets += packets;
 475		st->rx_bytes += bytes;
 476	}
 477
 478	for (q = 0; q < num_queues; q++) {
 479		tx_stats = &apc->tx_qp[q].txq.stats;
 480
 481		do {
 482			start = u64_stats_fetch_begin(&tx_stats->syncp);
 483			packets = tx_stats->packets;
 484			bytes = tx_stats->bytes;
 485		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 486
 487		st->tx_packets += packets;
 488		st->tx_bytes += bytes;
 489	}
 490}
 491
 492static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
 493			     int old_q)
 494{
 495	struct mana_port_context *apc = netdev_priv(ndev);
 496	u32 hash = skb_get_hash(skb);
 497	struct sock *sk = skb->sk;
 498	int txq;
 499
 500	txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
 501
 502	if (txq != old_q && sk && sk_fullsock(sk) &&
 503	    rcu_access_pointer(sk->sk_dst_cache))
 504		sk_tx_queue_set(sk, txq);
 505
 506	return txq;
 507}
 508
 509static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 510			     struct net_device *sb_dev)
 511{
 512	int txq;
 513
 514	if (ndev->real_num_tx_queues == 1)
 515		return 0;
 516
 517	txq = sk_tx_queue_get(skb->sk);
 518
 519	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
 520		if (skb_rx_queue_recorded(skb))
 521			txq = skb_get_rx_queue(skb);
 522		else
 523			txq = mana_get_tx_queue(ndev, skb, txq);
 524	}
 525
 526	return txq;
 527}
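/* Note: the queue chosen via the indirection table is cached on the socket by
 * sk_tx_queue_set() in mana_get_tx_queue(), so later packets of the same flow
 * normally reuse it without another hash lookup (unless ooo_okay is set or the
 * cached value becomes invalid).
 */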
 528
 529/* Release pre-allocated RX buffers */
 530void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
 531{
 532	struct device *dev;
 533	int i;
 534
 535	dev = mpc->ac->gdma_dev->gdma_context->dev;
 536
 537	if (!mpc->rxbufs_pre)
 538		goto out1;
 539
 540	if (!mpc->das_pre)
 541		goto out2;
 542
 543	while (mpc->rxbpre_total) {
 544		i = --mpc->rxbpre_total;
 545		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
 546				 DMA_FROM_DEVICE);
 547		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
 548	}
 549
 550	kfree(mpc->das_pre);
 551	mpc->das_pre = NULL;
 552
 553out2:
 554	kfree(mpc->rxbufs_pre);
 555	mpc->rxbufs_pre = NULL;
 556
 557out1:
 558	mpc->rxbpre_datasize = 0;
 559	mpc->rxbpre_alloc_size = 0;
 560	mpc->rxbpre_headroom = 0;
 561}
 562
 563/* Get a buffer from the pre-allocated RX buffers */
 564static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
 565{
 566	struct net_device *ndev = rxq->ndev;
 567	struct mana_port_context *mpc;
 568	void *va;
 569
 570	mpc = netdev_priv(ndev);
 571
 572	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
 573		netdev_err(ndev, "No RX pre-allocated bufs\n");
 574		return NULL;
 575	}
 576
 577	/* Check sizes to catch unexpected coding error */
 578	if (mpc->rxbpre_datasize != rxq->datasize) {
 579		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
 580			   mpc->rxbpre_datasize, rxq->datasize);
 581		return NULL;
 582	}
 583
 584	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
 585		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
 586			   mpc->rxbpre_alloc_size, rxq->alloc_size);
 587		return NULL;
 588	}
 589
 590	if (mpc->rxbpre_headroom != rxq->headroom) {
 591		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
 592			   mpc->rxbpre_headroom, rxq->headroom);
 593		return NULL;
 594	}
 595
 596	mpc->rxbpre_total--;
 597
 598	*da = mpc->das_pre[mpc->rxbpre_total];
 599	va = mpc->rxbufs_pre[mpc->rxbpre_total];
 600	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
 601
 602	/* Deallocate the array after all buffers are gone */
 603	if (!mpc->rxbpre_total)
 604		mana_pre_dealloc_rxbufs(mpc);
 605
 606	return va;
 607}
 608
 609/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
 610static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
 611			       u32 *headroom)
 612{
 613	if (mtu > MANA_XDP_MTU_MAX)
 614		*headroom = 0; /* no support for XDP */
 615	else
 616		*headroom = XDP_PACKET_HEADROOM;
 617
 618	*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
 619
 620	/* Using page pool in this case, so alloc_size is PAGE_SIZE */
 621	if (*alloc_size < PAGE_SIZE)
 622		*alloc_size = PAGE_SIZE;
 623
 624	*datasize = mtu + ETH_HLEN;
 625}
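/* Illustrative example (assuming a 4 KiB PAGE_SIZE and the default 1500-byte
 * MTU, which is below MANA_XDP_MTU_MAX):
 *   headroom   = XDP_PACKET_HEADROOM
 *   alloc_size = SKB_DATA_ALIGN(1500 + MANA_RXBUF_PAD + headroom), which is
 *                smaller than PAGE_SIZE and is therefore rounded up to it
 *   datasize   = 1500 + ETH_HLEN = 1514
 */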
 626
 627int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
 628{
 629	struct device *dev;
 630	struct page *page;
 631	dma_addr_t da;
 632	int num_rxb;
 633	void *va;
 634	int i;
 635
 636	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
 637			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
 638
 639	dev = mpc->ac->gdma_dev->gdma_context->dev;
 640
 641	num_rxb = num_queues * mpc->rx_queue_size;
 642
 643	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
 644	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
 645	if (!mpc->rxbufs_pre)
 646		goto error;
 647
 648	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
 649	if (!mpc->das_pre)
 650		goto error;
 651
 652	mpc->rxbpre_total = 0;
 653
 654	for (i = 0; i < num_rxb; i++) {
 655		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
 656			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
 657			if (!va)
 658				goto error;
 659
 660			page = virt_to_head_page(va);
 661			/* Check if the frag falls back to single page */
 662			if (compound_order(page) <
 663			    get_order(mpc->rxbpre_alloc_size)) {
 664				put_page(page);
 665				goto error;
 666			}
 667		} else {
 668			page = dev_alloc_page();
 669			if (!page)
 670				goto error;
 671
 672			va = page_to_virt(page);
 673		}
 674
 675		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 676				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 677		if (dma_mapping_error(dev, da)) {
 678			put_page(virt_to_head_page(va));
 679			goto error;
 680		}
 681
 682		mpc->rxbufs_pre[i] = va;
 683		mpc->das_pre[i] = da;
 684		mpc->rxbpre_total = i + 1;
 685	}
 686
 687	return 0;
 688
 689error:
 690	mana_pre_dealloc_rxbufs(mpc);
 691	return -ENOMEM;
 692}
 693
 694static int mana_change_mtu(struct net_device *ndev, int new_mtu)
 695{
 696	struct mana_port_context *mpc = netdev_priv(ndev);
 697	unsigned int old_mtu = ndev->mtu;
 698	int err;
 699
 700	/* Pre-allocate buffers to prevent failure in mana_attach later */
 701	err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
 702	if (err) {
 703		netdev_err(ndev, "Insufficient memory for new MTU\n");
 704		return err;
 705	}
 706
 707	err = mana_detach(ndev, false);
 708	if (err) {
 709		netdev_err(ndev, "mana_detach failed: %d\n", err);
 710		goto out;
 711	}
 712
 713	WRITE_ONCE(ndev->mtu, new_mtu);
 714
 715	err = mana_attach(ndev);
 716	if (err) {
 717		netdev_err(ndev, "mana_attach failed: %d\n", err);
 718		WRITE_ONCE(ndev->mtu, old_mtu);
 719	}
 720
 721out:
 722	mana_pre_dealloc_rxbufs(mpc);
 723	return err;
 724}
 725
 726static const struct net_device_ops mana_devops = {
 727	.ndo_open		= mana_open,
 728	.ndo_stop		= mana_close,
 729	.ndo_select_queue	= mana_select_queue,
 730	.ndo_start_xmit		= mana_start_xmit,
 731	.ndo_validate_addr	= eth_validate_addr,
 732	.ndo_get_stats64	= mana_get_stats64,
 733	.ndo_bpf		= mana_bpf,
 734	.ndo_xdp_xmit		= mana_xdp_xmit,
 735	.ndo_change_mtu		= mana_change_mtu,
 736};
 737
 738static void mana_cleanup_port_context(struct mana_port_context *apc)
 739{
  740	/*
  741	 * At this point, all dirs/files under the vport directory
  742	 * have already been cleaned up, so removing
  743	 * apc->mana_port_debugfs cannot cause any access to
  744	 * freed memory.
  745	 */
 746	debugfs_remove(apc->mana_port_debugfs);
 747	kfree(apc->rxqs);
 748	apc->rxqs = NULL;
 749}
 750
 751static void mana_cleanup_indir_table(struct mana_port_context *apc)
 752{
 753	apc->indir_table_sz = 0;
 754	kfree(apc->indir_table);
 755	kfree(apc->rxobj_table);
 756}
 757
 758static int mana_init_port_context(struct mana_port_context *apc)
 759{
 760	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
 761			    GFP_KERNEL);
 762
 763	return !apc->rxqs ? -ENOMEM : 0;
 764}
 765
 766static int mana_send_request(struct mana_context *ac, void *in_buf,
 767			     u32 in_len, void *out_buf, u32 out_len)
 768{
 769	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 770	struct gdma_resp_hdr *resp = out_buf;
 771	struct gdma_req_hdr *req = in_buf;
 772	struct device *dev = gc->dev;
 773	static atomic_t activity_id;
 774	int err;
 775
 776	req->dev_id = gc->mana.dev_id;
 777	req->activity_id = atomic_inc_return(&activity_id);
 778
 779	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
 780				   out_buf);
 781	if (err || resp->status) {
 782		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
 783			err, resp->status);
 784		return err ? err : -EPROTO;
 785	}
 786
 787	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
 788	    req->activity_id != resp->activity_id) {
 789		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
 790			req->dev_id.as_uint32, resp->dev_id.as_uint32,
 791			req->activity_id, resp->activity_id);
 792		return -EPROTO;
 793	}
 794
 795	return 0;
 796}
 797
 798static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
 799				const enum mana_command_code expected_code,
 800				const u32 min_size)
 801{
 802	if (resp_hdr->response.msg_type != expected_code)
 803		return -EPROTO;
 804
 805	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
 806		return -EPROTO;
 807
 808	if (resp_hdr->response.msg_size < min_size)
 809		return -EPROTO;
 810
 811	return 0;
 812}
 813
 814static int mana_pf_register_hw_vport(struct mana_port_context *apc)
 815{
 816	struct mana_register_hw_vport_resp resp = {};
 817	struct mana_register_hw_vport_req req = {};
 818	int err;
 819
 820	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
 821			     sizeof(req), sizeof(resp));
 822	req.attached_gfid = 1;
 823	req.is_pf_default_vport = 1;
 824	req.allow_all_ether_types = 1;
 825
 826	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 827				sizeof(resp));
 828	if (err) {
 829		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
 830		return err;
 831	}
 832
 833	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
 834				   sizeof(resp));
 835	if (err || resp.hdr.status) {
 836		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
 837			   err, resp.hdr.status);
 838		return err ? err : -EPROTO;
 839	}
 840
 841	apc->port_handle = resp.hw_vport_handle;
 842	return 0;
 843}
 844
 845static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
 846{
 847	struct mana_deregister_hw_vport_resp resp = {};
 848	struct mana_deregister_hw_vport_req req = {};
 849	int err;
 850
 851	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
 852			     sizeof(req), sizeof(resp));
 853	req.hw_vport_handle = apc->port_handle;
 854
 855	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 856				sizeof(resp));
 857	if (err) {
 858		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
 859			   err);
 860		return;
 861	}
 862
 863	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
 864				   sizeof(resp));
 865	if (err || resp.hdr.status)
 866		netdev_err(apc->ndev,
 867			   "Failed to deregister hw vPort: %d, 0x%x\n",
 868			   err, resp.hdr.status);
 869}
 870
 871static int mana_pf_register_filter(struct mana_port_context *apc)
 872{
 873	struct mana_register_filter_resp resp = {};
 874	struct mana_register_filter_req req = {};
 875	int err;
 876
 877	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
 878			     sizeof(req), sizeof(resp));
 879	req.vport = apc->port_handle;
 880	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
 881
 882	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 883				sizeof(resp));
 884	if (err) {
 885		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
 886		return err;
 887	}
 888
 889	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
 890				   sizeof(resp));
 891	if (err || resp.hdr.status) {
 892		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
 893			   err, resp.hdr.status);
 894		return err ? err : -EPROTO;
 895	}
 896
 897	apc->pf_filter_handle = resp.filter_handle;
 898	return 0;
 899}
 900
 901static void mana_pf_deregister_filter(struct mana_port_context *apc)
 902{
 903	struct mana_deregister_filter_resp resp = {};
 904	struct mana_deregister_filter_req req = {};
 905	int err;
 906
 907	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
 908			     sizeof(req), sizeof(resp));
 909	req.filter_handle = apc->pf_filter_handle;
 910
 911	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 912				sizeof(resp));
 913	if (err) {
 914		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
 915			   err);
 916		return;
 917	}
 918
 919	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
 920				   sizeof(resp));
 921	if (err || resp.hdr.status)
 922		netdev_err(apc->ndev,
 923			   "Failed to deregister filter: %d, 0x%x\n",
 924			   err, resp.hdr.status);
 925}
 926
 927static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 928				 u32 proto_minor_ver, u32 proto_micro_ver,
 929				 u16 *max_num_vports)
 930{
 931	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 932	struct mana_query_device_cfg_resp resp = {};
 933	struct mana_query_device_cfg_req req = {};
 934	struct device *dev = gc->dev;
 935	int err = 0;
 936
 937	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 938			     sizeof(req), sizeof(resp));
 939
 940	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
 941
 942	req.proto_major_ver = proto_major_ver;
 943	req.proto_minor_ver = proto_minor_ver;
 944	req.proto_micro_ver = proto_micro_ver;
 945
 946	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
 947	if (err) {
 948		dev_err(dev, "Failed to query config: %d", err);
 949		return err;
 950	}
 951
 952	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
 953				   sizeof(resp));
 954	if (err || resp.hdr.status) {
 955		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
 956			resp.hdr.status);
 957		if (!err)
 958			err = -EPROTO;
 959		return err;
 960	}
 961
 962	*max_num_vports = resp.max_num_vports;
 963
 964	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
 965		gc->adapter_mtu = resp.adapter_mtu;
 966	else
 967		gc->adapter_mtu = ETH_FRAME_LEN;
 968
 969	debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
 970
 971	return 0;
 972}
 973
 974static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
 975				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
 976{
 977	struct mana_query_vport_cfg_resp resp = {};
 978	struct mana_query_vport_cfg_req req = {};
 979	int err;
 980
 981	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
 982			     sizeof(req), sizeof(resp));
 983
 984	req.vport_index = vport_index;
 985
 986	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 987				sizeof(resp));
 988	if (err)
 989		return err;
 990
 991	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
 992				   sizeof(resp));
 993	if (err)
 994		return err;
 995
 996	if (resp.hdr.status)
 997		return -EPROTO;
 998
 999	*max_sq = resp.max_num_sq;
1000	*max_rq = resp.max_num_rq;
1001	if (resp.num_indirection_ent > 0 &&
1002	    resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
1003	    is_power_of_2(resp.num_indirection_ent)) {
1004		*num_indir_entry = resp.num_indirection_ent;
1005	} else {
1006		netdev_warn(apc->ndev,
1007			    "Setting indirection table size to default %d for vPort %d\n",
1008			    MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
1009		*num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
1010	}
1011
1012	apc->port_handle = resp.vport;
1013	ether_addr_copy(apc->mac_addr, resp.mac_addr);
1014
1015	return 0;
1016}
1017
1018void mana_uncfg_vport(struct mana_port_context *apc)
1019{
1020	mutex_lock(&apc->vport_mutex);
1021	apc->vport_use_count--;
1022	WARN_ON(apc->vport_use_count < 0);
1023	mutex_unlock(&apc->vport_mutex);
1024}
1025EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
1026
1027int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
1028		   u32 doorbell_pg_id)
1029{
1030	struct mana_config_vport_resp resp = {};
1031	struct mana_config_vport_req req = {};
1032	int err;
1033
1034	/* This function is used to program the Ethernet port in the hardware
1035	 * table. It can be called from the Ethernet driver or the RDMA driver.
1036	 *
1037	 * For Ethernet usage, the hardware supports only one active user on a
1038	 * physical port. The driver checks on the port usage before programming
1039	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1040	 * device to kernel NET layer (Ethernet driver).
1041	 *
1042	 * Because the RDMA driver doesn't know in advance which QP type the
1043	 * user will create, it exposes the device with all its ports. The user
 1044	 * may not be able to create a RAW QP on a port if that port is already
 1045	 * in use by the Ethernet driver in the kernel.
1046	 *
1047	 * This physical port limitation only applies to the RAW QP. For RC QP,
1048	 * the hardware doesn't have this limitation. The user can create RC
1049	 * QPs on a physical port up to the hardware limits independent of the
1050	 * Ethernet usage on the same port.
1051	 */
1052	mutex_lock(&apc->vport_mutex);
1053	if (apc->vport_use_count > 0) {
1054		mutex_unlock(&apc->vport_mutex);
1055		return -EBUSY;
1056	}
1057	apc->vport_use_count++;
1058	mutex_unlock(&apc->vport_mutex);
1059
1060	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1061			     sizeof(req), sizeof(resp));
1062	req.vport = apc->port_handle;
1063	req.pdid = protection_dom_id;
1064	req.doorbell_pageid = doorbell_pg_id;
1065
1066	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1067				sizeof(resp));
1068	if (err) {
1069		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1070		goto out;
1071	}
1072
1073	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1074				   sizeof(resp));
1075	if (err || resp.hdr.status) {
1076		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1077			   err, resp.hdr.status);
1078		if (!err)
1079			err = -EPROTO;
1080
1081		goto out;
1082	}
1083
1084	apc->tx_shortform_allowed = resp.short_form_allowed;
1085	apc->tx_vp_offset = resp.tx_vport_offset;
1086
1087	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1088		    apc->port_handle, protection_dom_id, doorbell_pg_id);
1089out:
1090	if (err)
1091		mana_uncfg_vport(apc);
1092
1093	return err;
1094}
1095EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
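/* Illustrative usage (not part of this file): an external consumer such as the
 * MANA RDMA driver would need MODULE_IMPORT_NS("NET_MANA") and would then pair
 * a successful mana_cfg_vport(apc, protection_dom_id, doorbell_pg_id) with a
 * later mana_uncfg_vport(apc), matching the use-count taken above.
 */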
1096
1097static int mana_cfg_vport_steering(struct mana_port_context *apc,
1098				   enum TRI_STATE rx,
1099				   bool update_default_rxobj, bool update_key,
1100				   bool update_tab)
 1101{
1102	struct mana_cfg_rx_steer_req_v2 *req;
1103	struct mana_cfg_rx_steer_resp resp = {};
 1104	struct net_device *ndev = apc->ndev;
1105	u32 req_buf_size;
1106	int err;
1107
1108	req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
1109	req = kzalloc(req_buf_size, GFP_KERNEL);
1110	if (!req)
1111		return -ENOMEM;
1112
1113	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1114			     sizeof(resp));
1115
1116	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1117
1118	req->vport = apc->port_handle;
1119	req->num_indir_entries = apc->indir_table_sz;
1120	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
1121					 indir_tab);
1122	req->rx_enable = rx;
1123	req->rss_enable = apc->rss_state;
1124	req->update_default_rxobj = update_default_rxobj;
1125	req->update_hashkey = update_key;
1126	req->update_indir_tab = update_tab;
1127	req->default_rxobj = apc->default_rxobj;
1128	req->cqe_coalescing_enable = 0;
1129
1130	if (update_key)
1131		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1132
1133	if (update_tab)
1134		memcpy(req->indir_tab, apc->rxobj_table,
 1135		       flex_array_size(req, indir_tab, req->num_indir_entries));
1136
1137	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1138				sizeof(resp));
1139	if (err) {
1140		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1141		goto out;
1142	}
1143
1144	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1145				   sizeof(resp));
1146	if (err) {
1147		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1148		goto out;
1149	}
1150
1151	if (resp.hdr.status) {
1152		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1153			   resp.hdr.status);
1154		err = -EPROTO;
1155	}
1156
1157	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1158		    apc->port_handle, apc->indir_table_sz);
1159out:
1160	kfree(req);
1161	return err;
1162}
1163
1164int mana_create_wq_obj(struct mana_port_context *apc,
1165		       mana_handle_t vport,
1166		       u32 wq_type, struct mana_obj_spec *wq_spec,
1167		       struct mana_obj_spec *cq_spec,
1168		       mana_handle_t *wq_obj)
1169{
1170	struct mana_create_wqobj_resp resp = {};
1171	struct mana_create_wqobj_req req = {};
1172	struct net_device *ndev = apc->ndev;
1173	int err;
1174
1175	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1176			     sizeof(req), sizeof(resp));
1177	req.vport = vport;
1178	req.wq_type = wq_type;
1179	req.wq_gdma_region = wq_spec->gdma_region;
1180	req.cq_gdma_region = cq_spec->gdma_region;
1181	req.wq_size = wq_spec->queue_size;
1182	req.cq_size = cq_spec->queue_size;
1183	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1184	req.cq_parent_qid = cq_spec->attached_eq;
1185
1186	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1187				sizeof(resp));
1188	if (err) {
1189		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1190		goto out;
1191	}
1192
1193	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1194				   sizeof(resp));
1195	if (err || resp.hdr.status) {
1196		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1197			   resp.hdr.status);
1198		if (!err)
1199			err = -EPROTO;
1200		goto out;
1201	}
1202
1203	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1204		netdev_err(ndev, "Got an invalid WQ object handle\n");
1205		err = -EPROTO;
1206		goto out;
1207	}
1208
1209	*wq_obj = resp.wq_obj;
1210	wq_spec->queue_index = resp.wq_id;
1211	cq_spec->queue_index = resp.cq_id;
1212
1213	return 0;
1214out:
1215	return err;
1216}
1217EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
1218
1219void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1220			 mana_handle_t wq_obj)
1221{
1222	struct mana_destroy_wqobj_resp resp = {};
1223	struct mana_destroy_wqobj_req req = {};
1224	struct net_device *ndev = apc->ndev;
1225	int err;
1226
1227	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1228			     sizeof(req), sizeof(resp));
1229	req.wq_type = wq_type;
1230	req.wq_obj_handle = wq_obj;
1231
1232	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1233				sizeof(resp));
1234	if (err) {
1235		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1236		return;
1237	}
1238
1239	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1240				   sizeof(resp));
1241	if (err || resp.hdr.status)
1242		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1243			   resp.hdr.status);
1244}
1245EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
1246
1247static void mana_destroy_eq(struct mana_context *ac)
1248{
1249	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1250	struct gdma_queue *eq;
1251	int i;
1252
1253	if (!ac->eqs)
1254		return;
1255
1256	debugfs_remove_recursive(ac->mana_eqs_debugfs);
1257
1258	for (i = 0; i < gc->max_num_queues; i++) {
1259		eq = ac->eqs[i].eq;
1260		if (!eq)
1261			continue;
1262
1263		mana_gd_destroy_queue(gc, eq);
1264	}
1265
1266	kfree(ac->eqs);
1267	ac->eqs = NULL;
1268}
1269
1270static void mana_create_eq_debugfs(struct mana_context *ac, int i)
1271{
1272	struct mana_eq eq = ac->eqs[i];
1273	char eqnum[32];
1274
1275	sprintf(eqnum, "eq%d", i);
1276	eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
1277	debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
1278	debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
1279	debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
1280}
1281
1282static int mana_create_eq(struct mana_context *ac)
1283{
1284	struct gdma_dev *gd = ac->gdma_dev;
1285	struct gdma_context *gc = gd->gdma_context;
1286	struct gdma_queue_spec spec = {};
1287	int err;
1288	int i;
1289
1290	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
1291			  GFP_KERNEL);
1292	if (!ac->eqs)
1293		return -ENOMEM;
1294
1295	spec.type = GDMA_EQ;
1296	spec.monitor_avl_buf = false;
1297	spec.queue_size = EQ_SIZE;
1298	spec.eq.callback = NULL;
1299	spec.eq.context = ac->eqs;
1300	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1301
1302	ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
1303
1304	for (i = 0; i < gc->max_num_queues; i++) {
1305		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
1306		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1307		if (err)
1308			goto out;
1309		mana_create_eq_debugfs(ac, i);
1310	}
1311
1312	return 0;
1313out:
1314	mana_destroy_eq(ac);
1315	return err;
1316}
1317
1318static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1319{
1320	struct mana_fence_rq_resp resp = {};
1321	struct mana_fence_rq_req req = {};
1322	int err;
1323
1324	init_completion(&rxq->fence_event);
1325
1326	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1327			     sizeof(req), sizeof(resp));
1328	req.wq_obj_handle =  rxq->rxobj;
1329
1330	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1331				sizeof(resp));
1332	if (err) {
1333		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1334			   rxq->rxq_idx, err);
1335		return err;
1336	}
1337
1338	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1339	if (err || resp.hdr.status) {
1340		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1341			   rxq->rxq_idx, err, resp.hdr.status);
1342		if (!err)
1343			err = -EPROTO;
1344
1345		return err;
1346	}
1347
1348	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1349		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1350			   rxq->rxq_idx);
1351		return -ETIMEDOUT;
1352	}
1353
1354	return 0;
1355}
1356
1357static void mana_fence_rqs(struct mana_port_context *apc)
1358{
1359	unsigned int rxq_idx;
1360	struct mana_rxq *rxq;
1361	int err;
1362
1363	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1364		rxq = apc->rxqs[rxq_idx];
1365		err = mana_fence_rq(apc, rxq);
1366
 1367		/* If fencing failed, sleep briefly instead of waiting for completion. */
1368		if (err)
1369			msleep(100);
1370	}
1371}
1372
1373static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1374{
1375	u32 used_space_old;
1376	u32 used_space_new;
1377
1378	used_space_old = wq->head - wq->tail;
1379	used_space_new = wq->head - (wq->tail + num_units);
1380
1381	if (WARN_ON_ONCE(used_space_new > used_space_old))
1382		return -ERANGE;
1383
1384	wq->tail += num_units;
1385	return 0;
1386}
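/* Note: head and tail are free-running unsigned counters, so the subtractions
 * above remain correct across u32 wraparound; used_space_new can only exceed
 * used_space_old when the tail would be advanced past the head, which the
 * WARN_ON_ONCE() rejects with -ERANGE.
 */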
1387
1388static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1389{
1390	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1391	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1392	struct device *dev = gc->dev;
1393	int hsg, i;
1394
1395	/* Number of SGEs of linear part */
1396	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1397
1398	for (i = 0; i < hsg; i++)
1399		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1400				 DMA_TO_DEVICE);
1401
1402	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1403		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1404			       DMA_TO_DEVICE);
1405}
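/* Note: the hsg value computed above must mirror the mapping decision made in
 * mana_map_skb(): for a GSO skb whose linear part was split, ash->size[0]
 * holds only the header bytes, so skb_headlen() > ash->size[0] identifies the
 * two-SGE case.
 */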
1406
1407static void mana_poll_tx_cq(struct mana_cq *cq)
1408{
1409	struct gdma_comp *completions = cq->gdma_comp_buf;
1410	struct gdma_posted_wqe_info *wqe_info;
1411	unsigned int pkt_transmitted = 0;
1412	unsigned int wqe_unit_cnt = 0;
1413	struct mana_txq *txq = cq->txq;
1414	struct mana_port_context *apc;
1415	struct netdev_queue *net_txq;
1416	struct gdma_queue *gdma_wq;
1417	unsigned int avail_space;
1418	struct net_device *ndev;
1419	struct sk_buff *skb;
1420	bool txq_stopped;
1421	int comp_read;
1422	int i;
1423
1424	ndev = txq->ndev;
1425	apc = netdev_priv(ndev);
1426
1427	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1428				    CQE_POLLING_BUFFER);
1429
1430	if (comp_read < 1)
1431		return;
1432
1433	for (i = 0; i < comp_read; i++) {
1434		struct mana_tx_comp_oob *cqe_oob;
1435
1436		if (WARN_ON_ONCE(!completions[i].is_sq))
1437			return;
1438
1439		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1440		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1441				 MANA_CQE_COMPLETION))
1442			return;
1443
1444		switch (cqe_oob->cqe_hdr.cqe_type) {
1445		case CQE_TX_OKAY:
1446			break;
1447
1448		case CQE_TX_SA_DROP:
1449		case CQE_TX_MTU_DROP:
1450		case CQE_TX_INVALID_OOB:
1451		case CQE_TX_INVALID_ETH_TYPE:
1452		case CQE_TX_HDR_PROCESSING_ERROR:
1453		case CQE_TX_VF_DISABLED:
1454		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1455		case CQE_TX_VPORT_DISABLED:
1456		case CQE_TX_VLAN_TAGGING_VIOLATION:
1457			if (net_ratelimit())
1458				netdev_err(ndev, "TX: CQE error %d\n",
1459					   cqe_oob->cqe_hdr.cqe_type);
1460
1461			apc->eth_stats.tx_cqe_err++;
1462			break;
1463
1464		default:
1465			/* If the CQE type is unknown, log an error,
1466			 * and still free the SKB, update tail, etc.
1467			 */
1468			if (net_ratelimit())
1469				netdev_err(ndev, "TX: unknown CQE type %d\n",
1470					   cqe_oob->cqe_hdr.cqe_type);
1471
1472			apc->eth_stats.tx_cqe_unknown_type++;
1473			break;
1474		}
1475
1476		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1477			return;
1478
1479		skb = skb_dequeue(&txq->pending_skbs);
1480		if (WARN_ON_ONCE(!skb))
1481			return;
1482
1483		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1484		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1485
1486		mana_unmap_skb(skb, apc);
1487
1488		napi_consume_skb(skb, cq->budget);
1489
1490		pkt_transmitted++;
1491	}
1492
1493	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1494		return;
1495
1496	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1497
1498	gdma_wq = txq->gdma_sq;
1499	avail_space = mana_gd_wq_avail_space(gdma_wq);
1500
1501	/* Ensure tail updated before checking q stop */
1502	smp_mb();
1503
1504	net_txq = txq->net_txq;
1505	txq_stopped = netif_tx_queue_stopped(net_txq);
1506
1507	/* Ensure checking txq_stopped before apc->port_is_up. */
1508	smp_rmb();
1509
1510	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1511		netif_tx_wake_queue(net_txq);
1512		apc->eth_stats.wake_queue++;
1513	}
1514
1515	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1516		WARN_ON_ONCE(1);
1517
1518	cq->work_done = pkt_transmitted;
1519}
1520
1521static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1522{
1523	struct mana_recv_buf_oob *recv_buf_oob;
1524	u32 curr_index;
1525	int err;
1526
1527	curr_index = rxq->buf_index++;
1528	if (rxq->buf_index == rxq->num_rx_buf)
1529		rxq->buf_index = 0;
1530
1531	recv_buf_oob = &rxq->rx_oobs[curr_index];
1532
1533	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1534					&recv_buf_oob->wqe_inf);
1535	if (WARN_ON_ONCE(err))
1536		return;
1537
1538	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1539}
1540
1541static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1542				      uint pkt_len, struct xdp_buff *xdp)
1543{
1544	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1545
1546	if (!skb)
1547		return NULL;
1548
1549	if (xdp->data_hard_start) {
1550		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1551		skb_put(skb, xdp->data_end - xdp->data);
1552		return skb;
1553	}
1554
1555	skb_reserve(skb, rxq->headroom);
1556	skb_put(skb, pkt_len);
1557
1558	return skb;
1559}
1560
1561static void mana_rx_skb(void *buf_va, bool from_pool,
1562			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1563{
1564	struct mana_stats_rx *rx_stats = &rxq->stats;
1565	struct net_device *ndev = rxq->ndev;
1566	uint pkt_len = cqe->ppi[0].pkt_len;
1567	u16 rxq_idx = rxq->rxq_idx;
1568	struct napi_struct *napi;
1569	struct xdp_buff xdp = {};
1570	struct sk_buff *skb;
1571	u32 hash_value;
1572	u32 act;
1573
1574	rxq->rx_cq.work_done++;
1575	napi = &rxq->rx_cq.napi;
1576
1577	if (!buf_va) {
1578		++ndev->stats.rx_dropped;
1579		return;
1580	}
1581
1582	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1583
1584	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1585		return;
1586
1587	if (act != XDP_PASS && act != XDP_TX)
1588		goto drop_xdp;
1589
1590	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1591
1592	if (!skb)
1593		goto drop;
1594
1595	if (from_pool)
1596		skb_mark_for_recycle(skb);
1597
1598	skb->dev = napi->dev;
1599
1600	skb->protocol = eth_type_trans(skb, ndev);
1601	skb_checksum_none_assert(skb);
1602	skb_record_rx_queue(skb, rxq_idx);
1603
1604	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1605		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1606			skb->ip_summed = CHECKSUM_UNNECESSARY;
1607	}
1608
1609	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1610		hash_value = cqe->ppi[0].pkt_hash;
1611
1612		if (cqe->rx_hashtype & MANA_HASH_L4)
1613			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1614		else
1615			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1616	}
1617
1618	if (cqe->rx_vlantag_present) {
1619		u16 vlan_tci = cqe->rx_vlan_id;
1620
1621		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1622	}
1623
1624	u64_stats_update_begin(&rx_stats->syncp);
1625	rx_stats->packets++;
1626	rx_stats->bytes += pkt_len;
1627
1628	if (act == XDP_TX)
1629		rx_stats->xdp_tx++;
1630	u64_stats_update_end(&rx_stats->syncp);
1631
1632	if (act == XDP_TX) {
1633		skb_set_queue_mapping(skb, rxq_idx);
1634		mana_xdp_tx(skb, ndev);
1635		return;
1636	}
1637
1638	napi_gro_receive(napi, skb);
1639
1640	return;
1641
1642drop_xdp:
1643	u64_stats_update_begin(&rx_stats->syncp);
1644	rx_stats->xdp_drop++;
1645	u64_stats_update_end(&rx_stats->syncp);
1646
1647drop:
1648	if (from_pool) {
1649		page_pool_recycle_direct(rxq->page_pool,
1650					 virt_to_head_page(buf_va));
1651	} else {
1652		WARN_ON_ONCE(rxq->xdp_save_va);
1653		/* Save for reuse */
1654		rxq->xdp_save_va = buf_va;
1655	}
1656
1657	++ndev->stats.rx_dropped;
1658
1659	return;
1660}
1661
1662static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1663			     dma_addr_t *da, bool *from_pool, bool is_napi)
1664{
1665	struct page *page;
1666	void *va;
1667
1668	*from_pool = false;
1669
1670	/* Reuse XDP dropped page if available */
1671	if (rxq->xdp_save_va) {
1672		va = rxq->xdp_save_va;
1673		rxq->xdp_save_va = NULL;
1674	} else if (rxq->alloc_size > PAGE_SIZE) {
1675		if (is_napi)
1676			va = napi_alloc_frag(rxq->alloc_size);
1677		else
1678			va = netdev_alloc_frag(rxq->alloc_size);
1679
1680		if (!va)
1681			return NULL;
1682
1683		page = virt_to_head_page(va);
1684		/* Check if the frag falls back to single page */
1685		if (compound_order(page) < get_order(rxq->alloc_size)) {
1686			put_page(page);
1687			return NULL;
1688		}
1689	} else {
1690		page = page_pool_dev_alloc_pages(rxq->page_pool);
1691		if (!page)
1692			return NULL;
1693
1694		*from_pool = true;
1695		va = page_to_virt(page);
1696	}
1697
1698	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1699			     DMA_FROM_DEVICE);
1700	if (dma_mapping_error(dev, *da)) {
1701		if (*from_pool)
1702			page_pool_put_full_page(rxq->page_pool, page, false);
1703		else
1704			put_page(virt_to_head_page(va));
1705
1706		return NULL;
1707	}
1708
1709	return va;
1710}
1711
1712/* Allocate frag for rx buffer, and save the old buf */
1713static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1714			       struct mana_recv_buf_oob *rxoob, void **old_buf,
1715			       bool *old_fp)
1716{
1717	bool from_pool;
1718	dma_addr_t da;
1719	void *va;
1720
1721	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1722	if (!va)
1723		return;
1724
1725	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1726			 DMA_FROM_DEVICE);
1727	*old_buf = rxoob->buf_va;
1728	*old_fp = rxoob->from_pool;
1729
1730	rxoob->buf_va = va;
1731	rxoob->sgl[0].address = da;
1732	rxoob->from_pool = from_pool;
1733}
1734
1735static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1736				struct gdma_comp *cqe)
1737{
1738	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1739	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1740	struct net_device *ndev = rxq->ndev;
1741	struct mana_recv_buf_oob *rxbuf_oob;
1742	struct mana_port_context *apc;
1743	struct device *dev = gc->dev;
1744	void *old_buf = NULL;
1745	u32 curr, pktlen;
1746	bool old_fp;
1747
1748	apc = netdev_priv(ndev);
1749
1750	switch (oob->cqe_hdr.cqe_type) {
1751	case CQE_RX_OKAY:
1752		break;
1753
1754	case CQE_RX_TRUNCATED:
1755		++ndev->stats.rx_dropped;
1756		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1757		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1758		goto drop;
1759
1760	case CQE_RX_COALESCED_4:
1761		netdev_err(ndev, "RX coalescing is unsupported\n");
1762		apc->eth_stats.rx_coalesced_err++;
1763		return;
1764
1765	case CQE_RX_OBJECT_FENCE:
1766		complete(&rxq->fence_event);
1767		return;
1768
1769	default:
1770		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1771			   oob->cqe_hdr.cqe_type);
1772		apc->eth_stats.rx_cqe_unknown_type++;
1773		return;
1774	}
1775
1776	pktlen = oob->ppi[0].pkt_len;
1777
1778	if (pktlen == 0) {
 1779		/* Data packets should never have a packet length of zero */
1780		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1781			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1782		return;
1783	}
1784
1785	curr = rxq->buf_index;
1786	rxbuf_oob = &rxq->rx_oobs[curr];
1787	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1788
1789	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1790
1791	/* Unsuccessful refill will have old_buf == NULL.
1792	 * In this case, mana_rx_skb() will drop the packet.
1793	 */
1794	mana_rx_skb(old_buf, old_fp, oob, rxq);
1795
1796drop:
1797	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1798
1799	mana_post_pkt_rxq(rxq);
1800}
1801
1802static void mana_poll_rx_cq(struct mana_cq *cq)
1803{
1804	struct gdma_comp *comp = cq->gdma_comp_buf;
1805	struct mana_rxq *rxq = cq->rxq;
1806	int comp_read, i;
1807
1808	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1809	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1810
1811	rxq->xdp_flush = false;
1812
1813	for (i = 0; i < comp_read; i++) {
1814		if (WARN_ON_ONCE(comp[i].is_sq))
1815			return;
1816
1817		/* verify recv cqe references the right rxq */
1818		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1819			return;
1820
1821		mana_process_rx_cqe(rxq, cq, &comp[i]);
1822	}
1823
1824	if (comp_read > 0) {
1825		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1826
1827		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1828	}
1829
1830	if (rxq->xdp_flush)
1831		xdp_do_flush();
1832}
1833
1834static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1835{
 1836	struct mana_cq *cq = context;
1837	int w;
1838
1839	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1840
1841	if (cq->type == MANA_CQ_TYPE_RX)
1842		mana_poll_rx_cq(cq);
1843	else
1844		mana_poll_tx_cq(cq);
1845
1846	w = cq->work_done;
1847	cq->work_done_since_doorbell += w;
1848
1849	if (w < cq->budget) {
1850		mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
1851		cq->work_done_since_doorbell = 0;
1852		napi_complete_done(&cq->napi, w);
1853	} else if (cq->work_done_since_doorbell >
1854		   cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
1855		/* MANA hardware requires at least one doorbell ring every 8
1856		 * wraparounds of CQ even if there is no need to arm the CQ.
1857		 * This driver rings the doorbell as soon as we have exceeded
1858		 * 4 wraparounds.
1859		 */
1860		mana_gd_ring_cq(gdma_queue, 0);
1861		cq->work_done_since_doorbell = 0;
1862	}
 1863
1864	return w;
1865}
1866
1867static int mana_poll(struct napi_struct *napi, int budget)
1868{
1869	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1870	int w;
1871
1872	cq->work_done = 0;
1873	cq->budget = budget;
1874
1875	w = mana_cq_handler(cq, cq->gdma_cq);
1876
1877	return min(w, budget);
1878}
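/* NAPI note: mana_cq_handler() already calls napi_complete_done() whenever it
 * did less work than the budget, so returning min(w, budget) keeps the value
 * handed back to the core consistent with that completion.
 */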
1879
1880static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1881{
1882	struct mana_cq *cq = context;
1883
1884	napi_schedule_irqoff(&cq->napi);
1885}
1886
1887static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1888{
1889	struct gdma_dev *gd = apc->ac->gdma_dev;
1890
1891	if (!cq->gdma_cq)
1892		return;
1893
1894	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1895}
1896
1897static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1898{
1899	struct gdma_dev *gd = apc->ac->gdma_dev;
1900
1901	if (!txq->gdma_sq)
1902		return;
1903
1904	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1905}
1906
1907static void mana_destroy_txq(struct mana_port_context *apc)
1908{
1909	struct napi_struct *napi;
1910	int i;
1911
1912	if (!apc->tx_qp)
1913		return;
1914
1915	for (i = 0; i < apc->num_queues; i++) {
1916		debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
1917
1918		napi = &apc->tx_qp[i].tx_cq.napi;
1919		if (apc->tx_qp[i].txq.napi_initialized) {
1920			napi_synchronize(napi);
1921			napi_disable(napi);
1922			netif_napi_del(napi);
1923			apc->tx_qp[i].txq.napi_initialized = false;
1924		}
1925		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1926
1927		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1928
1929		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1930	}
1931
1932	kfree(apc->tx_qp);
1933	apc->tx_qp = NULL;
1934}
1935
1936static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
1937{
1938	struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
1939	char qnum[32];
1940
1941	sprintf(qnum, "TX-%d", idx);
1942	tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
1943	debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
1944			   &tx_qp->txq.gdma_sq->head);
1945	debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
1946			   &tx_qp->txq.gdma_sq->tail);
1947	debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
1948			   &tx_qp->txq.pending_skbs.qlen);
1949	debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
1950			   &tx_qp->tx_cq.gdma_cq->head);
1951	debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
1952			   &tx_qp->tx_cq.gdma_cq->tail);
1953	debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
1954			   &tx_qp->tx_cq.budget);
1955	debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
1956			    tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
1957	debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
1958			    tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
1959}
1960
1961static int mana_create_txq(struct mana_port_context *apc,
1962			   struct net_device *net)
1963{
1964	struct mana_context *ac = apc->ac;
1965	struct gdma_dev *gd = ac->gdma_dev;
1966	struct mana_obj_spec wq_spec;
1967	struct mana_obj_spec cq_spec;
1968	struct gdma_queue_spec spec;
1969	struct gdma_context *gc;
1970	struct mana_txq *txq;
1971	struct mana_cq *cq;
1972	u32 txq_size;
1973	u32 cq_size;
1974	int err;
1975	int i;
1976
1977	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1978			     GFP_KERNEL);
1979	if (!apc->tx_qp)
1980		return -ENOMEM;
1981
1982	/* The minimum size of a WQE is 32 bytes, hence
1983	 * apc->tx_queue_size represents the maximum number of WQEs
1984	 * the SQ can store. This value is then used to size other queues
1985	 * to prevent overflow.
1986	 * Also note that txq_size is always MANA_PAGE_ALIGNED:
1987	 * the minimum value of apc->tx_queue_size is 128, which makes
1988	 * txq_size 128 * 32 = 4096, and all larger values of
1989	 * apc->tx_queue_size are powers of two.
1990	 */
1991	txq_size = apc->tx_queue_size * 32;
1992
1993	cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
1994
1995	gc = gd->gdma_context;
1996
1997	for (i = 0; i < apc->num_queues; i++) {
1998		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1999
2000		/* Create SQ */
2001		txq = &apc->tx_qp[i].txq;
2002
2003		u64_stats_init(&txq->stats.syncp);
2004		txq->ndev = net;
2005		txq->net_txq = netdev_get_tx_queue(net, i);
2006		txq->vp_offset = apc->tx_vp_offset;
2007		txq->napi_initialized = false;
2008		skb_queue_head_init(&txq->pending_skbs);
2009
2010		memset(&spec, 0, sizeof(spec));
2011		spec.type = GDMA_SQ;
2012		spec.monitor_avl_buf = true;
2013		spec.queue_size = txq_size;
2014		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2015		if (err)
2016			goto out;
2017
2018		/* Create SQ's CQ */
2019		cq = &apc->tx_qp[i].tx_cq;
2020		cq->type = MANA_CQ_TYPE_TX;
2021
2022		cq->txq = txq;
2023
2024		memset(&spec, 0, sizeof(spec));
2025		spec.type = GDMA_CQ;
2026		spec.monitor_avl_buf = false;
2027		spec.queue_size = cq_size;
2028		spec.cq.callback = mana_schedule_napi;
2029		spec.cq.parent_eq = ac->eqs[i].eq;
2030		spec.cq.context = cq;
2031		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2032		if (err)
2033			goto out;
2034
2035		memset(&wq_spec, 0, sizeof(wq_spec));
2036		memset(&cq_spec, 0, sizeof(cq_spec));
2037
2038		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2039		wq_spec.queue_size = txq->gdma_sq->queue_size;
2040
2041		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2042		cq_spec.queue_size = cq->gdma_cq->queue_size;
2043		cq_spec.modr_ctx_id = 0;
2044		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2045
2046		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2047					 &wq_spec, &cq_spec,
2048					 &apc->tx_qp[i].tx_object);
2049
2050		if (err)
2051			goto out;
2052
2053		txq->gdma_sq->id = wq_spec.queue_index;
2054		cq->gdma_cq->id = cq_spec.queue_index;
2055
2056		txq->gdma_sq->mem_info.dma_region_handle =
2057			GDMA_INVALID_DMA_REGION;
2058		cq->gdma_cq->mem_info.dma_region_handle =
2059			GDMA_INVALID_DMA_REGION;
2060
2061		txq->gdma_txq_id = txq->gdma_sq->id;
2062
2063		cq->gdma_id = cq->gdma_cq->id;
2064
2065		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2066			err = -EINVAL;
2067			goto out;
2068		}
2069
2070		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2071
2072		mana_create_txq_debugfs(apc, i);
2073
2074		netif_napi_add_tx(net, &cq->napi, mana_poll);
2075		napi_enable(&cq->napi);
2076		txq->napi_initialized = true;
2077
2078		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2079	}
2080
2081	return 0;
2082out:
2083	mana_destroy_txq(apc);
2084	return err;
2085}
2086
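/* Tear down one RX queue: quiesce and delete NAPI, unregister the XDP RXQ
 * info, destroy the hardware WQ object and CQ, release every receive buffer
 * (back to the page pool or the page allocator), then destroy the page pool
 * and the RQ itself.
 */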
2087static void mana_destroy_rxq(struct mana_port_context *apc,
2088			     struct mana_rxq *rxq, bool napi_initialized)
2089
2090{
2091	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2092	struct mana_recv_buf_oob *rx_oob;
2093	struct device *dev = gc->dev;
2094	struct napi_struct *napi;
2095	struct page *page;
2096	int i;
2097
2098	if (!rxq)
2099		return;
2100
2101	debugfs_remove_recursive(rxq->mana_rx_debugfs);
2102
2103	napi = &rxq->rx_cq.napi;
2104
2105	if (napi_initialized) {
2106		napi_synchronize(napi);
2107
2108		napi_disable(napi);
2109
2110		netif_napi_del(napi);
2111	}
2112	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2113
2114	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2115
2116	mana_deinit_cq(apc, &rxq->rx_cq);
2117
2118	if (rxq->xdp_save_va)
2119		put_page(virt_to_head_page(rxq->xdp_save_va));
2120
2121	for (i = 0; i < rxq->num_rx_buf; i++) {
2122		rx_oob = &rxq->rx_oobs[i];
2123
2124		if (!rx_oob->buf_va)
2125			continue;
2126
2127		dma_unmap_single(dev, rx_oob->sgl[0].address,
2128				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2129
2130		page = virt_to_head_page(rx_oob->buf_va);
2131
2132		if (rx_oob->from_pool)
2133			page_pool_put_full_page(rxq->page_pool, page, false);
2134		else
2135			put_page(page);
2136
2137		rx_oob->buf_va = NULL;
2138	}
2139
2140	page_pool_destroy(rxq->page_pool);
2141
2142	if (rxq->gdma_rq)
2143		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2144
2145	kfree(rxq);
2146}
2147
2148static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2149			    struct mana_rxq *rxq, struct device *dev)
2150{
2151	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2152	bool from_pool = false;
2153	dma_addr_t da;
2154	void *va;
2155
2156	if (mpc->rxbufs_pre)
2157		va = mana_get_rxbuf_pre(rxq, &da);
2158	else
2159		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2160
2161	if (!va)
2162		return -ENOMEM;
2163
2164	rx_oob->buf_va = va;
2165	rx_oob->from_pool = from_pool;
2166
2167	rx_oob->sgl[0].address = da;
2168	rx_oob->sgl[0].size = rxq->datasize;
2169	rx_oob->sgl[0].mem_key = mem_key;
2170
2171	return 0;
2172}
2173
2174#define MANA_WQE_HEADER_SIZE 16
2175#define MANA_WQE_SGE_SIZE 16
2176
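/* Prepare one receive WQE per RX buffer: allocate and DMA-map the buffer,
 * fill in its scatter/gather entry, and accumulate the RQ and CQ sizes
 * needed to hold all the WQEs and their completions.
 */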
2177static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2178			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2179{
2180	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2181	struct mana_recv_buf_oob *rx_oob;
2182	struct device *dev = gc->dev;
2183	u32 buf_idx;
2184	int ret;
2185
2186	WARN_ON(rxq->datasize == 0);
2187
2188	*rxq_size = 0;
2189	*cq_size = 0;
2190
2191	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2192		rx_oob = &rxq->rx_oobs[buf_idx];
2193		memset(rx_oob, 0, sizeof(*rx_oob));
2194
2195		rx_oob->num_sge = 1;
2196
2197		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2198				       dev);
2199		if (ret)
2200			return ret;
2201
2202		rx_oob->wqe_req.sgl = rx_oob->sgl;
2203		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2204		rx_oob->wqe_req.inline_oob_size = 0;
2205		rx_oob->wqe_req.inline_oob_data = NULL;
2206		rx_oob->wqe_req.flags = 0;
2207		rx_oob->wqe_req.client_data_unit = 0;
2208
2209		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2210				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2211		*cq_size += COMP_ENTRY_SIZE;
2212	}
2213
2214	return 0;
2215}
2216
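/* Post every prepared RX WQE to the hardware RQ, ringing the doorbell for
 * each one via mana_gd_post_and_ring().
 */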
2217static int mana_push_wqe(struct mana_rxq *rxq)
2218{
2219	struct mana_recv_buf_oob *rx_oob;
2220	u32 buf_idx;
2221	int err;
2222
2223	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2224		rx_oob = &rxq->rx_oobs[buf_idx];
2225
2226		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2227					    &rx_oob->wqe_inf);
2228		if (err)
2229			return -ENOSPC;
2230	}
2231
2232	return 0;
2233}
2234
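/* Create the page pool used for RX buffer recycling: sized to the RX queue
 * depth, allocated on the device's NUMA node, and associated with the
 * queue's NAPI context and net device.
 */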
2235static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2236{
2237	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2238	struct page_pool_params pprm = {};
2239	int ret;
2240
2241	pprm.pool_size = mpc->rx_queue_size;
2242	pprm.nid = gc->numa_node;
2243	pprm.napi = &rxq->rx_cq.napi;
2244	pprm.netdev = rxq->ndev;
2245
2246	rxq->page_pool = page_pool_create(&pprm);
2247
2248	if (IS_ERR(rxq->page_pool)) {
2249		ret = PTR_ERR(rxq->page_pool);
2250		rxq->page_pool = NULL;
2251		return ret;
2252	}
2253
2254	return 0;
2255}
2256
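/* Create one RX queue: size the buffers for the current MTU, create the
 * page pool, the RQ and its CQ, register the pair with the hardware, post
 * the initial receive WQEs, register the XDP RXQ info, and enable NAPI
 * before arming the CQ. Returns NULL on failure after cleaning up.
 */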
2257static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2258					u32 rxq_idx, struct mana_eq *eq,
2259					struct net_device *ndev)
2260{
2261	struct gdma_dev *gd = apc->ac->gdma_dev;
2262	struct mana_obj_spec wq_spec;
2263	struct mana_obj_spec cq_spec;
2264	struct gdma_queue_spec spec;
2265	struct mana_cq *cq = NULL;
2266	struct gdma_context *gc;
2267	u32 cq_size, rq_size;
2268	struct mana_rxq *rxq;
2269	int err;
2270
2271	gc = gd->gdma_context;
2272
2273	rxq = kzalloc(struct_size(rxq, rx_oobs, apc->rx_queue_size),
2274		      GFP_KERNEL);
2275	if (!rxq)
2276		return NULL;
2277
2278	rxq->ndev = ndev;
2279	rxq->num_rx_buf = apc->rx_queue_size;
2280	rxq->rxq_idx = rxq_idx;
2281	rxq->rxobj = INVALID_MANA_HANDLE;
2282
2283	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2284			   &rxq->headroom);
2285
2286	/* Create page pool for RX queue */
2287	err = mana_create_page_pool(rxq, gc);
2288	if (err) {
2289		netdev_err(ndev, "Create page pool err:%d\n", err);
2290		goto out;
2291	}
2292
2293	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2294	if (err)
2295		goto out;
2296
2297	rq_size = MANA_PAGE_ALIGN(rq_size);
2298	cq_size = MANA_PAGE_ALIGN(cq_size);
2299
2300	/* Create RQ */
2301	memset(&spec, 0, sizeof(spec));
2302	spec.type = GDMA_RQ;
2303	spec.monitor_avl_buf = true;
2304	spec.queue_size = rq_size;
2305	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2306	if (err)
2307		goto out;
2308
2309	/* Create RQ's CQ */
2310	cq = &rxq->rx_cq;
2311	cq->type = MANA_CQ_TYPE_RX;
2312	cq->rxq = rxq;
2313
2314	memset(&spec, 0, sizeof(spec));
2315	spec.type = GDMA_CQ;
2316	spec.monitor_avl_buf = false;
2317	spec.queue_size = cq_size;
2318	spec.cq.callback = mana_schedule_napi;
2319	spec.cq.parent_eq = eq->eq;
2320	spec.cq.context = cq;
2321	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2322	if (err)
2323		goto out;
2324
2325	memset(&wq_spec, 0, sizeof(wq_spec));
2326	memset(&cq_spec, 0, sizeof(cq_spec));
2327	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2328	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2329
2330	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2331	cq_spec.queue_size = cq->gdma_cq->queue_size;
2332	cq_spec.modr_ctx_id = 0;
2333	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2334
2335	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2336				 &wq_spec, &cq_spec, &rxq->rxobj);
2337	if (err)
2338		goto out;
2339
2340	rxq->gdma_rq->id = wq_spec.queue_index;
2341	cq->gdma_cq->id = cq_spec.queue_index;
2342
2343	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2344	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2345
2346	rxq->gdma_id = rxq->gdma_rq->id;
2347	cq->gdma_id = cq->gdma_cq->id;
2348
2349	err = mana_push_wqe(rxq);
2350	if (err)
2351		goto out;
2352
2353	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2354		err = -EINVAL;
2355		goto out;
2356	}
2357
2358	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2359
2360	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2361
2362	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2363				 cq->napi.napi_id));
2364	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2365					   rxq->page_pool));
2366
2367	napi_enable(&cq->napi);
2368
2369	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2370out:
2371	if (!err)
2372		return rxq;
2373
2374	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2375
2376	mana_destroy_rxq(apc, rxq, false);
2377
2378	if (cq)
2379		mana_deinit_cq(apc, cq);
2380
2381	return NULL;
2382}
2383
2384static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2385{
2386	struct mana_rxq *rxq;
2387	char qnum[32];
2388
2389	rxq = apc->rxqs[idx];
2390
2391	sprintf(qnum, "RX-%d", idx);
2392	rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2393	debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
2394	debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
2395	debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
2396	debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
2397			   &rxq->rx_cq.gdma_cq->head);
2398	debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
2399			   &rxq->rx_cq.gdma_cq->tail);
2400	debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
2401	debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
2402	debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
2403			    &mana_dbg_q_fops);
2404}
2405
2406static int mana_add_rx_queues(struct mana_port_context *apc,
2407			      struct net_device *ndev)
2408{
2409	struct mana_context *ac = apc->ac;
2410	struct mana_rxq *rxq;
2411	int err = 0;
2412	int i;
2413
2414	for (i = 0; i < apc->num_queues; i++) {
2415		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2416		if (!rxq) {
2417			err = -ENOMEM;
2418			goto out;
2419		}
2420
2421		u64_stats_init(&rxq->stats.syncp);
2422
2423		apc->rxqs[i] = rxq;
2424
2425		mana_create_rxq_debugfs(apc, i);
2426	}
2427
2428	apc->default_rxobj = apc->rxqs[0]->rxobj;
2429out:
2430	return err;
2431}
2432
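/* Destroy all RX and TX queues of the port and unconfigure the vPort; on a
 * PF, also deregister the hardware vPort.
 */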
2433static void mana_destroy_vport(struct mana_port_context *apc)
2434{
2435	struct gdma_dev *gd = apc->ac->gdma_dev;
2436	struct mana_rxq *rxq;
2437	u32 rxq_idx;
2438
2439	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2440		rxq = apc->rxqs[rxq_idx];
2441		if (!rxq)
2442			continue;
2443
2444		mana_destroy_rxq(apc, rxq, true);
2445		apc->rxqs[rxq_idx] = NULL;
2446	}
2447
2448	mana_destroy_txq(apc);
2449	mana_uncfg_vport(apc);
2450
2451	if (gd->gdma_context->is_pf)
2452		mana_pf_deregister_hw_vport(apc);
2453}
2454
2455static int mana_create_vport(struct mana_port_context *apc,
2456			     struct net_device *net)
2457{
2458	struct gdma_dev *gd = apc->ac->gdma_dev;
2459	int err;
2460
2461	apc->default_rxobj = INVALID_MANA_HANDLE;
2462
2463	if (gd->gdma_context->is_pf) {
2464		err = mana_pf_register_hw_vport(apc);
2465		if (err)
2466			return err;
2467	}
2468
2469	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2470	if (err)
2471		return err;
2472
2473	return mana_create_txq(apc, net);
2474}
2475
2476static int mana_rss_table_alloc(struct mana_port_context *apc)
2477{
2478	if (!apc->indir_table_sz) {
2479		netdev_err(apc->ndev,
2480			   "Indirection table size not set for vPort %d\n",
2481			   apc->port_idx);
2482		return -EINVAL;
2483	}
2484
2485	apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2486	if (!apc->indir_table)
2487		return -ENOMEM;
2488
2489	apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
2490	if (!apc->rxobj_table) {
2491		kfree(apc->indir_table);
2492		return -ENOMEM;
2493	}
2494
2495	return 0;
2496}
2497
2498static void mana_rss_table_init(struct mana_port_context *apc)
2499{
2500	int i;
2501
2502	for (i = 0; i < apc->indir_table_sz; i++)
2503		apc->indir_table[i] =
2504			ethtool_rxfh_indir_default(i, apc->num_queues);
2505}
2506
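/* Push the RSS configuration to the hardware: optionally refresh the
 * RX-object indirection table from apc->indir_table, update the hash key
 * and steering state, then fence the RQs so the change has taken effect
 * before returning.
 */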
2507int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2508		    bool update_hash, bool update_tab)
2509{
2510	u32 queue_idx;
2511	int err;
2512	int i;
2513
2514	if (update_tab) {
2515		for (i = 0; i < apc->indir_table_sz; i++) {
2516			queue_idx = apc->indir_table[i];
2517			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2518		}
2519	}
2520
2521	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2522	if (err)
2523		return err;
2524
2525	mana_fence_rqs(apc);
2526
2527	return 0;
2528}
2529
2530void mana_query_gf_stats(struct mana_port_context *apc)
2531{
2532	struct mana_query_gf_stat_resp resp = {};
2533	struct mana_query_gf_stat_req req = {};
2534	struct net_device *ndev = apc->ndev;
2535	int err;
2536
2537	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2538			     sizeof(req), sizeof(resp));
2539	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
2540	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2541			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2542			STATISTICS_FLAGS_HC_RX_BYTES |
2543			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2544			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2545			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2546			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2547			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2548			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2549			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2550			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2551			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2552			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2553			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2554			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2555			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2556			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2557			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2558			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2559			STATISTICS_FLAGS_HC_TX_BYTES |
2560			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2561			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2562			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2563			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2564			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2565			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2566			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2567
2568	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2569				sizeof(resp));
2570	if (err) {
2571		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2572		return;
2573	}
2574	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2575				   sizeof(resp));
2576	if (err || resp.hdr.status) {
2577		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2578			   resp.hdr.status);
2579		return;
2580	}
2581
2582	apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2583	apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2584	apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
2585	apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2586	apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2587	apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2588	apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2589	apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2590	apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2591	apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2592	apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2593	apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
2594					     resp.tx_err_inval_vport_offset_pkt;
2595	apc->eth_stats.hc_tx_err_vlan_enforcement =
2596					     resp.tx_err_vlan_enforcement;
2597	apc->eth_stats.hc_tx_err_eth_type_enforcement =
2598					     resp.tx_err_ethtype_enforcement;
2599	apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2600	apc->eth_stats.hc_tx_err_sqpdid_enforcement =
2601					     resp.tx_err_SQPDID_enforcement;
2602	apc->eth_stats.hc_tx_err_cqpdid_enforcement =
2603					     resp.tx_err_CQPDID_enforcement;
2604	apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2605	apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2606	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2607	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2608	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2609	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2610	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2611	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2612	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2613	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
2614}
2615
2616static int mana_init_port(struct net_device *ndev)
2617{
2618	struct mana_port_context *apc = netdev_priv(ndev);
2619	struct gdma_dev *gd = apc->ac->gdma_dev;
2620	u32 max_txq, max_rxq, max_queues;
2621	int port_idx = apc->port_idx;
2622	struct gdma_context *gc;
2623	char vport[32];
2624	int err;
2625
2626	err = mana_init_port_context(apc);
2627	if (err)
2628		return err;
2629
2630	gc = gd->gdma_context;
2631
2632	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2633				   &apc->indir_table_sz);
2634	if (err) {
2635		netdev_err(ndev, "Failed to query info for vPort %d\n",
2636			   port_idx);
2637		goto reset_apc;
2638	}
2639
2640	max_queues = min_t(u32, max_txq, max_rxq);
2641	if (apc->max_queues > max_queues)
2642		apc->max_queues = max_queues;
2643
2644	if (apc->num_queues > apc->max_queues)
2645		apc->num_queues = apc->max_queues;
2646
2647	eth_hw_addr_set(ndev, apc->mac_addr);
2648	sprintf(vport, "vport%d", port_idx);
2649	apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
2650	return 0;
2651
2652reset_apc:
2653	mana_cleanup_port_context(apc);
2654	return err;
2655}
2656
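/* Allocate and start all data-path resources for the port: create the vPort
 * and TX queues, set the real TX/RX queue counts, add the RX queues,
 * program the RSS table, register the PF filter when applicable, and
 * re-apply any attached XDP program.
 */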
2657int mana_alloc_queues(struct net_device *ndev)
2658{
2659	struct mana_port_context *apc = netdev_priv(ndev);
2660	struct gdma_dev *gd = apc->ac->gdma_dev;
2661	int err;
2662
2663	err = mana_create_vport(apc, ndev);
2664	if (err)
2665		return err;
2666
2667	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2668	if (err)
2669		goto destroy_vport;
2670
2671	err = mana_add_rx_queues(apc, ndev);
2672	if (err)
2673		goto destroy_vport;
2674
2675	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2676
2677	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2678	if (err)
2679		goto destroy_vport;
2680
2681	mana_rss_table_init(apc);
2682
2683	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2684	if (err)
2685		goto destroy_vport;
2686
2687	if (gd->gdma_context->is_pf) {
2688		err = mana_pf_register_filter(apc);
2689		if (err)
2690			goto destroy_vport;
2691	}
2692
2693	mana_chn_setxdp(apc, mana_xdp_get(apc));
2694
2695	return 0;
2696
2697destroy_vport:
2698	mana_destroy_vport(apc);
2699	return err;
2700}
2701
2702int mana_attach(struct net_device *ndev)
2703{
2704	struct mana_port_context *apc = netdev_priv(ndev);
2705	int err;
2706
2707	ASSERT_RTNL();
2708
2709	err = mana_init_port(ndev);
2710	if (err)
2711		return err;
2712
2713	if (apc->port_st_save) {
2714		err = mana_alloc_queues(ndev);
2715		if (err) {
2716			mana_cleanup_port_context(apc);
2717			return err;
2718		}
2719	}
2720
2721	apc->port_is_up = apc->port_st_save;
2722
2723	/* Ensure port state updated before txq state */
2724	smp_wmb();
2725
2726	if (apc->port_is_up)
2727		netif_carrier_on(ndev);
2728
2729	netif_device_attach(ndev);
2730
2731	return 0;
2732}
2733
2734static int mana_dealloc_queues(struct net_device *ndev)
2735{
2736	struct mana_port_context *apc = netdev_priv(ndev);
2737	unsigned long timeout = jiffies + 120 * HZ;
2738	struct gdma_dev *gd = apc->ac->gdma_dev;
2739	struct mana_txq *txq;
2740	struct sk_buff *skb;
2741	int i, err;
2742	u32 tsleep;
2743
2744	if (apc->port_is_up)
2745		return -EINVAL;
2746
2747	mana_chn_setxdp(apc, NULL);
2748
2749	if (gd->gdma_context->is_pf)
2750		mana_pf_deregister_filter(apc);
2751
2752	/* No packet can be transmitted now since apc->port_is_up is false.
2753	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2754	 * a txq because it may not see apc->port_is_up being cleared to
2755	 * false in time, but that doesn't matter since mana_start_xmit()
2756	 * drops any new packets while apc->port_is_up is false.
2757	 *
2758	 * Drain all the in-flight TX packets.
2759	 * A timeout of 120 seconds for all the queues is used.
2760	 * This breaks the while loop when the h/w is not responding.
2761	 * The value of 120 was chosen to accommodate the maximum
2762	 * number of queues.
2763	 */
2764
2765	for (i = 0; i < apc->num_queues; i++) {
2766		txq = &apc->tx_qp[i].txq;
2767		tsleep = 1000;
2768		while (atomic_read(&txq->pending_sends) > 0 &&
2769		       time_before(jiffies, timeout)) {
2770			usleep_range(tsleep, tsleep + 1000);
2771			tsleep <<= 1;
2772		}
2773		if (atomic_read(&txq->pending_sends)) {
2774			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2775			if (err) {
2776				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2777					   err, atomic_read(&txq->pending_sends),
2778					   txq->gdma_txq_id);
2779			}
2780			break;
2781		}
2782	}
2783
2784	for (i = 0; i < apc->num_queues; i++) {
2785		txq = &apc->tx_qp[i].txq;
2786		while ((skb = skb_dequeue(&txq->pending_skbs))) {
2787			mana_unmap_skb(skb, apc);
2788			dev_kfree_skb_any(skb);
2789		}
2790		atomic_set(&txq->pending_sends, 0);
2791	}
2792	/* At this point the queues can no longer be woken up, because
2793	 * mana_poll_tx_cq() cannot be running anymore.
2794	 */
2795
2796	apc->rss_state = TRI_STATE_FALSE;
2797	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2798	if (err) {
2799		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2800		return err;
2801	}
2802
2803	mana_destroy_vport(apc);
2804
2805	return 0;
2806}
2807
2808int mana_detach(struct net_device *ndev, bool from_close)
2809{
2810	struct mana_port_context *apc = netdev_priv(ndev);
2811	int err;
2812
2813	ASSERT_RTNL();
2814
2815	apc->port_st_save = apc->port_is_up;
2816	apc->port_is_up = false;
2817
2818	/* Ensure port state updated before txq state */
2819	smp_wmb();
2820
2821	netif_tx_disable(ndev);
2822	netif_carrier_off(ndev);
2823
2824	if (apc->port_st_save) {
2825		err = mana_dealloc_queues(ndev);
2826		if (err)
2827			return err;
2828	}
2829
2830	if (!from_close) {
2831		netif_device_detach(ndev);
2832		mana_cleanup_port_context(apc);
2833	}
2834
2835	return 0;
2836}
2837
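/* Allocate and register the net device for one vPort: set the default queue
 * counts and sizes, initialize the port context and RSS tables, configure
 * MTU limits, offload features and XDP flags, and register the netdev.
 */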
2838static int mana_probe_port(struct mana_context *ac, int port_idx,
2839			   struct net_device **ndev_storage)
2840{
2841	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2842	struct mana_port_context *apc;
2843	struct net_device *ndev;
2844	int err;
2845
2846	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2847				 gc->max_num_queues);
2848	if (!ndev)
2849		return -ENOMEM;
2850
2851	*ndev_storage = ndev;
2852
2853	apc = netdev_priv(ndev);
2854	apc->ac = ac;
2855	apc->ndev = ndev;
2856	apc->max_queues = gc->max_num_queues;
2857	apc->num_queues = gc->max_num_queues;
2858	apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
2859	apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
2860	apc->port_handle = INVALID_MANA_HANDLE;
2861	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2862	apc->port_idx = port_idx;
2863
2864	mutex_init(&apc->vport_mutex);
2865	apc->vport_use_count = 0;
2866
2867	ndev->netdev_ops = &mana_devops;
2868	ndev->ethtool_ops = &mana_ethtool_ops;
2869	ndev->mtu = ETH_DATA_LEN;
2870	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2871	ndev->min_mtu = ETH_MIN_MTU;
2872	ndev->needed_headroom = MANA_HEADROOM;
2873	ndev->dev_port = port_idx;
2874	SET_NETDEV_DEV(ndev, gc->dev);
2875
2876	netif_carrier_off(ndev);
2877
2878	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2879
2880	err = mana_init_port(ndev);
2881	if (err)
2882		goto free_net;
2883
2884	err = mana_rss_table_alloc(apc);
2885	if (err)
2886		goto reset_apc;
2887
2888	netdev_lockdep_set_classes(ndev);
2889
2890	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2891	ndev->hw_features |= NETIF_F_RXCSUM;
2892	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2893	ndev->hw_features |= NETIF_F_RXHASH;
2894	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2895			 NETIF_F_HW_VLAN_CTAG_RX;
2896	ndev->vlan_features = ndev->features;
2897	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
2898			      NETDEV_XDP_ACT_REDIRECT |
2899			      NETDEV_XDP_ACT_NDO_XMIT);
2900
2901	err = register_netdev(ndev);
2902	if (err) {
2903		netdev_err(ndev, "Unable to register netdev.\n");
2904		goto free_indir;
2905	}
2906
2907	return 0;
2908
2909free_indir:
2910	mana_cleanup_indir_table(apc);
2911reset_apc:
2912	mana_cleanup_port_context(apc);
2913free_net:
2914	*ndev_storage = NULL;
2915	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2916	free_netdev(ndev);
2917	return err;
2918}
2919
2920static void adev_release(struct device *dev)
2921{
2922	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2923
2924	kfree(madev);
2925}
2926
2927static void remove_adev(struct gdma_dev *gd)
2928{
2929	struct auxiliary_device *adev = gd->adev;
2930	int id = adev->id;
2931
2932	auxiliary_device_delete(adev);
2933	auxiliary_device_uninit(adev);
2934
2935	mana_adev_idx_free(id);
2936	gd->adev = NULL;
2937}
2938
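/* Create and register an auxiliary device named "rdma" on top of this GDMA
 * device. Once auxiliary_device_init() succeeds, the device core owns madev
 * and frees it through adev_release().
 */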
2939static int add_adev(struct gdma_dev *gd)
2940{
2941	struct auxiliary_device *adev;
2942	struct mana_adev *madev;
2943	int ret;
2944
2945	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2946	if (!madev)
2947		return -ENOMEM;
2948
2949	adev = &madev->adev;
2950	ret = mana_adev_idx_alloc();
2951	if (ret < 0)
2952		goto idx_fail;
2953	adev->id = ret;
2954
2955	adev->name = "rdma";
2956	adev->dev.parent = gd->gdma_context->dev;
2957	adev->dev.release = adev_release;
2958	madev->mdev = gd;
2959
2960	ret = auxiliary_device_init(adev);
2961	if (ret)
2962		goto init_fail;
2963
2964	/* madev is owned by the auxiliary device */
2965	madev = NULL;
2966	ret = auxiliary_device_add(adev);
2967	if (ret)
2968		goto add_fail;
2969
2970	gd->adev = adev;
2971	return 0;
2972
2973add_fail:
2974	auxiliary_device_uninit(adev);
2975
2976init_fail:
2977	mana_adev_idx_free(adev->id);
2978
2979idx_fail:
2980	kfree(madev);
2981
2982	return ret;
2983}
2984
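/* Probe or resume the MANA Ethernet function: register with GDMA, create
 * the EQs, query the device configuration and port count, probe each vPort
 * (or re-attach them on resume), and finally add the auxiliary RDMA device.
 * On error, mana_remove() undoes whatever was set up.
 */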
2985int mana_probe(struct gdma_dev *gd, bool resuming)
2986{
2987	struct gdma_context *gc = gd->gdma_context;
2988	struct mana_context *ac = gd->driver_data;
2989	struct device *dev = gc->dev;
2990	u16 num_ports = 0;
2991	int err;
2992	int i;
2993
2994	dev_info(dev,
2995		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2996		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2997
2998	err = mana_gd_register_device(gd);
2999	if (err)
3000		return err;
3001
3002	if (!resuming) {
3003		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
3004		if (!ac)
3005			return -ENOMEM;
3006
3007		ac->gdma_dev = gd;
3008		gd->driver_data = ac;
3009	}
3010
3011	err = mana_create_eq(ac);
3012	if (err)
3013		goto out;
3014
3015	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3016				    MANA_MICRO_VERSION, &num_ports);
3017	if (err)
3018		goto out;
3019
3020	if (!resuming) {
3021		ac->num_ports = num_ports;
3022	} else {
3023		if (ac->num_ports != num_ports) {
3024			dev_err(dev, "The number of vPorts changed: %d->%d\n",
3025				ac->num_ports, num_ports);
3026			err = -EPROTO;
3027			goto out;
3028		}
3029	}
3030
3031	if (ac->num_ports == 0)
3032		dev_err(dev, "Failed to detect any vPort\n");
3033
3034	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3035		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3036
3037	if (!resuming) {
3038		for (i = 0; i < ac->num_ports; i++) {
3039			err = mana_probe_port(ac, i, &ac->ports[i]);
3040			/* We log the port for which the probe failed and stop
3041			 * probing the subsequent ports.
3042			 * Note that ports whose probe succeeded keep running,
3043			 * unless add_adev() fails too.
3044			 */
3045			if (err) {
3046				dev_err(dev, "Probe Failed for port %d\n", i);
3047				break;
3048			}
3049		}
3050	} else {
3051		for (i = 0; i < ac->num_ports; i++) {
3052			rtnl_lock();
3053			err = mana_attach(ac->ports[i]);
3054			rtnl_unlock();
3055			/* We log the port for which the attach failed and stop
3056			 * attaching the subsequent ports.
3057			 * Note that ports whose attach succeeded keep running,
3058			 * unless add_adev() fails too.
3059			 */
3060			if (err) {
3061				dev_err(dev, "Attach Failed for port %d\n", i);
3062				break;
3063			}
3064		}
3065	}
3066
3067	err = add_adev(gd);
3068out:
3069	if (err)
3070		mana_remove(gd, false);
3071
3072	return err;
3073}
3074
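/* Remove or suspend the MANA Ethernet function: drop the auxiliary device,
 * detach every port (and, unless suspending, unregister and free its net
 * device), destroy the EQs, and deregister from GDMA.
 */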
3075void mana_remove(struct gdma_dev *gd, bool suspending)
3076{
3077	struct gdma_context *gc = gd->gdma_context;
3078	struct mana_context *ac = gd->driver_data;
3079	struct mana_port_context *apc;
3080	struct device *dev = gc->dev;
3081	struct net_device *ndev;
3082	int err;
3083	int i;
3084
3085	/* adev currently doesn't support suspending, always remove it */
3086	if (gd->adev)
3087		remove_adev(gd);
3088
3089	for (i = 0; i < ac->num_ports; i++) {
3090		ndev = ac->ports[i];
3091		apc = netdev_priv(ndev);
3092		if (!ndev) {
3093			if (i == 0)
3094				dev_err(dev, "No net device to remove\n");
3095			goto out;
3096		}
3097
3098		/* All cleanup actions must be done while holding rtnl_lock();
3099		 * otherwise other functions may access partially cleaned-up data.
3100		 */
3101		rtnl_lock();
3102
3103		err = mana_detach(ndev, false);
3104		if (err)
3105			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
3106				   i, err);
3107
3108		if (suspending) {
3109			/* No need to unregister the ndev. */
3110			rtnl_unlock();
3111			continue;
3112		}
3113
3114		unregister_netdevice(ndev);
3115		mana_cleanup_indir_table(apc);
3116
3117		rtnl_unlock();
3118
3119		free_netdev(ndev);
3120	}
3121
3122	mana_destroy_eq(ac);
3123out:
3124	mana_gd_deregister_device(gd);
3125
3126	if (suspending)
3127		return;
3128
3129	gd->driver_data = NULL;
3130	gd->gdma_context = NULL;
3131	kfree(ac);
3132}
3133
3134struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
3135{
3136	struct net_device *ndev;
3137
3138	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
3139			 "Taking primary netdev without holding the RCU read lock");
3140	if (port_index >= ac->num_ports)
3141		return NULL;
3142
3143	/* When mana is used in netvsc, the upper netdevice should be returned. */
3144	if (ac->ports[port_index]->flags & IFF_SLAVE)
3145		ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
3146	else
3147		ndev = ac->ports[port_index];
3148
3149	return ndev;
3150}
3151EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, "NET_MANA");
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright (c) 2021, Microsoft Corporation. */
   3
   4#include <uapi/linux/bpf.h>
   5
 
   6#include <linux/inetdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/ethtool.h>
   9#include <linux/filter.h>
  10#include <linux/mm.h>
  11#include <linux/pci.h>
  12
  13#include <net/checksum.h>
  14#include <net/ip6_checksum.h>
  15#include <net/page_pool/helpers.h>
  16#include <net/xdp.h>
  17
  18#include <net/mana/mana.h>
  19#include <net/mana/mana_auxiliary.h>
  20
  21static DEFINE_IDA(mana_adev_ida);
  22
  23static int mana_adev_idx_alloc(void)
  24{
  25	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
  26}
  27
  28static void mana_adev_idx_free(int idx)
  29{
  30	ida_free(&mana_adev_ida, idx);
  31}
  32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  33/* Microsoft Azure Network Adapter (MANA) functions */
  34
  35static int mana_open(struct net_device *ndev)
  36{
  37	struct mana_port_context *apc = netdev_priv(ndev);
  38	int err;
  39
  40	err = mana_alloc_queues(ndev);
  41	if (err)
  42		return err;
  43
  44	apc->port_is_up = true;
  45
  46	/* Ensure port state updated before txq state */
  47	smp_wmb();
  48
  49	netif_carrier_on(ndev);
  50	netif_tx_wake_all_queues(ndev);
  51
  52	return 0;
  53}
  54
  55static int mana_close(struct net_device *ndev)
  56{
  57	struct mana_port_context *apc = netdev_priv(ndev);
  58
  59	if (!apc->port_is_up)
  60		return 0;
  61
  62	return mana_detach(ndev, true);
  63}
  64
  65static bool mana_can_tx(struct gdma_queue *wq)
  66{
  67	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
  68}
  69
  70static unsigned int mana_checksum_info(struct sk_buff *skb)
  71{
  72	if (skb->protocol == htons(ETH_P_IP)) {
  73		struct iphdr *ip = ip_hdr(skb);
  74
  75		if (ip->protocol == IPPROTO_TCP)
  76			return IPPROTO_TCP;
  77
  78		if (ip->protocol == IPPROTO_UDP)
  79			return IPPROTO_UDP;
  80	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  81		struct ipv6hdr *ip6 = ipv6_hdr(skb);
  82
  83		if (ip6->nexthdr == IPPROTO_TCP)
  84			return IPPROTO_TCP;
  85
  86		if (ip6->nexthdr == IPPROTO_UDP)
  87			return IPPROTO_UDP;
  88	}
  89
  90	/* No csum offloading */
  91	return 0;
  92}
  93
  94static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
  95			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
  96{
  97	ash->dma_handle[sg_i] = da;
  98	ash->size[sg_i] = sge_len;
  99
 100	tp->wqe_req.sgl[sg_i].address = da;
 101	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
 102	tp->wqe_req.sgl[sg_i].size = sge_len;
 103}
 104
 105static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
 106			struct mana_tx_package *tp, int gso_hs)
 107{
 108	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 109	int hsg = 1; /* num of SGEs of linear part */
 110	struct gdma_dev *gd = apc->ac->gdma_dev;
 111	int skb_hlen = skb_headlen(skb);
 112	int sge0_len, sge1_len = 0;
 113	struct gdma_context *gc;
 114	struct device *dev;
 115	skb_frag_t *frag;
 116	dma_addr_t da;
 117	int sg_i;
 118	int i;
 119
 120	gc = gd->gdma_context;
 121	dev = gc->dev;
 122
 123	if (gso_hs && gso_hs < skb_hlen) {
 124		sge0_len = gso_hs;
 125		sge1_len = skb_hlen - gso_hs;
 126	} else {
 127		sge0_len = skb_hlen;
 128	}
 129
 130	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
 131	if (dma_mapping_error(dev, da))
 132		return -ENOMEM;
 133
 134	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
 135
 136	if (sge1_len) {
 137		sg_i = 1;
 138		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
 139				    DMA_TO_DEVICE);
 140		if (dma_mapping_error(dev, da))
 141			goto frag_err;
 142
 143		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
 144		hsg = 2;
 145	}
 146
 147	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 148		sg_i = hsg + i;
 149
 150		frag = &skb_shinfo(skb)->frags[i];
 151		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 152				      DMA_TO_DEVICE);
 153		if (dma_mapping_error(dev, da))
 154			goto frag_err;
 155
 156		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
 157			     gd->gpa_mkey);
 158	}
 159
 160	return 0;
 161
 162frag_err:
 163	for (i = sg_i - 1; i >= hsg; i--)
 164		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
 165			       DMA_TO_DEVICE);
 166
 167	for (i = hsg - 1; i >= 0; i--)
 168		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
 169				 DMA_TO_DEVICE);
 170
 171	return -ENOMEM;
 172}
 173
 174/* Handle the case when GSO SKB linear length is too large.
 175 * MANA NIC requires GSO packets to put only the packet header to SGE0.
 176 * So, we need 2 SGEs for the skb linear part which contains more than the
 177 * header.
 178 * Return a positive value for the number of SGEs, or a negative value
 179 * for an error.
 180 */
 181static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
 182			     int gso_hs)
 183{
 184	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
 185	int skb_hlen = skb_headlen(skb);
 186
 187	if (gso_hs < skb_hlen) {
 188		num_sge++;
 189	} else if (gso_hs > skb_hlen) {
 190		if (net_ratelimit())
 191			netdev_err(ndev,
 192				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
 193				   gso_hs, skb_hlen);
 194
 195		return -EINVAL;
 196	}
 197
 198	return num_sge;
 199}
 200
 201/* Get the GSO packet's header size */
 202static int mana_get_gso_hs(struct sk_buff *skb)
 203{
 204	int gso_hs;
 205
 206	if (skb->encapsulation) {
 207		gso_hs = skb_inner_tcp_all_headers(skb);
 208	} else {
 209		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
 210			gso_hs = skb_transport_offset(skb) +
 211				 sizeof(struct udphdr);
 212		} else {
 213			gso_hs = skb_tcp_all_headers(skb);
 214		}
 215	}
 216
 217	return gso_hs;
 218}
 219
 220netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 221{
 222	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
 223	struct mana_port_context *apc = netdev_priv(ndev);
 224	int gso_hs = 0; /* zero for non-GSO pkts */
 225	u16 txq_idx = skb_get_queue_mapping(skb);
 226	struct gdma_dev *gd = apc->ac->gdma_dev;
 227	bool ipv4 = false, ipv6 = false;
 228	struct mana_tx_package pkg = {};
 229	struct netdev_queue *net_txq;
 230	struct mana_stats_tx *tx_stats;
 231	struct gdma_queue *gdma_sq;
 232	unsigned int csum_type;
 233	struct mana_txq *txq;
 234	struct mana_cq *cq;
 235	int err, len;
 236
 237	if (unlikely(!apc->port_is_up))
 238		goto tx_drop;
 239
 240	if (skb_cow_head(skb, MANA_HEADROOM))
 241		goto tx_drop_count;
 242
 243	txq = &apc->tx_qp[txq_idx].txq;
 244	gdma_sq = txq->gdma_sq;
 245	cq = &apc->tx_qp[txq_idx].tx_cq;
 246	tx_stats = &txq->stats;
 247
 248	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 249	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 250
 251	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
 252		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
 253		pkt_fmt = MANA_LONG_PKT_FMT;
 254	} else {
 255		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 256	}
 257
 258	if (skb_vlan_tag_present(skb)) {
 259		pkt_fmt = MANA_LONG_PKT_FMT;
 260		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
 261		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
 262		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
 263		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
 264	}
 265
 266	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 267
 268	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
 269		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
 270		u64_stats_update_begin(&tx_stats->syncp);
 271		tx_stats->short_pkt_fmt++;
 272		u64_stats_update_end(&tx_stats->syncp);
 273	} else {
 274		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
 275		u64_stats_update_begin(&tx_stats->syncp);
 276		tx_stats->long_pkt_fmt++;
 277		u64_stats_update_end(&tx_stats->syncp);
 278	}
 279
 280	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
 281	pkg.wqe_req.flags = 0;
 282	pkg.wqe_req.client_data_unit = 0;
 283
 284	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
 285
 286	if (skb->protocol == htons(ETH_P_IP))
 287		ipv4 = true;
 288	else if (skb->protocol == htons(ETH_P_IPV6))
 289		ipv6 = true;
 290
 291	if (skb_is_gso(skb)) {
 292		int num_sge;
 293
 294		gso_hs = mana_get_gso_hs(skb);
 295
 296		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
 297		if (num_sge > 0)
 298			pkg.wqe_req.num_sge = num_sge;
 299		else
 300			goto tx_drop_count;
 301
 302		u64_stats_update_begin(&tx_stats->syncp);
 303		if (skb->encapsulation) {
 304			tx_stats->tso_inner_packets++;
 305			tx_stats->tso_inner_bytes += skb->len - gso_hs;
 306		} else {
 307			tx_stats->tso_packets++;
 308			tx_stats->tso_bytes += skb->len - gso_hs;
 309		}
 310		u64_stats_update_end(&tx_stats->syncp);
 311
 312		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 313		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 314
 315		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
 316		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 317		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 318
 319		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
 320		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
 321		if (ipv4) {
 322			ip_hdr(skb)->tot_len = 0;
 323			ip_hdr(skb)->check = 0;
 324			tcp_hdr(skb)->check =
 325				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 326						   ip_hdr(skb)->daddr, 0,
 327						   IPPROTO_TCP, 0);
 328		} else {
 329			ipv6_hdr(skb)->payload_len = 0;
 330			tcp_hdr(skb)->check =
 331				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 332						 &ipv6_hdr(skb)->daddr, 0,
 333						 IPPROTO_TCP, 0);
 334		}
 335	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 336		csum_type = mana_checksum_info(skb);
 337
 338		u64_stats_update_begin(&tx_stats->syncp);
 339		tx_stats->csum_partial++;
 340		u64_stats_update_end(&tx_stats->syncp);
 341
 342		if (csum_type == IPPROTO_TCP) {
 343			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 344			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 345
 346			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 347			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 348
 349		} else if (csum_type == IPPROTO_UDP) {
 350			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 351			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 352
 353			pkg.tx_oob.s_oob.comp_udp_csum = 1;
 354		} else {
 355			/* Can't do offload of this type of checksum */
 356			if (skb_checksum_help(skb))
 357				goto tx_drop_count;
 358		}
 359	}
 360
 361	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 362
 363	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 364		pkg.wqe_req.sgl = pkg.sgl_array;
 365	} else {
 366		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
 367					    sizeof(struct gdma_sge),
 368					    GFP_ATOMIC);
 369		if (!pkg.sgl_ptr)
 370			goto tx_drop_count;
 371
 372		pkg.wqe_req.sgl = pkg.sgl_ptr;
 373	}
 374
 375	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
 376		u64_stats_update_begin(&tx_stats->syncp);
 377		tx_stats->mana_map_err++;
 378		u64_stats_update_end(&tx_stats->syncp);
 379		goto free_sgl_ptr;
 380	}
 381
 382	skb_queue_tail(&txq->pending_skbs, skb);
 383
 384	len = skb->len;
 385	net_txq = netdev_get_tx_queue(ndev, txq_idx);
 386
 387	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
 388					(struct gdma_posted_wqe_info *)skb->cb);
 389	if (!mana_can_tx(gdma_sq)) {
 390		netif_tx_stop_queue(net_txq);
 391		apc->eth_stats.stop_queue++;
 392	}
 393
 394	if (err) {
 395		(void)skb_dequeue_tail(&txq->pending_skbs);
 396		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
 397		err = NETDEV_TX_BUSY;
 398		goto tx_busy;
 399	}
 400
 401	err = NETDEV_TX_OK;
 402	atomic_inc(&txq->pending_sends);
 403
 404	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
 405
 406	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
 407	skb = NULL;
 408
 409	tx_stats = &txq->stats;
 410	u64_stats_update_begin(&tx_stats->syncp);
 411	tx_stats->packets++;
 412	tx_stats->bytes += len;
 413	u64_stats_update_end(&tx_stats->syncp);
 414
 415tx_busy:
 416	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 417		netif_tx_wake_queue(net_txq);
 418		apc->eth_stats.wake_queue++;
 419	}
 420
 421	kfree(pkg.sgl_ptr);
 422	return err;
 423
 424free_sgl_ptr:
 425	kfree(pkg.sgl_ptr);
 426tx_drop_count:
 427	ndev->stats.tx_dropped++;
 428tx_drop:
 429	dev_kfree_skb_any(skb);
 430	return NETDEV_TX_OK;
 431}
 432
 433static void mana_get_stats64(struct net_device *ndev,
 434			     struct rtnl_link_stats64 *st)
 435{
 436	struct mana_port_context *apc = netdev_priv(ndev);
 437	unsigned int num_queues = apc->num_queues;
 438	struct mana_stats_rx *rx_stats;
 439	struct mana_stats_tx *tx_stats;
 440	unsigned int start;
 441	u64 packets, bytes;
 442	int q;
 443
 444	if (!apc->port_is_up)
 445		return;
 446
 447	netdev_stats_to_stats64(st, &ndev->stats);
 448
 449	for (q = 0; q < num_queues; q++) {
 450		rx_stats = &apc->rxqs[q]->stats;
 451
 452		do {
 453			start = u64_stats_fetch_begin(&rx_stats->syncp);
 454			packets = rx_stats->packets;
 455			bytes = rx_stats->bytes;
 456		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
 457
 458		st->rx_packets += packets;
 459		st->rx_bytes += bytes;
 460	}
 461
 462	for (q = 0; q < num_queues; q++) {
 463		tx_stats = &apc->tx_qp[q].txq.stats;
 464
 465		do {
 466			start = u64_stats_fetch_begin(&tx_stats->syncp);
 467			packets = tx_stats->packets;
 468			bytes = tx_stats->bytes;
 469		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 470
 471		st->tx_packets += packets;
 472		st->tx_bytes += bytes;
 473	}
 474}
 475
 476static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
 477			     int old_q)
 478{
 479	struct mana_port_context *apc = netdev_priv(ndev);
 480	u32 hash = skb_get_hash(skb);
 481	struct sock *sk = skb->sk;
 482	int txq;
 483
 484	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
 485
 486	if (txq != old_q && sk && sk_fullsock(sk) &&
 487	    rcu_access_pointer(sk->sk_dst_cache))
 488		sk_tx_queue_set(sk, txq);
 489
 490	return txq;
 491}
 492
 493static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 494			     struct net_device *sb_dev)
 495{
 496	int txq;
 497
 498	if (ndev->real_num_tx_queues == 1)
 499		return 0;
 500
 501	txq = sk_tx_queue_get(skb->sk);
 502
 503	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
 504		if (skb_rx_queue_recorded(skb))
 505			txq = skb_get_rx_queue(skb);
 506		else
 507			txq = mana_get_tx_queue(ndev, skb, txq);
 508	}
 509
 510	return txq;
 511}
 512
 513/* Release pre-allocated RX buffers */
 514static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
 515{
 516	struct device *dev;
 517	int i;
 518
 519	dev = mpc->ac->gdma_dev->gdma_context->dev;
 520
 521	if (!mpc->rxbufs_pre)
 522		goto out1;
 523
 524	if (!mpc->das_pre)
 525		goto out2;
 526
 527	while (mpc->rxbpre_total) {
 528		i = --mpc->rxbpre_total;
 529		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
 530				 DMA_FROM_DEVICE);
 531		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
 532	}
 533
 534	kfree(mpc->das_pre);
 535	mpc->das_pre = NULL;
 536
 537out2:
 538	kfree(mpc->rxbufs_pre);
 539	mpc->rxbufs_pre = NULL;
 540
 541out1:
 542	mpc->rxbpre_datasize = 0;
 543	mpc->rxbpre_alloc_size = 0;
 544	mpc->rxbpre_headroom = 0;
 545}
 546
 547/* Get a buffer from the pre-allocated RX buffers */
 548static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
 549{
 550	struct net_device *ndev = rxq->ndev;
 551	struct mana_port_context *mpc;
 552	void *va;
 553
 554	mpc = netdev_priv(ndev);
 555
 556	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
 557		netdev_err(ndev, "No RX pre-allocated bufs\n");
 558		return NULL;
 559	}
 560
 561	/* Check sizes to catch unexpected coding error */
 562	if (mpc->rxbpre_datasize != rxq->datasize) {
 563		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
 564			   mpc->rxbpre_datasize, rxq->datasize);
 565		return NULL;
 566	}
 567
 568	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
 569		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
 570			   mpc->rxbpre_alloc_size, rxq->alloc_size);
 571		return NULL;
 572	}
 573
 574	if (mpc->rxbpre_headroom != rxq->headroom) {
 575		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
 576			   mpc->rxbpre_headroom, rxq->headroom);
 577		return NULL;
 578	}
 579
 580	mpc->rxbpre_total--;
 581
 582	*da = mpc->das_pre[mpc->rxbpre_total];
 583	va = mpc->rxbufs_pre[mpc->rxbpre_total];
 584	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
 585
 586	/* Deallocate the array after all buffers are gone */
 587	if (!mpc->rxbpre_total)
 588		mana_pre_dealloc_rxbufs(mpc);
 589
 590	return va;
 591}
 592
 593/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
 594static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
 595			       u32 *headroom)
 596{
 597	if (mtu > MANA_XDP_MTU_MAX)
 598		*headroom = 0; /* no support for XDP */
 599	else
 600		*headroom = XDP_PACKET_HEADROOM;
 601
 602	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
 
 
 
 
 603
 604	*datasize = mtu + ETH_HLEN;
 605}
 606
 607static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 608{
 609	struct device *dev;
 610	struct page *page;
 611	dma_addr_t da;
 612	int num_rxb;
 613	void *va;
 614	int i;
 615
 616	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
 617			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
 618
 619	dev = mpc->ac->gdma_dev->gdma_context->dev;
 620
 621	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
 622
 623	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
 624	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
 625	if (!mpc->rxbufs_pre)
 626		goto error;
 627
 628	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
 629	if (!mpc->das_pre)
 630		goto error;
 631
 632	mpc->rxbpre_total = 0;
 633
 634	for (i = 0; i < num_rxb; i++) {
 635		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
 636			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
 637			if (!va)
 638				goto error;
 639
 640			page = virt_to_head_page(va);
 641			/* Check if the frag falls back to single page */
 642			if (compound_order(page) <
 643			    get_order(mpc->rxbpre_alloc_size)) {
 644				put_page(page);
 645				goto error;
 646			}
 647		} else {
 648			page = dev_alloc_page();
 649			if (!page)
 650				goto error;
 651
 652			va = page_to_virt(page);
 653		}
 654
 655		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 656				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 657		if (dma_mapping_error(dev, da)) {
 658			put_page(virt_to_head_page(va));
 659			goto error;
 660		}
 661
 662		mpc->rxbufs_pre[i] = va;
 663		mpc->das_pre[i] = da;
 664		mpc->rxbpre_total = i + 1;
 665	}
 666
 667	return 0;
 668
 669error:
 670	mana_pre_dealloc_rxbufs(mpc);
 671	return -ENOMEM;
 672}
 673
 674static int mana_change_mtu(struct net_device *ndev, int new_mtu)
 675{
 676	struct mana_port_context *mpc = netdev_priv(ndev);
 677	unsigned int old_mtu = ndev->mtu;
 678	int err;
 679
 680	/* Pre-allocate buffers to prevent failure in mana_attach later */
 681	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
 682	if (err) {
 683		netdev_err(ndev, "Insufficient memory for new MTU\n");
 684		return err;
 685	}
 686
 687	err = mana_detach(ndev, false);
 688	if (err) {
 689		netdev_err(ndev, "mana_detach failed: %d\n", err);
 690		goto out;
 691	}
 692
 693	ndev->mtu = new_mtu;
 694
 695	err = mana_attach(ndev);
 696	if (err) {
 697		netdev_err(ndev, "mana_attach failed: %d\n", err);
 698		ndev->mtu = old_mtu;
 699	}
 700
 701out:
 702	mana_pre_dealloc_rxbufs(mpc);
 703	return err;
 704}
 705
 706static const struct net_device_ops mana_devops = {
 707	.ndo_open		= mana_open,
 708	.ndo_stop		= mana_close,
 709	.ndo_select_queue	= mana_select_queue,
 710	.ndo_start_xmit		= mana_start_xmit,
 711	.ndo_validate_addr	= eth_validate_addr,
 712	.ndo_get_stats64	= mana_get_stats64,
 713	.ndo_bpf		= mana_bpf,
 714	.ndo_xdp_xmit		= mana_xdp_xmit,
 715	.ndo_change_mtu		= mana_change_mtu,
 716};
 717
 718static void mana_cleanup_port_context(struct mana_port_context *apc)
 719{
 
 
 
 
 
 
 
 720	kfree(apc->rxqs);
 721	apc->rxqs = NULL;
 722}
 723
 
 
 
 
 
 
 
 724static int mana_init_port_context(struct mana_port_context *apc)
 725{
 726	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
 727			    GFP_KERNEL);
 728
 729	return !apc->rxqs ? -ENOMEM : 0;
 730}
 731
 732static int mana_send_request(struct mana_context *ac, void *in_buf,
 733			     u32 in_len, void *out_buf, u32 out_len)
 734{
 735	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 736	struct gdma_resp_hdr *resp = out_buf;
 737	struct gdma_req_hdr *req = in_buf;
 738	struct device *dev = gc->dev;
 739	static atomic_t activity_id;
 740	int err;
 741
 742	req->dev_id = gc->mana.dev_id;
 743	req->activity_id = atomic_inc_return(&activity_id);
 744
 745	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
 746				   out_buf);
 747	if (err || resp->status) {
 748		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
 749			err, resp->status);
 750		return err ? err : -EPROTO;
 751	}
 752
 753	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
 754	    req->activity_id != resp->activity_id) {
 755		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
 756			req->dev_id.as_uint32, resp->dev_id.as_uint32,
 757			req->activity_id, resp->activity_id);
 758		return -EPROTO;
 759	}
 760
 761	return 0;
 762}
 763
 764static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
 765				const enum mana_command_code expected_code,
 766				const u32 min_size)
 767{
 768	if (resp_hdr->response.msg_type != expected_code)
 769		return -EPROTO;
 770
 771	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
 772		return -EPROTO;
 773
 774	if (resp_hdr->response.msg_size < min_size)
 775		return -EPROTO;
 776
 777	return 0;
 778}
 779
 780static int mana_pf_register_hw_vport(struct mana_port_context *apc)
 781{
 782	struct mana_register_hw_vport_resp resp = {};
 783	struct mana_register_hw_vport_req req = {};
 784	int err;
 785
 786	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
 787			     sizeof(req), sizeof(resp));
 788	req.attached_gfid = 1;
 789	req.is_pf_default_vport = 1;
 790	req.allow_all_ether_types = 1;
 791
 792	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 793				sizeof(resp));
 794	if (err) {
 795		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
 796		return err;
 797	}
 798
 799	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
 800				   sizeof(resp));
 801	if (err || resp.hdr.status) {
 802		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
 803			   err, resp.hdr.status);
 804		return err ? err : -EPROTO;
 805	}
 806
 807	apc->port_handle = resp.hw_vport_handle;
 808	return 0;
 809}
 810
 811static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
 812{
 813	struct mana_deregister_hw_vport_resp resp = {};
 814	struct mana_deregister_hw_vport_req req = {};
 815	int err;
 816
 817	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
 818			     sizeof(req), sizeof(resp));
 819	req.hw_vport_handle = apc->port_handle;
 820
 821	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 822				sizeof(resp));
 823	if (err) {
 824		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
 825			   err);
 826		return;
 827	}
 828
 829	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
 830				   sizeof(resp));
 831	if (err || resp.hdr.status)
 832		netdev_err(apc->ndev,
 833			   "Failed to deregister hw vPort: %d, 0x%x\n",
 834			   err, resp.hdr.status);
 835}
 836
 837static int mana_pf_register_filter(struct mana_port_context *apc)
 838{
 839	struct mana_register_filter_resp resp = {};
 840	struct mana_register_filter_req req = {};
 841	int err;
 842
 843	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
 844			     sizeof(req), sizeof(resp));
 845	req.vport = apc->port_handle;
 846	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
 847
 848	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 849				sizeof(resp));
 850	if (err) {
 851		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
 852		return err;
 853	}
 854
 855	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
 856				   sizeof(resp));
 857	if (err || resp.hdr.status) {
 858		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
 859			   err, resp.hdr.status);
 860		return err ? err : -EPROTO;
 861	}
 862
 863	apc->pf_filter_handle = resp.filter_handle;
 864	return 0;
 865}
 866
 867static void mana_pf_deregister_filter(struct mana_port_context *apc)
 868{
 869	struct mana_deregister_filter_resp resp = {};
 870	struct mana_deregister_filter_req req = {};
 871	int err;
 872
 873	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
 874			     sizeof(req), sizeof(resp));
 875	req.filter_handle = apc->pf_filter_handle;
 876
 877	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 878				sizeof(resp));
 879	if (err) {
 880		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
 881			   err);
 882		return;
 883	}
 884
 885	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
 886				   sizeof(resp));
 887	if (err || resp.hdr.status)
 888		netdev_err(apc->ndev,
 889			   "Failed to deregister filter: %d, 0x%x\n",
 890			   err, resp.hdr.status);
 891}
 892
 893static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 894				 u32 proto_minor_ver, u32 proto_micro_ver,
 895				 u16 *max_num_vports)
 896{
 897	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 898	struct mana_query_device_cfg_resp resp = {};
 899	struct mana_query_device_cfg_req req = {};
 900	struct device *dev = gc->dev;
 901	int err = 0;
 902
 903	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 904			     sizeof(req), sizeof(resp));
 905
 906	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
 907
 908	req.proto_major_ver = proto_major_ver;
 909	req.proto_minor_ver = proto_minor_ver;
 910	req.proto_micro_ver = proto_micro_ver;
 911
 912	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
 913	if (err) {
 914		dev_err(dev, "Failed to query config: %d\n", err);
 915		return err;
 916	}
 917
 918	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
 919				   sizeof(resp));
 920	if (err || resp.hdr.status) {
 921		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
 922			resp.hdr.status);
 923		if (!err)
 924			err = -EPROTO;
 925		return err;
 926	}
 927
 928	*max_num_vports = resp.max_num_vports;
 929
 930	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
 931		gc->adapter_mtu = resp.adapter_mtu;
 932	else
 933		gc->adapter_mtu = ETH_FRAME_LEN;
 934
 
 
 935	return 0;
 936}
 937
 938static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
 939				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
 940{
 941	struct mana_query_vport_cfg_resp resp = {};
 942	struct mana_query_vport_cfg_req req = {};
 943	int err;
 944
 945	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
 946			     sizeof(req), sizeof(resp));
 947
 948	req.vport_index = vport_index;
 949
 950	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 951				sizeof(resp));
 952	if (err)
 953		return err;
 954
 955	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
 956				   sizeof(resp));
 957	if (err)
 958		return err;
 959
 960	if (resp.hdr.status)
 961		return -EPROTO;
 962
 963	*max_sq = resp.max_num_sq;
 964	*max_rq = resp.max_num_rq;
 965	*num_indir_entry = resp.num_indirection_ent;
 
 
 
 
 
 
 
 
 
 966
 967	apc->port_handle = resp.vport;
 968	ether_addr_copy(apc->mac_addr, resp.mac_addr);
 969
 970	return 0;
 971}
 972
 973void mana_uncfg_vport(struct mana_port_context *apc)
 974{
 975	mutex_lock(&apc->vport_mutex);
 976	apc->vport_use_count--;
 977	WARN_ON(apc->vport_use_count < 0);
 978	mutex_unlock(&apc->vport_mutex);
 979}
 980EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
 981
 982int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
 983		   u32 doorbell_pg_id)
 984{
 985	struct mana_config_vport_resp resp = {};
 986	struct mana_config_vport_req req = {};
 987	int err;
 988
 989	/* This function is used to program the Ethernet port in the hardware
 990	 * table. It can be called from the Ethernet driver or the RDMA driver.
 991	 *
 992	 * For Ethernet usage, the hardware supports only one active user on a
 993	 * physical port. The driver checks on the port usage before programming
 994	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
 995	 * device to kernel NET layer (Ethernet driver).
 996	 *
 997	 * Because the RDMA driver doesn't know in advance which QP type the
 998	 * user will create, it exposes the device with all its ports. The user
 999	 * may not be able to create RAW QP on a port if this port is already
1000	 * in use by the Ethernet driver from the kernel.
1001	 *
1002	 * This physical port limitation only applies to the RAW QP. For RC QP,
1003	 * the hardware doesn't have this limitation. The user can create RC
1004	 * QPs on a physical port up to the hardware limits independent of the
1005	 * Ethernet usage on the same port.
1006	 */
1007	mutex_lock(&apc->vport_mutex);
1008	if (apc->vport_use_count > 0) {
1009		mutex_unlock(&apc->vport_mutex);
1010		return -EBUSY;
1011	}
1012	apc->vport_use_count++;
1013	mutex_unlock(&apc->vport_mutex);
1014
1015	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1016			     sizeof(req), sizeof(resp));
1017	req.vport = apc->port_handle;
1018	req.pdid = protection_dom_id;
1019	req.doorbell_pageid = doorbell_pg_id;
1020
1021	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1022				sizeof(resp));
1023	if (err) {
1024		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1025		goto out;
1026	}
1027
1028	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1029				   sizeof(resp));
1030	if (err || resp.hdr.status) {
1031		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1032			   err, resp.hdr.status);
1033		if (!err)
1034			err = -EPROTO;
1035
1036		goto out;
1037	}
1038
1039	apc->tx_shortform_allowed = resp.short_form_allowed;
1040	apc->tx_vp_offset = resp.tx_vport_offset;
1041
1042	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1043		    apc->port_handle, protection_dom_id, doorbell_pg_id);
1044out:
1045	if (err)
1046		mana_uncfg_vport(apc);
1047
1048	return err;
1049}
1050EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
1051
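/* Program RX steering for the vPort: optionally update the default RX
 * object, the RSS hash key and the indirection table in one request.
 */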
1052static int mana_cfg_vport_steering(struct mana_port_context *apc,
1053				   enum TRI_STATE rx,
1054				   bool update_default_rxobj, bool update_key,
1055				   bool update_tab)
1056{
1057	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
1058	struct mana_cfg_rx_steer_req_v2 *req;
1059	struct mana_cfg_rx_steer_resp resp = {};
1060	struct net_device *ndev = apc->ndev;
1061	mana_handle_t *req_indir_tab;
1062	u32 req_buf_size;
1063	int err;
1064
1065	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1066	req = kzalloc(req_buf_size, GFP_KERNEL);
1067	if (!req)
1068		return -ENOMEM;
1069
1070	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1071			     sizeof(resp));
1072
1073	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1074
1075	req->vport = apc->port_handle;
1076	req->num_indir_entries = num_entries;
1077	req->indir_tab_offset = sizeof(*req);
 
1078	req->rx_enable = rx;
1079	req->rss_enable = apc->rss_state;
1080	req->update_default_rxobj = update_default_rxobj;
1081	req->update_hashkey = update_key;
1082	req->update_indir_tab = update_tab;
1083	req->default_rxobj = apc->default_rxobj;
1084	req->cqe_coalescing_enable = 0;
1085
1086	if (update_key)
1087		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1088
1089	if (update_tab) {
1090		req_indir_tab = (mana_handle_t *)(req + 1);
1091		memcpy(req_indir_tab, apc->rxobj_table,
1092		       req->num_indir_entries * sizeof(mana_handle_t));
1093	}
1094
1095	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1096				sizeof(resp));
1097	if (err) {
1098		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1099		goto out;
1100	}
1101
1102	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1103				   sizeof(resp));
1104	if (err) {
1105		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1106		goto out;
1107	}
1108
1109	if (resp.hdr.status) {
1110		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1111			   resp.hdr.status);
1112		err = -EPROTO;
1113	}
1114
1115	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1116		    apc->port_handle, num_entries);
1117out:
1118	kfree(req);
1119	return err;
1120}
1121
1122int mana_create_wq_obj(struct mana_port_context *apc,
1123		       mana_handle_t vport,
1124		       u32 wq_type, struct mana_obj_spec *wq_spec,
1125		       struct mana_obj_spec *cq_spec,
1126		       mana_handle_t *wq_obj)
1127{
1128	struct mana_create_wqobj_resp resp = {};
1129	struct mana_create_wqobj_req req = {};
1130	struct net_device *ndev = apc->ndev;
1131	int err;
1132
1133	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1134			     sizeof(req), sizeof(resp));
1135	req.vport = vport;
1136	req.wq_type = wq_type;
1137	req.wq_gdma_region = wq_spec->gdma_region;
1138	req.cq_gdma_region = cq_spec->gdma_region;
1139	req.wq_size = wq_spec->queue_size;
1140	req.cq_size = cq_spec->queue_size;
1141	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1142	req.cq_parent_qid = cq_spec->attached_eq;
1143
1144	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1145				sizeof(resp));
1146	if (err) {
1147		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1148		goto out;
1149	}
1150
1151	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1152				   sizeof(resp));
1153	if (err || resp.hdr.status) {
1154		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1155			   resp.hdr.status);
1156		if (!err)
1157			err = -EPROTO;
1158		goto out;
1159	}
1160
1161	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1162		netdev_err(ndev, "Got an invalid WQ object handle\n");
1163		err = -EPROTO;
1164		goto out;
1165	}
1166
1167	*wq_obj = resp.wq_obj;
1168	wq_spec->queue_index = resp.wq_id;
1169	cq_spec->queue_index = resp.cq_id;
1170
1171	return 0;
1172out:
1173	return err;
1174}
1175EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
1176
1177void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1178			 mana_handle_t wq_obj)
1179{
1180	struct mana_destroy_wqobj_resp resp = {};
1181	struct mana_destroy_wqobj_req req = {};
1182	struct net_device *ndev = apc->ndev;
1183	int err;
1184
1185	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1186			     sizeof(req), sizeof(resp));
1187	req.wq_type = wq_type;
1188	req.wq_obj_handle = wq_obj;
1189
1190	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1191				sizeof(resp));
1192	if (err) {
1193		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1194		return;
1195	}
1196
1197	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1198				   sizeof(resp));
1199	if (err || resp.hdr.status)
1200		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1201			   resp.hdr.status);
1202}
1203EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
1204
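/* Destroy all EQs created by mana_create_eq() and free the EQ array. */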
1205static void mana_destroy_eq(struct mana_context *ac)
1206{
1207	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1208	struct gdma_queue *eq;
1209	int i;
1210
1211	if (!ac->eqs)
1212		return;
1213
 
 
1214	for (i = 0; i < gc->max_num_queues; i++) {
1215		eq = ac->eqs[i].eq;
1216		if (!eq)
1217			continue;
1218
1219		mana_gd_destroy_queue(gc, eq);
1220	}
1221
1222	kfree(ac->eqs);
1223	ac->eqs = NULL;
1224}
1225
 
 
 
 
 
 
 
 
 
 
 
 
1226static int mana_create_eq(struct mana_context *ac)
1227{
1228	struct gdma_dev *gd = ac->gdma_dev;
1229	struct gdma_context *gc = gd->gdma_context;
1230	struct gdma_queue_spec spec = {};
1231	int err;
1232	int i;
1233
1234	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
1235			  GFP_KERNEL);
1236	if (!ac->eqs)
1237		return -ENOMEM;
1238
1239	spec.type = GDMA_EQ;
1240	spec.monitor_avl_buf = false;
1241	spec.queue_size = EQ_SIZE;
1242	spec.eq.callback = NULL;
1243	spec.eq.context = ac->eqs;
1244	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1245
 
 
1246	for (i = 0; i < gc->max_num_queues; i++) {
1247		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
1248		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1249		if (err)
1250			goto out;
 
1251	}
1252
1253	return 0;
1254out:
1255	mana_destroy_eq(ac);
1256	return err;
1257}
1258
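/* Post a fence request on the RQ and wait up to 10 seconds for the
 * corresponding CQE_RX_OBJECT_FENCE completion to arrive.
 */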
1259static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1260{
1261	struct mana_fence_rq_resp resp = {};
1262	struct mana_fence_rq_req req = {};
1263	int err;
1264
1265	init_completion(&rxq->fence_event);
1266
1267	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1268			     sizeof(req), sizeof(resp));
1269	req.wq_obj_handle = rxq->rxobj;
1270
1271	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1272				sizeof(resp));
1273	if (err) {
1274		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1275			   rxq->rxq_idx, err);
1276		return err;
1277	}
1278
1279	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1280	if (err || resp.hdr.status) {
1281		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1282			   rxq->rxq_idx, err, resp.hdr.status);
1283		if (!err)
1284			err = -EPROTO;
1285
1286		return err;
1287	}
1288
1289	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1290		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1291			   rxq->rxq_idx);
1292		return -ETIMEDOUT;
1293	}
1294
1295	return 0;
1296}
1297
1298static void mana_fence_rqs(struct mana_port_context *apc)
1299{
1300	unsigned int rxq_idx;
1301	struct mana_rxq *rxq;
1302	int err;
1303
1304	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1305		rxq = apc->rxqs[rxq_idx];
1306		err = mana_fence_rq(apc, rxq);
1307
1308		/* If the fence failed, sleep briefly instead of waiting for its completion. */
1309		if (err)
1310			msleep(100);
1311	}
1312}
1313
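/* Advance the WQ tail by num_units; reject a move that would pass the head. */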
1314static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1315{
1316	u32 used_space_old;
1317	u32 used_space_new;
1318
1319	used_space_old = wq->head - wq->tail;
1320	used_space_new = wq->head - (wq->tail + num_units);
1321
1322	if (WARN_ON_ONCE(used_space_new > used_space_old))
1323		return -ERANGE;
1324
1325	wq->tail += num_units;
1326	return 0;
1327}
1328
1329static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1330{
1331	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1332	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1333	struct device *dev = gc->dev;
1334	int hsg, i;
1335
1336	/* Number of SGEs of linear part */
1337	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1338
1339	for (i = 0; i < hsg; i++)
1340		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1341				 DMA_TO_DEVICE);
1342
1343	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1344		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1345			       DMA_TO_DEVICE);
1346}
1347
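/* Process TX completions: unmap and free transmitted skbs, advance the SQ
 * tail, and wake the TX queue if it was stopped and enough space is free.
 */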
1348static void mana_poll_tx_cq(struct mana_cq *cq)
1349{
1350	struct gdma_comp *completions = cq->gdma_comp_buf;
1351	struct gdma_posted_wqe_info *wqe_info;
1352	unsigned int pkt_transmitted = 0;
1353	unsigned int wqe_unit_cnt = 0;
1354	struct mana_txq *txq = cq->txq;
1355	struct mana_port_context *apc;
1356	struct netdev_queue *net_txq;
1357	struct gdma_queue *gdma_wq;
1358	unsigned int avail_space;
1359	struct net_device *ndev;
1360	struct sk_buff *skb;
1361	bool txq_stopped;
1362	int comp_read;
1363	int i;
1364
1365	ndev = txq->ndev;
1366	apc = netdev_priv(ndev);
1367
1368	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1369				    CQE_POLLING_BUFFER);
1370
1371	if (comp_read < 1)
1372		return;
1373
1374	for (i = 0; i < comp_read; i++) {
1375		struct mana_tx_comp_oob *cqe_oob;
1376
1377		if (WARN_ON_ONCE(!completions[i].is_sq))
1378			return;
1379
1380		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1381		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1382				 MANA_CQE_COMPLETION))
1383			return;
1384
1385		switch (cqe_oob->cqe_hdr.cqe_type) {
1386		case CQE_TX_OKAY:
1387			break;
1388
1389		case CQE_TX_SA_DROP:
1390		case CQE_TX_MTU_DROP:
1391		case CQE_TX_INVALID_OOB:
1392		case CQE_TX_INVALID_ETH_TYPE:
1393		case CQE_TX_HDR_PROCESSING_ERROR:
1394		case CQE_TX_VF_DISABLED:
1395		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1396		case CQE_TX_VPORT_DISABLED:
1397		case CQE_TX_VLAN_TAGGING_VIOLATION:
1398			if (net_ratelimit())
1399				netdev_err(ndev, "TX: CQE error %d\n",
1400					   cqe_oob->cqe_hdr.cqe_type);
1401
1402			apc->eth_stats.tx_cqe_err++;
1403			break;
1404
1405		default:
1406			/* If the CQE type is unknown, log an error,
1407			 * and still free the SKB, update tail, etc.
1408			 */
1409			if (net_ratelimit())
1410				netdev_err(ndev, "TX: unknown CQE type %d\n",
1411					   cqe_oob->cqe_hdr.cqe_type);
1412
1413			apc->eth_stats.tx_cqe_unknown_type++;
1414			break;
1415		}
1416
1417		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1418			return;
1419
1420		skb = skb_dequeue(&txq->pending_skbs);
1421		if (WARN_ON_ONCE(!skb))
1422			return;
1423
1424		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1425		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1426
1427		mana_unmap_skb(skb, apc);
1428
1429		napi_consume_skb(skb, cq->budget);
1430
1431		pkt_transmitted++;
1432	}
1433
1434	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1435		return;
1436
1437	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1438
1439	gdma_wq = txq->gdma_sq;
1440	avail_space = mana_gd_wq_avail_space(gdma_wq);
1441
1442	/* Ensure tail updated before checking q stop */
1443	smp_mb();
1444
1445	net_txq = txq->net_txq;
1446	txq_stopped = netif_tx_queue_stopped(net_txq);
1447
1448	/* Ensure checking txq_stopped before apc->port_is_up. */
1449	smp_rmb();
1450
1451	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1452		netif_tx_wake_queue(net_txq);
1453		apc->eth_stats.wake_queue++;
1454	}
1455
1456	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1457		WARN_ON_ONCE(1);
1458
1459	cq->work_done = pkt_transmitted;
1460}
1461
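/* Repost the RX buffer at the current index to the RQ and advance the index. */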
1462static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1463{
1464	struct mana_recv_buf_oob *recv_buf_oob;
1465	u32 curr_index;
1466	int err;
1467
1468	curr_index = rxq->buf_index++;
1469	if (rxq->buf_index == rxq->num_rx_buf)
1470		rxq->buf_index = 0;
1471
1472	recv_buf_oob = &rxq->rx_oobs[curr_index];
1473
1474	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1475					&recv_buf_oob->wqe_inf);
1476	if (WARN_ON_ONCE(err))
1477		return;
1478
1479	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1480}
1481
1482static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1483				      uint pkt_len, struct xdp_buff *xdp)
1484{
1485	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1486
1487	if (!skb)
1488		return NULL;
1489
1490	if (xdp->data_hard_start) {
1491		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1492		skb_put(skb, xdp->data_end - xdp->data);
1493		return skb;
1494	}
1495
1496	skb_reserve(skb, rxq->headroom);
1497	skb_put(skb, pkt_len);
1498
1499	return skb;
1500}
1501
1502static void mana_rx_skb(void *buf_va, bool from_pool,
1503			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1504{
1505	struct mana_stats_rx *rx_stats = &rxq->stats;
1506	struct net_device *ndev = rxq->ndev;
1507	uint pkt_len = cqe->ppi[0].pkt_len;
1508	u16 rxq_idx = rxq->rxq_idx;
1509	struct napi_struct *napi;
1510	struct xdp_buff xdp = {};
1511	struct sk_buff *skb;
1512	u32 hash_value;
1513	u32 act;
1514
1515	rxq->rx_cq.work_done++;
1516	napi = &rxq->rx_cq.napi;
1517
1518	if (!buf_va) {
1519		++ndev->stats.rx_dropped;
1520		return;
1521	}
1522
1523	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1524
1525	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1526		return;
1527
1528	if (act != XDP_PASS && act != XDP_TX)
1529		goto drop_xdp;
1530
1531	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1532
1533	if (!skb)
1534		goto drop;
1535
1536	if (from_pool)
1537		skb_mark_for_recycle(skb);
1538
1539	skb->dev = napi->dev;
1540
1541	skb->protocol = eth_type_trans(skb, ndev);
1542	skb_checksum_none_assert(skb);
1543	skb_record_rx_queue(skb, rxq_idx);
1544
1545	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1546		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1547			skb->ip_summed = CHECKSUM_UNNECESSARY;
1548	}
1549
1550	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1551		hash_value = cqe->ppi[0].pkt_hash;
1552
1553		if (cqe->rx_hashtype & MANA_HASH_L4)
1554			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1555		else
1556			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1557	}
1558
1559	if (cqe->rx_vlantag_present) {
1560		u16 vlan_tci = cqe->rx_vlan_id;
1561
1562		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1563	}
1564
1565	u64_stats_update_begin(&rx_stats->syncp);
1566	rx_stats->packets++;
1567	rx_stats->bytes += pkt_len;
1568
1569	if (act == XDP_TX)
1570		rx_stats->xdp_tx++;
1571	u64_stats_update_end(&rx_stats->syncp);
1572
1573	if (act == XDP_TX) {
1574		skb_set_queue_mapping(skb, rxq_idx);
1575		mana_xdp_tx(skb, ndev);
1576		return;
1577	}
1578
1579	napi_gro_receive(napi, skb);
1580
1581	return;
1582
1583drop_xdp:
1584	u64_stats_update_begin(&rx_stats->syncp);
1585	rx_stats->xdp_drop++;
1586	u64_stats_update_end(&rx_stats->syncp);
1587
1588drop:
1589	if (from_pool) {
1590		page_pool_recycle_direct(rxq->page_pool,
1591					 virt_to_head_page(buf_va));
1592	} else {
1593		WARN_ON_ONCE(rxq->xdp_save_va);
1594		/* Save for reuse */
1595		rxq->xdp_save_va = buf_va;
1596	}
1597
1598	++ndev->stats.rx_dropped;
1599
1600	return;
1601}
1602
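/* Allocate and DMA-map an RX buffer: reuse a buffer saved by XDP if there is
 * one, fall back to a page frag for buffers larger than a page, and use the
 * page pool otherwise.
 */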
1603static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1604			     dma_addr_t *da, bool *from_pool, bool is_napi)
1605{
1606	struct page *page;
1607	void *va;
1608
1609	*from_pool = false;
1610
1611	/* Reuse XDP dropped page if available */
1612	if (rxq->xdp_save_va) {
1613		va = rxq->xdp_save_va;
1614		rxq->xdp_save_va = NULL;
1615	} else if (rxq->alloc_size > PAGE_SIZE) {
1616		if (is_napi)
1617			va = napi_alloc_frag(rxq->alloc_size);
1618		else
1619			va = netdev_alloc_frag(rxq->alloc_size);
1620
1621		if (!va)
1622			return NULL;
1623
1624		page = virt_to_head_page(va);
1625		/* Check if the frag falls back to single page */
1626		if (compound_order(page) < get_order(rxq->alloc_size)) {
1627			put_page(page);
1628			return NULL;
1629		}
1630	} else {
1631		page = page_pool_dev_alloc_pages(rxq->page_pool);
1632		if (!page)
1633			return NULL;
1634
1635		*from_pool = true;
1636		va = page_to_virt(page);
1637	}
1638
1639	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1640			     DMA_FROM_DEVICE);
1641	if (dma_mapping_error(dev, *da)) {
1642		if (*from_pool)
1643			page_pool_put_full_page(rxq->page_pool, page, false);
1644		else
1645			put_page(virt_to_head_page(va));
1646
1647		return NULL;
1648	}
1649
1650	return va;
1651}
1652
1653/* Allocate frag for rx buffer, and save the old buf */
1654static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1655			       struct mana_recv_buf_oob *rxoob, void **old_buf,
1656			       bool *old_fp)
1657{
1658	bool from_pool;
1659	dma_addr_t da;
1660	void *va;
1661
1662	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1663	if (!va)
1664		return;
1665
1666	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1667			 DMA_FROM_DEVICE);
1668	*old_buf = rxoob->buf_va;
1669	*old_fp = rxoob->from_pool;
1670
1671	rxoob->buf_va = va;
1672	rxoob->sgl[0].address = da;
1673	rxoob->from_pool = from_pool;
1674}
1675
1676static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1677				struct gdma_comp *cqe)
1678{
1679	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1680	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1681	struct net_device *ndev = rxq->ndev;
1682	struct mana_recv_buf_oob *rxbuf_oob;
1683	struct mana_port_context *apc;
1684	struct device *dev = gc->dev;
1685	void *old_buf = NULL;
1686	u32 curr, pktlen;
1687	bool old_fp;
1688
1689	apc = netdev_priv(ndev);
1690
1691	switch (oob->cqe_hdr.cqe_type) {
1692	case CQE_RX_OKAY:
1693		break;
1694
1695	case CQE_RX_TRUNCATED:
1696		++ndev->stats.rx_dropped;
1697		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1698		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1699		goto drop;
1700
1701	case CQE_RX_COALESCED_4:
1702		netdev_err(ndev, "RX coalescing is unsupported\n");
1703		apc->eth_stats.rx_coalesced_err++;
1704		return;
1705
1706	case CQE_RX_OBJECT_FENCE:
1707		complete(&rxq->fence_event);
1708		return;
1709
1710	default:
1711		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1712			   oob->cqe_hdr.cqe_type);
1713		apc->eth_stats.rx_cqe_unknown_type++;
1714		return;
1715	}
1716
1717	pktlen = oob->ppi[0].pkt_len;
1718
1719	if (pktlen == 0) {
1720		/* Data packets should never have a packet length of zero */
1721		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1722			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1723		return;
1724	}
1725
1726	curr = rxq->buf_index;
1727	rxbuf_oob = &rxq->rx_oobs[curr];
1728	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1729
1730	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1731
1732	/* Unsuccessful refill will have old_buf == NULL.
1733	 * In this case, mana_rx_skb() will drop the packet.
1734	 */
1735	mana_rx_skb(old_buf, old_fp, oob, rxq);
1736
1737drop:
1738	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1739
1740	mana_post_pkt_rxq(rxq);
1741}
1742
1743static void mana_poll_rx_cq(struct mana_cq *cq)
1744{
1745	struct gdma_comp *comp = cq->gdma_comp_buf;
1746	struct mana_rxq *rxq = cq->rxq;
1747	int comp_read, i;
1748
1749	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1750	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1751
1752	rxq->xdp_flush = false;
1753
1754	for (i = 0; i < comp_read; i++) {
1755		if (WARN_ON_ONCE(comp[i].is_sq))
1756			return;
1757
1758		/* verify recv cqe references the right rxq */
1759		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1760			return;
1761
1762		mana_process_rx_cqe(rxq, cq, &comp[i]);
1763	}
1764
1765	if (comp_read > 0) {
1766		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1767
1768		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1769	}
1770
1771	if (rxq->xdp_flush)
1772		xdp_do_flush();
1773}
1774
1775static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1776{
1777	struct mana_cq *cq = context;
1778	u8 arm_bit;
1779	int w;
1780
1781	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1782
1783	if (cq->type == MANA_CQ_TYPE_RX)
1784		mana_poll_rx_cq(cq);
1785	else
1786		mana_poll_tx_cq(cq);
1787
1788	w = cq->work_done;
 
1789
1790	if (w < cq->budget &&
1791	    napi_complete_done(&cq->napi, w)) {
1792		arm_bit = SET_ARM_BIT;
1793	} else {
1794		arm_bit = 0;
 
 
 
 
 
 
 
 
1795	}
1796
1797	mana_gd_ring_cq(gdma_queue, arm_bit);
1798
1799	return w;
1800}
1801
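/* NAPI poll callback: process completions on the CQ within the given budget. */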
1802static int mana_poll(struct napi_struct *napi, int budget)
1803{
1804	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1805	int w;
1806
1807	cq->work_done = 0;
1808	cq->budget = budget;
1809
1810	w = mana_cq_handler(cq, cq->gdma_cq);
1811
1812	return min(w, budget);
1813}
1814
1815static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1816{
1817	struct mana_cq *cq = context;
1818
1819	napi_schedule_irqoff(&cq->napi);
1820}
1821
1822static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1823{
1824	struct gdma_dev *gd = apc->ac->gdma_dev;
1825
1826	if (!cq->gdma_cq)
1827		return;
1828
1829	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1830}
1831
1832static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1833{
1834	struct gdma_dev *gd = apc->ac->gdma_dev;
1835
1836	if (!txq->gdma_sq)
1837		return;
1838
1839	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1840}
1841
1842static void mana_destroy_txq(struct mana_port_context *apc)
1843{
1844	struct napi_struct *napi;
1845	int i;
1846
1847	if (!apc->tx_qp)
1848		return;
1849
1850	for (i = 0; i < apc->num_queues; i++) {
 
 
1851		napi = &apc->tx_qp[i].tx_cq.napi;
1852		napi_synchronize(napi);
1853		napi_disable(napi);
1854		netif_napi_del(napi);
1855
 
 
1856		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1857
1858		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1859
1860		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1861	}
1862
1863	kfree(apc->tx_qp);
1864	apc->tx_qp = NULL;
1865}
1866
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1867static int mana_create_txq(struct mana_port_context *apc,
1868			   struct net_device *net)
1869{
1870	struct mana_context *ac = apc->ac;
1871	struct gdma_dev *gd = ac->gdma_dev;
1872	struct mana_obj_spec wq_spec;
1873	struct mana_obj_spec cq_spec;
1874	struct gdma_queue_spec spec;
1875	struct gdma_context *gc;
1876	struct mana_txq *txq;
1877	struct mana_cq *cq;
1878	u32 txq_size;
1879	u32 cq_size;
1880	int err;
1881	int i;
1882
1883	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1884			     GFP_KERNEL);
1885	if (!apc->tx_qp)
1886		return -ENOMEM;
1887
1888	/*  The minimum size of the WQE is 32 bytes, hence
1889	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1890	 *  the SQ can store. This value is then used to size other queues
1891	 *  to prevent overflow.
 
 
 
 
1892	 */
1893	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1894	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1895
1896	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1897	cq_size = PAGE_ALIGN(cq_size);
1898
1899	gc = gd->gdma_context;
1900
1901	for (i = 0; i < apc->num_queues; i++) {
1902		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1903
1904		/* Create SQ */
1905		txq = &apc->tx_qp[i].txq;
1906
1907		u64_stats_init(&txq->stats.syncp);
1908		txq->ndev = net;
1909		txq->net_txq = netdev_get_tx_queue(net, i);
1910		txq->vp_offset = apc->tx_vp_offset;
 
1911		skb_queue_head_init(&txq->pending_skbs);
1912
1913		memset(&spec, 0, sizeof(spec));
1914		spec.type = GDMA_SQ;
1915		spec.monitor_avl_buf = true;
1916		spec.queue_size = txq_size;
1917		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1918		if (err)
1919			goto out;
1920
1921		/* Create SQ's CQ */
1922		cq = &apc->tx_qp[i].tx_cq;
1923		cq->type = MANA_CQ_TYPE_TX;
1924
1925		cq->txq = txq;
1926
1927		memset(&spec, 0, sizeof(spec));
1928		spec.type = GDMA_CQ;
1929		spec.monitor_avl_buf = false;
1930		spec.queue_size = cq_size;
1931		spec.cq.callback = mana_schedule_napi;
1932		spec.cq.parent_eq = ac->eqs[i].eq;
1933		spec.cq.context = cq;
1934		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1935		if (err)
1936			goto out;
1937
1938		memset(&wq_spec, 0, sizeof(wq_spec));
1939		memset(&cq_spec, 0, sizeof(cq_spec));
1940
1941		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
1942		wq_spec.queue_size = txq->gdma_sq->queue_size;
1943
1944		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
1945		cq_spec.queue_size = cq->gdma_cq->queue_size;
1946		cq_spec.modr_ctx_id = 0;
1947		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1948
1949		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1950					 &wq_spec, &cq_spec,
1951					 &apc->tx_qp[i].tx_object);
1952
1953		if (err)
1954			goto out;
1955
1956		txq->gdma_sq->id = wq_spec.queue_index;
1957		cq->gdma_cq->id = cq_spec.queue_index;
1958
1959		txq->gdma_sq->mem_info.dma_region_handle =
1960			GDMA_INVALID_DMA_REGION;
1961		cq->gdma_cq->mem_info.dma_region_handle =
1962			GDMA_INVALID_DMA_REGION;
1963
1964		txq->gdma_txq_id = txq->gdma_sq->id;
1965
1966		cq->gdma_id = cq->gdma_cq->id;
1967
1968		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1969			err = -EINVAL;
1970			goto out;
1971		}
1972
1973		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1974
 
 
1975		netif_napi_add_tx(net, &cq->napi, mana_poll);
1976		napi_enable(&cq->napi);
 
1977
1978		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1979	}
1980
1981	return 0;
1982out:
1983	mana_destroy_txq(apc);
1984	return err;
1985}
1986
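/* Tear down an RX queue: quiesce NAPI, unregister the XDP RX queue info,
 * destroy the WQ object and CQ, release all RX buffers and the page pool,
 * and free the RQ.
 */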
1987static void mana_destroy_rxq(struct mana_port_context *apc,
1988			     struct mana_rxq *rxq, bool validate_state)
1989
1990{
1991	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1992	struct mana_recv_buf_oob *rx_oob;
1993	struct device *dev = gc->dev;
1994	struct napi_struct *napi;
1995	struct page *page;
1996	int i;
1997
1998	if (!rxq)
1999		return;
2000
 
 
2001	napi = &rxq->rx_cq.napi;
2002
2003	if (validate_state)
2004		napi_synchronize(napi);
2005
2006	napi_disable(napi);
2007
 
 
2008	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2009
2010	netif_napi_del(napi);
2011
2012	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2013
2014	mana_deinit_cq(apc, &rxq->rx_cq);
2015
2016	if (rxq->xdp_save_va)
2017		put_page(virt_to_head_page(rxq->xdp_save_va));
2018
2019	for (i = 0; i < rxq->num_rx_buf; i++) {
2020		rx_oob = &rxq->rx_oobs[i];
2021
2022		if (!rx_oob->buf_va)
2023			continue;
2024
2025		dma_unmap_single(dev, rx_oob->sgl[0].address,
2026				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2027
2028		page = virt_to_head_page(rx_oob->buf_va);
2029
2030		if (rx_oob->from_pool)
2031			page_pool_put_full_page(rxq->page_pool, page, false);
2032		else
2033			put_page(page);
2034
2035		rx_oob->buf_va = NULL;
2036	}
2037
2038	page_pool_destroy(rxq->page_pool);
2039
2040	if (rxq->gdma_rq)
2041		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2042
2043	kfree(rxq);
2044}
2045
2046static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2047			    struct mana_rxq *rxq, struct device *dev)
2048{
2049	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2050	bool from_pool = false;
2051	dma_addr_t da;
2052	void *va;
2053
2054	if (mpc->rxbufs_pre)
2055		va = mana_get_rxbuf_pre(rxq, &da);
2056	else
2057		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2058
2059	if (!va)
2060		return -ENOMEM;
2061
2062	rx_oob->buf_va = va;
2063	rx_oob->from_pool = from_pool;
2064
2065	rx_oob->sgl[0].address = da;
2066	rx_oob->sgl[0].size = rxq->datasize;
2067	rx_oob->sgl[0].mem_key = mem_key;
2068
2069	return 0;
2070}
2071
2072#define MANA_WQE_HEADER_SIZE 16
2073#define MANA_WQE_SGE_SIZE 16
2074
2075static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2076			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2077{
2078	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2079	struct mana_recv_buf_oob *rx_oob;
2080	struct device *dev = gc->dev;
2081	u32 buf_idx;
2082	int ret;
2083
2084	WARN_ON(rxq->datasize == 0);
2085
2086	*rxq_size = 0;
2087	*cq_size = 0;
2088
2089	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2090		rx_oob = &rxq->rx_oobs[buf_idx];
2091		memset(rx_oob, 0, sizeof(*rx_oob));
2092
2093		rx_oob->num_sge = 1;
2094
2095		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2096				       dev);
2097		if (ret)
2098			return ret;
2099
2100		rx_oob->wqe_req.sgl = rx_oob->sgl;
2101		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2102		rx_oob->wqe_req.inline_oob_size = 0;
2103		rx_oob->wqe_req.inline_oob_data = NULL;
2104		rx_oob->wqe_req.flags = 0;
2105		rx_oob->wqe_req.client_data_unit = 0;
2106
2107		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2108				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2109		*cq_size += COMP_ENTRY_SIZE;
2110	}
2111
2112	return 0;
2113}
2114
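/* Post a receive WQE for every RX buffer and ring the RQ doorbell. */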
2115static int mana_push_wqe(struct mana_rxq *rxq)
2116{
2117	struct mana_recv_buf_oob *rx_oob;
2118	u32 buf_idx;
2119	int err;
2120
2121	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2122		rx_oob = &rxq->rx_oobs[buf_idx];
2123
2124		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2125					    &rx_oob->wqe_inf);
2126		if (err)
2127			return -ENOSPC;
2128	}
2129
2130	return 0;
2131}
2132
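/* Create the page pool used for this RX queue's buffers, bound to the
 * queue's NAPI context and the adapter's NUMA node.
 */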
2133static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2134{
 
2135	struct page_pool_params pprm = {};
2136	int ret;
2137
2138	pprm.pool_size = RX_BUFFERS_PER_QUEUE;
2139	pprm.nid = gc->numa_node;
2140	pprm.napi = &rxq->rx_cq.napi;
2141	pprm.netdev = rxq->ndev;
2142
2143	rxq->page_pool = page_pool_create(&pprm);
2144
2145	if (IS_ERR(rxq->page_pool)) {
2146		ret = PTR_ERR(rxq->page_pool);
2147		rxq->page_pool = NULL;
2148		return ret;
2149	}
2150
2151	return 0;
2152}
2153
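/* Create one RX queue: allocate its buffers and page pool, create the RQ and
 * its CQ, register the WQ object with the hardware, post the initial WQEs,
 * register XDP info with the page-pool memory model, and enable NAPI.
 */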
2154static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2155					u32 rxq_idx, struct mana_eq *eq,
2156					struct net_device *ndev)
2157{
2158	struct gdma_dev *gd = apc->ac->gdma_dev;
2159	struct mana_obj_spec wq_spec;
2160	struct mana_obj_spec cq_spec;
2161	struct gdma_queue_spec spec;
2162	struct mana_cq *cq = NULL;
2163	struct gdma_context *gc;
2164	u32 cq_size, rq_size;
2165	struct mana_rxq *rxq;
2166	int err;
2167
2168	gc = gd->gdma_context;
2169
2170	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
2171		      GFP_KERNEL);
2172	if (!rxq)
2173		return NULL;
2174
2175	rxq->ndev = ndev;
2176	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2177	rxq->rxq_idx = rxq_idx;
2178	rxq->rxobj = INVALID_MANA_HANDLE;
2179
2180	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2181			   &rxq->headroom);
2182
2183	/* Create page pool for RX queue */
2184	err = mana_create_page_pool(rxq, gc);
2185	if (err) {
2186		netdev_err(ndev, "Create page pool err:%d\n", err);
2187		goto out;
2188	}
2189
2190	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2191	if (err)
2192		goto out;
2193
2194	rq_size = PAGE_ALIGN(rq_size);
2195	cq_size = PAGE_ALIGN(cq_size);
2196
2197	/* Create RQ */
2198	memset(&spec, 0, sizeof(spec));
2199	spec.type = GDMA_RQ;
2200	spec.monitor_avl_buf = true;
2201	spec.queue_size = rq_size;
2202	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2203	if (err)
2204		goto out;
2205
2206	/* Create RQ's CQ */
2207	cq = &rxq->rx_cq;
2208	cq->type = MANA_CQ_TYPE_RX;
2209	cq->rxq = rxq;
2210
2211	memset(&spec, 0, sizeof(spec));
2212	spec.type = GDMA_CQ;
2213	spec.monitor_avl_buf = false;
2214	spec.queue_size = cq_size;
2215	spec.cq.callback = mana_schedule_napi;
2216	spec.cq.parent_eq = eq->eq;
2217	spec.cq.context = cq;
2218	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2219	if (err)
2220		goto out;
2221
2222	memset(&wq_spec, 0, sizeof(wq_spec));
2223	memset(&cq_spec, 0, sizeof(cq_spec));
2224	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2225	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2226
2227	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2228	cq_spec.queue_size = cq->gdma_cq->queue_size;
2229	cq_spec.modr_ctx_id = 0;
2230	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2231
2232	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2233				 &wq_spec, &cq_spec, &rxq->rxobj);
2234	if (err)
2235		goto out;
2236
2237	rxq->gdma_rq->id = wq_spec.queue_index;
2238	cq->gdma_cq->id = cq_spec.queue_index;
2239
2240	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2241	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2242
2243	rxq->gdma_id = rxq->gdma_rq->id;
2244	cq->gdma_id = cq->gdma_cq->id;
2245
2246	err = mana_push_wqe(rxq);
2247	if (err)
2248		goto out;
2249
2250	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2251		err = -EINVAL;
2252		goto out;
2253	}
2254
2255	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2256
2257	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2258
2259	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2260				 cq->napi.napi_id));
2261	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2262					   rxq->page_pool));
2263
2264	napi_enable(&cq->napi);
2265
2266	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2267out:
2268	if (!err)
2269		return rxq;
2270
2271	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2272
2273	mana_destroy_rxq(apc, rxq, false);
2274
2275	if (cq)
2276		mana_deinit_cq(apc, cq);
2277
2278	return NULL;
2279}
2280
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2281static int mana_add_rx_queues(struct mana_port_context *apc,
2282			      struct net_device *ndev)
2283{
2284	struct mana_context *ac = apc->ac;
2285	struct mana_rxq *rxq;
2286	int err = 0;
2287	int i;
2288
2289	for (i = 0; i < apc->num_queues; i++) {
2290		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2291		if (!rxq) {
2292			err = -ENOMEM;
2293			goto out;
2294		}
2295
2296		u64_stats_init(&rxq->stats.syncp);
2297
2298		apc->rxqs[i] = rxq;
 
 
2299	}
2300
2301	apc->default_rxobj = apc->rxqs[0]->rxobj;
2302out:
2303	return err;
2304}
2305
2306static void mana_destroy_vport(struct mana_port_context *apc)
2307{
2308	struct gdma_dev *gd = apc->ac->gdma_dev;
2309	struct mana_rxq *rxq;
2310	u32 rxq_idx;
2311
2312	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2313		rxq = apc->rxqs[rxq_idx];
2314		if (!rxq)
2315			continue;
2316
2317		mana_destroy_rxq(apc, rxq, true);
2318		apc->rxqs[rxq_idx] = NULL;
2319	}
2320
2321	mana_destroy_txq(apc);
2322	mana_uncfg_vport(apc);
2323
2324	if (gd->gdma_context->is_pf)
2325		mana_pf_deregister_hw_vport(apc);
2326}
2327
2328static int mana_create_vport(struct mana_port_context *apc,
2329			     struct net_device *net)
2330{
2331	struct gdma_dev *gd = apc->ac->gdma_dev;
2332	int err;
2333
2334	apc->default_rxobj = INVALID_MANA_HANDLE;
2335
2336	if (gd->gdma_context->is_pf) {
2337		err = mana_pf_register_hw_vport(apc);
2338		if (err)
2339			return err;
2340	}
2341
2342	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2343	if (err)
2344		return err;
2345
2346	return mana_create_txq(apc, net);
2347}
2348
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2349static void mana_rss_table_init(struct mana_port_context *apc)
2350{
2351	int i;
2352
2353	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2354		apc->indir_table[i] =
2355			ethtool_rxfh_indir_default(i, apc->num_queues);
2356}
2357
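/* Rebuild the RX object table from the indirection table when requested,
 * push the steering configuration to the hardware, and fence the RQs.
 */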
2358int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2359		    bool update_hash, bool update_tab)
2360{
2361	u32 queue_idx;
2362	int err;
2363	int i;
2364
2365	if (update_tab) {
2366		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2367			queue_idx = apc->indir_table[i];
2368			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2369		}
2370	}
2371
2372	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2373	if (err)
2374		return err;
2375
2376	mana_fence_rqs(apc);
2377
2378	return 0;
2379}
2380
2381void mana_query_gf_stats(struct mana_port_context *apc)
2382{
2383	struct mana_query_gf_stat_resp resp = {};
2384	struct mana_query_gf_stat_req req = {};
2385	struct net_device *ndev = apc->ndev;
2386	int err;
2387
2388	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2389			     sizeof(req), sizeof(resp));
 
2390	req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2391			STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2392			STATISTICS_FLAGS_HC_RX_BYTES |
2393			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2394			STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2395			STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2396			STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2397			STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2398			STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2399			STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2400			STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2401			STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2402			STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2403			STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2404			STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2405			STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2406			STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2407			STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2408			STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2409			STATISTICS_FLAGS_HC_TX_BYTES |
2410			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2411			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2412			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2413			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2414			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2415			STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2416			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2417
2418	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2419				sizeof(resp));
2420	if (err) {
2421		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2422		return;
2423	}
2424	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2425				   sizeof(resp));
2426	if (err || resp.hdr.status) {
2427		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2428			   resp.hdr.status);
2429		return;
2430	}
2431
2432	apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
2433	apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
2434	apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
2435	apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
2436	apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
2437	apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
2438	apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
2439	apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
2440	apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
2441	apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
2442	apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
2443	apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
2444					     resp.tx_err_inval_vport_offset_pkt;
2445	apc->eth_stats.hc_tx_err_vlan_enforcement =
2446					     resp.tx_err_vlan_enforcement;
2447	apc->eth_stats.hc_tx_err_eth_type_enforcement =
2448					     resp.tx_err_ethtype_enforcement;
2449	apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
2450	apc->eth_stats.hc_tx_err_sqpdid_enforcement =
2451					     resp.tx_err_SQPDID_enforcement;
2452	apc->eth_stats.hc_tx_err_cqpdid_enforcement =
2453					     resp.tx_err_CQPDID_enforcement;
2454	apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
2455	apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
2456	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2457	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2458	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2459	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2460	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2461	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2462	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2463	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
2464}
2465
2466static int mana_init_port(struct net_device *ndev)
2467{
2468	struct mana_port_context *apc = netdev_priv(ndev);
 
2469	u32 max_txq, max_rxq, max_queues;
2470	int port_idx = apc->port_idx;
2471	u32 num_indirect_entries;
 
2472	int err;
2473
2474	err = mana_init_port_context(apc);
2475	if (err)
2476		return err;
2477
 
 
2478	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2479				   &num_indirect_entries);
2480	if (err) {
2481		netdev_err(ndev, "Failed to query info for vPort %d\n",
2482			   port_idx);
2483		goto reset_apc;
2484	}
2485
2486	max_queues = min_t(u32, max_txq, max_rxq);
2487	if (apc->max_queues > max_queues)
2488		apc->max_queues = max_queues;
2489
2490	if (apc->num_queues > apc->max_queues)
2491		apc->num_queues = apc->max_queues;
2492
2493	eth_hw_addr_set(ndev, apc->mac_addr);
2494
 
2495	return 0;
2496
2497reset_apc:
2498	kfree(apc->rxqs);
2499	apc->rxqs = NULL;
2500	return err;
2501}
2502
2503int mana_alloc_queues(struct net_device *ndev)
2504{
2505	struct mana_port_context *apc = netdev_priv(ndev);
2506	struct gdma_dev *gd = apc->ac->gdma_dev;
2507	int err;
2508
2509	err = mana_create_vport(apc, ndev);
2510	if (err)
2511		return err;
2512
2513	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2514	if (err)
2515		goto destroy_vport;
2516
2517	err = mana_add_rx_queues(apc, ndev);
2518	if (err)
2519		goto destroy_vport;
2520
2521	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2522
2523	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2524	if (err)
2525		goto destroy_vport;
2526
2527	mana_rss_table_init(apc);
2528
2529	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2530	if (err)
2531		goto destroy_vport;
2532
2533	if (gd->gdma_context->is_pf) {
2534		err = mana_pf_register_filter(apc);
2535		if (err)
2536			goto destroy_vport;
2537	}
2538
2539	mana_chn_setxdp(apc, mana_xdp_get(apc));
2540
2541	return 0;
2542
2543destroy_vport:
2544	mana_destroy_vport(apc);
2545	return err;
2546}
2547
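/* Re-initialize the port and, if it was up before the detach, re-allocate
 * the queues and restore the carrier state.
 */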
2548int mana_attach(struct net_device *ndev)
2549{
2550	struct mana_port_context *apc = netdev_priv(ndev);
2551	int err;
2552
2553	ASSERT_RTNL();
2554
2555	err = mana_init_port(ndev);
2556	if (err)
2557		return err;
2558
2559	if (apc->port_st_save) {
2560		err = mana_alloc_queues(ndev);
2561		if (err) {
2562			mana_cleanup_port_context(apc);
2563			return err;
2564		}
2565	}
2566
2567	apc->port_is_up = apc->port_st_save;
2568
2569	/* Ensure port state updated before txq state */
2570	smp_wmb();
2571
2572	if (apc->port_is_up)
2573		netif_carrier_on(ndev);
2574
2575	netif_device_attach(ndev);
2576
2577	return 0;
2578}
2579
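/* Reverse of mana_alloc_queues(): drain in-flight TX, disable RSS and
 * destroy the vPort resources.
 */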
2580static int mana_dealloc_queues(struct net_device *ndev)
2581{
2582	struct mana_port_context *apc = netdev_priv(ndev);
2583	unsigned long timeout = jiffies + 120 * HZ;
2584	struct gdma_dev *gd = apc->ac->gdma_dev;
2585	struct mana_txq *txq;
2586	struct sk_buff *skb;
2587	int i, err;
2588	u32 tsleep;
2589
2590	if (apc->port_is_up)
2591		return -EINVAL;
2592
2593	mana_chn_setxdp(apc, NULL);
2594
2595	if (gd->gdma_context->is_pf)
2596		mana_pf_deregister_filter(apc);
2597
2598	/* No packet can be transmitted now since apc->port_is_up is false.
2599	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2600	 * a txq because it may not see apc->port_is_up being cleared to
2601	 * false in time, but that doesn't matter since mana_start_xmit()
2602	 * drops any new packets due to apc->port_is_up being false.
2603	 *
2604	 * Drain all the in-flight TX packets.
2605	 * A timeout of 120 seconds is used for all the queues, so the
2606	 * loop below terminates even when the h/w is not responding.
2607	 * The value of 120 was chosen to accommodate the maximum
2608	 * number of queues.
2609	 */
2610
2611	for (i = 0; i < apc->num_queues; i++) {
2612		txq = &apc->tx_qp[i].txq;
2613		tsleep = 1000;
2614		while (atomic_read(&txq->pending_sends) > 0 &&
2615		       time_before(jiffies, timeout)) {
2616			usleep_range(tsleep, tsleep + 1000);
2617			tsleep <<= 1;
2618		}
2619		if (atomic_read(&txq->pending_sends)) {
2620			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2621			if (err) {
2622				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2623					   err, atomic_read(&txq->pending_sends),
2624					   txq->gdma_txq_id);
2625			}
2626			break;
2627		}
2628	}
2629
2630	for (i = 0; i < apc->num_queues; i++) {
2631		txq = &apc->tx_qp[i].txq;
2632		while ((skb = skb_dequeue(&txq->pending_skbs))) {
2633			mana_unmap_skb(skb, apc);
2634			dev_kfree_skb_any(skb);
2635		}
2636		atomic_set(&txq->pending_sends, 0);
2637	}
2638	/* The queues can no longer be woken up, because mana_poll_tx_cq()
2639	 * cannot be running at this point.
2640	 */
2641
2642	apc->rss_state = TRI_STATE_FALSE;
2643	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2644	if (err) {
2645		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2646		return err;
2647	}
2648
2649	mana_destroy_vport(apc);
2650
2651	return 0;
2652}
2653
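/* Bring the port down: stop TX and carrier, drain and free the queues, and,
 * when not called from ndo_stop, detach the net_device and clean up the
 * port context.
 */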
2654int mana_detach(struct net_device *ndev, bool from_close)
2655{
2656	struct mana_port_context *apc = netdev_priv(ndev);
2657	int err;
2658
2659	ASSERT_RTNL();
2660
2661	apc->port_st_save = apc->port_is_up;
2662	apc->port_is_up = false;
2663
2664	/* Ensure port state updated before txq state */
2665	smp_wmb();
2666
2667	netif_tx_disable(ndev);
2668	netif_carrier_off(ndev);
2669
2670	if (apc->port_st_save) {
2671		err = mana_dealloc_queues(ndev);
2672		if (err)
2673			return err;
2674	}
2675
2676	if (!from_close) {
2677		netif_device_detach(ndev);
2678		mana_cleanup_port_context(apc);
2679	}
2680
2681	return 0;
2682}
2683
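/* Allocate, initialize and register one net_device for the given vPort. */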
2684static int mana_probe_port(struct mana_context *ac, int port_idx,
2685			   struct net_device **ndev_storage)
2686{
2687	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2688	struct mana_port_context *apc;
2689	struct net_device *ndev;
2690	int err;
2691
2692	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2693				 gc->max_num_queues);
2694	if (!ndev)
2695		return -ENOMEM;
2696
2697	*ndev_storage = ndev;
2698
2699	apc = netdev_priv(ndev);
2700	apc->ac = ac;
2701	apc->ndev = ndev;
2702	apc->max_queues = gc->max_num_queues;
2703	apc->num_queues = gc->max_num_queues;
 
 
2704	apc->port_handle = INVALID_MANA_HANDLE;
2705	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2706	apc->port_idx = port_idx;
2707
2708	mutex_init(&apc->vport_mutex);
2709	apc->vport_use_count = 0;
2710
2711	ndev->netdev_ops = &mana_devops;
2712	ndev->ethtool_ops = &mana_ethtool_ops;
2713	ndev->mtu = ETH_DATA_LEN;
2714	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2715	ndev->min_mtu = ETH_MIN_MTU;
2716	ndev->needed_headroom = MANA_HEADROOM;
2717	ndev->dev_port = port_idx;
2718	SET_NETDEV_DEV(ndev, gc->dev);
2719
2720	netif_carrier_off(ndev);
2721
2722	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2723
2724	err = mana_init_port(ndev);
2725	if (err)
2726		goto free_net;
2727
 
 
 
 
2728	netdev_lockdep_set_classes(ndev);
2729
2730	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2731	ndev->hw_features |= NETIF_F_RXCSUM;
2732	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2733	ndev->hw_features |= NETIF_F_RXHASH;
2734	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2735			 NETIF_F_HW_VLAN_CTAG_RX;
2736	ndev->vlan_features = ndev->features;
2737	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
2738			      NETDEV_XDP_ACT_REDIRECT |
2739			      NETDEV_XDP_ACT_NDO_XMIT);
2740
2741	err = register_netdev(ndev);
2742	if (err) {
2743		netdev_err(ndev, "Unable to register netdev.\n");
2744		goto reset_apc;
2745	}
2746
2747	return 0;
2748
 
 
2749reset_apc:
2750	kfree(apc->rxqs);
2751	apc->rxqs = NULL;
2752free_net:
2753	*ndev_storage = NULL;
2754	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2755	free_netdev(ndev);
2756	return err;
2757}
2758
2759static void adev_release(struct device *dev)
2760{
2761	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2762
2763	kfree(madev);
2764}
2765
2766static void remove_adev(struct gdma_dev *gd)
2767{
2768	struct auxiliary_device *adev = gd->adev;
2769	int id = adev->id;
2770
2771	auxiliary_device_delete(adev);
2772	auxiliary_device_uninit(adev);
2773
2774	mana_adev_idx_free(id);
2775	gd->adev = NULL;
2776}
2777
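/* Create and register an auxiliary device named "rdma" for this GDMA device. */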
2778static int add_adev(struct gdma_dev *gd)
2779{
2780	struct auxiliary_device *adev;
2781	struct mana_adev *madev;
2782	int ret;
2783
2784	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2785	if (!madev)
2786		return -ENOMEM;
2787
2788	adev = &madev->adev;
2789	ret = mana_adev_idx_alloc();
2790	if (ret < 0)
2791		goto idx_fail;
2792	adev->id = ret;
2793
2794	adev->name = "rdma";
2795	adev->dev.parent = gd->gdma_context->dev;
2796	adev->dev.release = adev_release;
2797	madev->mdev = gd;
2798
2799	ret = auxiliary_device_init(adev);
2800	if (ret)
2801		goto init_fail;
2802
 
 
2803	ret = auxiliary_device_add(adev);
2804	if (ret)
2805		goto add_fail;
2806
2807	gd->adev = adev;
2808	return 0;
2809
2810add_fail:
2811	auxiliary_device_uninit(adev);
2812
2813init_fail:
2814	mana_adev_idx_free(adev->id);
2815
2816idx_fail:
2817	kfree(madev);
2818
2819	return ret;
2820}
2821
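/* Probe or resume the MANA device: register it with GDMA, create the EQs,
 * query the device configuration, then probe or re-attach each vPort and
 * add the auxiliary device.
 */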
2822int mana_probe(struct gdma_dev *gd, bool resuming)
2823{
2824	struct gdma_context *gc = gd->gdma_context;
2825	struct mana_context *ac = gd->driver_data;
2826	struct device *dev = gc->dev;
2827	u16 num_ports = 0;
2828	int err;
2829	int i;
2830
2831	dev_info(dev,
2832		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2833		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2834
2835	err = mana_gd_register_device(gd);
2836	if (err)
2837		return err;
2838
2839	if (!resuming) {
2840		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2841		if (!ac)
2842			return -ENOMEM;
2843
2844		ac->gdma_dev = gd;
2845		gd->driver_data = ac;
2846	}
2847
2848	err = mana_create_eq(ac);
2849	if (err)
2850		goto out;
2851
2852	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2853				    MANA_MICRO_VERSION, &num_ports);
2854	if (err)
2855		goto out;
2856
2857	if (!resuming) {
2858		ac->num_ports = num_ports;
2859	} else {
2860		if (ac->num_ports != num_ports) {
2861			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2862				ac->num_ports, num_ports);
2863			err = -EPROTO;
2864			goto out;
2865		}
2866	}
2867
2868	if (ac->num_ports == 0)
2869		dev_err(dev, "Failed to detect any vPort\n");
2870
2871	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2872		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2873
2874	if (!resuming) {
2875		for (i = 0; i < ac->num_ports; i++) {
2876			err = mana_probe_port(ac, i, &ac->ports[i]);
2877			if (err)
 
 
 
 
 
 
2878				break;
 
2879		}
2880	} else {
2881		for (i = 0; i < ac->num_ports; i++) {
2882			rtnl_lock();
2883			err = mana_attach(ac->ports[i]);
2884			rtnl_unlock();
2885			if (err)
 
 
 
 
 
 
2886				break;
 
2887		}
2888	}
2889
2890	err = add_adev(gd);
2891out:
2892	if (err)
2893		mana_remove(gd, false);
2894
2895	return err;
2896}
2897
2898void mana_remove(struct gdma_dev *gd, bool suspending)
2899{
2900	struct gdma_context *gc = gd->gdma_context;
2901	struct mana_context *ac = gd->driver_data;
 
2902	struct device *dev = gc->dev;
2903	struct net_device *ndev;
2904	int err;
2905	int i;
2906
2907	/* adev currently doesn't support suspending, always remove it */
2908	if (gd->adev)
2909		remove_adev(gd);
2910
2911	for (i = 0; i < ac->num_ports; i++) {
2912		ndev = ac->ports[i];
 
2913		if (!ndev) {
2914			if (i == 0)
2915				dev_err(dev, "No net device to remove\n");
2916			goto out;
2917		}
2918
2919		/* All cleanup actions should stay after rtnl_lock(), otherwise
2920		 * other functions may access partially cleaned up data.
2921		 */
2922		rtnl_lock();
2923
2924		err = mana_detach(ndev, false);
2925		if (err)
2926			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2927				   i, err);
2928
2929		if (suspending) {
2930			/* No need to unregister the ndev. */
2931			rtnl_unlock();
2932			continue;
2933		}
2934
2935		unregister_netdevice(ndev);
 
2936
2937		rtnl_unlock();
2938
2939		free_netdev(ndev);
2940	}
2941
2942	mana_destroy_eq(ac);
2943out:
2944	mana_gd_deregister_device(gd);
2945
2946	if (suspending)
2947		return;
2948
2949	gd->driver_data = NULL;
2950	gd->gdma_context = NULL;
2951	kfree(ac);
2952}