   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright (c) 2021, Microsoft Corporation. */
   3
   4#include <uapi/linux/bpf.h>
   5
   6#include <linux/inetdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/ethtool.h>
   9#include <linux/filter.h>
  10#include <linux/mm.h>
  11
  12#include <net/checksum.h>
  13#include <net/ip6_checksum.h>
  14
  15#include <net/mana/mana.h>
  16#include <net/mana/mana_auxiliary.h>
  17
  18static DEFINE_IDA(mana_adev_ida);
  19
  20static int mana_adev_idx_alloc(void)
  21{
  22	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
  23}
  24
  25static void mana_adev_idx_free(int idx)
  26{
  27	ida_free(&mana_adev_ida, idx);
  28}
  29
  30/* Microsoft Azure Network Adapter (MANA) functions */
  31
  32static int mana_open(struct net_device *ndev)
  33{
  34	struct mana_port_context *apc = netdev_priv(ndev);
  35	int err;
  36
  37	err = mana_alloc_queues(ndev);
  38	if (err)
  39		return err;
  40
  41	apc->port_is_up = true;
  42
  43	/* Ensure port state updated before txq state */
  44	smp_wmb();
  45
  46	netif_carrier_on(ndev);
  47	netif_tx_wake_all_queues(ndev);
  48
  49	return 0;
  50}
  51
  52static int mana_close(struct net_device *ndev)
  53{
  54	struct mana_port_context *apc = netdev_priv(ndev);
  55
  56	if (!apc->port_is_up)
  57		return 0;
  58
  59	return mana_detach(ndev, true);
  60}
  61
  62static bool mana_can_tx(struct gdma_queue *wq)
  63{
  64	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
  65}
  66
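    /* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) that can be
     * checksum-offloaded for this skb, or 0 if no offload is possible.
     */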
  67static unsigned int mana_checksum_info(struct sk_buff *skb)
  68{
  69	if (skb->protocol == htons(ETH_P_IP)) {
  70		struct iphdr *ip = ip_hdr(skb);
  71
  72		if (ip->protocol == IPPROTO_TCP)
  73			return IPPROTO_TCP;
  74
  75		if (ip->protocol == IPPROTO_UDP)
  76			return IPPROTO_UDP;
  77	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  78		struct ipv6hdr *ip6 = ipv6_hdr(skb);
  79
  80		if (ip6->nexthdr == IPPROTO_TCP)
  81			return IPPROTO_TCP;
  82
  83		if (ip6->nexthdr == IPPROTO_UDP)
  84			return IPPROTO_UDP;
  85	}
  86
  87	/* No csum offloading */
  88	return 0;
  89}
  90
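    /* DMA-map the skb head and its page fragments and fill in the TX WQE
     * scatter-gather list; on a mapping failure, unmap everything that was
     * already mapped.
     */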
  91static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
  92			struct mana_tx_package *tp)
  93{
  94	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
  95	struct gdma_dev *gd = apc->ac->gdma_dev;
  96	struct gdma_context *gc;
  97	struct device *dev;
  98	skb_frag_t *frag;
  99	dma_addr_t da;
 100	int i;
 101
 102	gc = gd->gdma_context;
 103	dev = gc->dev;
 104	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 105
 106	if (dma_mapping_error(dev, da))
 107		return -ENOMEM;
 108
 109	ash->dma_handle[0] = da;
 110	ash->size[0] = skb_headlen(skb);
 111
 112	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
 113	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
 114	tp->wqe_req.sgl[0].size = ash->size[0];
 115
 116	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 117		frag = &skb_shinfo(skb)->frags[i];
 118		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 119				      DMA_TO_DEVICE);
 120
 121		if (dma_mapping_error(dev, da))
 122			goto frag_err;
 123
 124		ash->dma_handle[i + 1] = da;
 125		ash->size[i + 1] = skb_frag_size(frag);
 126
 127		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
 128		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
 129		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
 130	}
 131
 132	return 0;
 133
 134frag_err:
 135	for (i = i - 1; i >= 0; i--)
 136		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
 137			       DMA_TO_DEVICE);
 138
 139	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
 140
 141	return -ENOMEM;
 142}
 143
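    /* Transmit path: build the per-packet TX OOB (checksum/LSO metadata),
     * DMA-map the skb, post the work request to the SQ and ring the
     * doorbell. The queue is stopped when the SQ runs low on space.
     */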
 144netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 145{
 146	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
 147	struct mana_port_context *apc = netdev_priv(ndev);
 148	u16 txq_idx = skb_get_queue_mapping(skb);
 149	struct gdma_dev *gd = apc->ac->gdma_dev;
 150	bool ipv4 = false, ipv6 = false;
 151	struct mana_tx_package pkg = {};
 152	struct netdev_queue *net_txq;
 153	struct mana_stats_tx *tx_stats;
 154	struct gdma_queue *gdma_sq;
 155	unsigned int csum_type;
 156	struct mana_txq *txq;
 157	struct mana_cq *cq;
 158	int err, len;
 159
 160	if (unlikely(!apc->port_is_up))
 161		goto tx_drop;
 162
 163	if (skb_cow_head(skb, MANA_HEADROOM))
 164		goto tx_drop_count;
 165
 166	txq = &apc->tx_qp[txq_idx].txq;
 167	gdma_sq = txq->gdma_sq;
 168	cq = &apc->tx_qp[txq_idx].tx_cq;
 169
 170	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 171	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 172
 173	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
 174		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
 175		pkt_fmt = MANA_LONG_PKT_FMT;
 176	} else {
 177		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 178	}
 179
 180	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 181
 182	if (pkt_fmt == MANA_SHORT_PKT_FMT)
 183		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
 184	else
 185		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
 186
 187	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
 188	pkg.wqe_req.flags = 0;
 189	pkg.wqe_req.client_data_unit = 0;
 190
 191	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
 192	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 193
 194	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 195		pkg.wqe_req.sgl = pkg.sgl_array;
 196	} else {
 197		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
 198					    sizeof(struct gdma_sge),
 199					    GFP_ATOMIC);
 200		if (!pkg.sgl_ptr)
 201			goto tx_drop_count;
 202
 203		pkg.wqe_req.sgl = pkg.sgl_ptr;
 204	}
 205
 206	if (skb->protocol == htons(ETH_P_IP))
 207		ipv4 = true;
 208	else if (skb->protocol == htons(ETH_P_IPV6))
 209		ipv6 = true;
 210
 211	if (skb_is_gso(skb)) {
 212		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 213		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 214
 215		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
 216		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 217		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 218
 219		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
 220		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
 221		if (ipv4) {
 222			ip_hdr(skb)->tot_len = 0;
 223			ip_hdr(skb)->check = 0;
 224			tcp_hdr(skb)->check =
 225				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 226						   ip_hdr(skb)->daddr, 0,
 227						   IPPROTO_TCP, 0);
 228		} else {
 229			ipv6_hdr(skb)->payload_len = 0;
 230			tcp_hdr(skb)->check =
 231				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 232						 &ipv6_hdr(skb)->daddr, 0,
 233						 IPPROTO_TCP, 0);
 234		}
 235	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 236		csum_type = mana_checksum_info(skb);
 237
 238		if (csum_type == IPPROTO_TCP) {
 239			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 240			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 241
 242			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 243			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 244
 245		} else if (csum_type == IPPROTO_UDP) {
 246			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 247			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 248
 249			pkg.tx_oob.s_oob.comp_udp_csum = 1;
 250		} else {
 251			/* Can't do offload of this type of checksum */
 252			if (skb_checksum_help(skb))
 253				goto free_sgl_ptr;
 254		}
 255	}
 256
 257	if (mana_map_skb(skb, apc, &pkg))
 258		goto free_sgl_ptr;
 259
 260	skb_queue_tail(&txq->pending_skbs, skb);
 261
 262	len = skb->len;
 263	net_txq = netdev_get_tx_queue(ndev, txq_idx);
 264
 265	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
 266					(struct gdma_posted_wqe_info *)skb->cb);
 267	if (!mana_can_tx(gdma_sq)) {
 268		netif_tx_stop_queue(net_txq);
 269		apc->eth_stats.stop_queue++;
 270	}
 271
 272	if (err) {
 273		(void)skb_dequeue_tail(&txq->pending_skbs);
 274		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
 275		err = NETDEV_TX_BUSY;
 276		goto tx_busy;
 277	}
 278
 279	err = NETDEV_TX_OK;
 280	atomic_inc(&txq->pending_sends);
 281
 282	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
 283
 284	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
 285	skb = NULL;
 286
 287	tx_stats = &txq->stats;
 288	u64_stats_update_begin(&tx_stats->syncp);
 289	tx_stats->packets++;
 290	tx_stats->bytes += len;
 291	u64_stats_update_end(&tx_stats->syncp);
 292
 293tx_busy:
 294	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 295		netif_tx_wake_queue(net_txq);
 296		apc->eth_stats.wake_queue++;
 297	}
 298
 299	kfree(pkg.sgl_ptr);
 300	return err;
 301
 302free_sgl_ptr:
 303	kfree(pkg.sgl_ptr);
 304tx_drop_count:
 305	ndev->stats.tx_dropped++;
 306tx_drop:
 307	dev_kfree_skb_any(skb);
 308	return NETDEV_TX_OK;
 309}
 310
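    /* Aggregate the per-queue RX and TX counters into rtnl_link_stats64,
     * reading each counter pair under its u64_stats seqcount retry loop.
     */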
 311static void mana_get_stats64(struct net_device *ndev,
 312			     struct rtnl_link_stats64 *st)
 313{
 314	struct mana_port_context *apc = netdev_priv(ndev);
 315	unsigned int num_queues = apc->num_queues;
 316	struct mana_stats_rx *rx_stats;
 317	struct mana_stats_tx *tx_stats;
 318	unsigned int start;
 319	u64 packets, bytes;
 320	int q;
 321
 322	if (!apc->port_is_up)
 323		return;
 324
 325	netdev_stats_to_stats64(st, &ndev->stats);
 326
 327	for (q = 0; q < num_queues; q++) {
 328		rx_stats = &apc->rxqs[q]->stats;
 329
 330		do {
 331			start = u64_stats_fetch_begin(&rx_stats->syncp);
 332			packets = rx_stats->packets;
 333			bytes = rx_stats->bytes;
 334		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
 335
 336		st->rx_packets += packets;
 337		st->rx_bytes += bytes;
 338	}
 339
 340	for (q = 0; q < num_queues; q++) {
 341		tx_stats = &apc->tx_qp[q].txq.stats;
 342
 343		do {
 344			start = u64_stats_fetch_begin(&tx_stats->syncp);
 345			packets = tx_stats->packets;
 346			bytes = tx_stats->bytes;
 347		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 348
 349		st->tx_packets += packets;
 350		st->tx_bytes += bytes;
 351	}
 352}
 353
 354static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
 355			     int old_q)
 356{
 357	struct mana_port_context *apc = netdev_priv(ndev);
 358	u32 hash = skb_get_hash(skb);
 359	struct sock *sk = skb->sk;
 360	int txq;
 361
 362	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
 363
 364	if (txq != old_q && sk && sk_fullsock(sk) &&
 365	    rcu_access_pointer(sk->sk_dst_cache))
 366		sk_tx_queue_set(sk, txq);
 367
 368	return txq;
 369}
 370
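    /* Pick a TX queue: prefer the socket's cached queue, otherwise fall
     * back to the recorded RX queue or the RSS indirection table.
     */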
 371static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 372			     struct net_device *sb_dev)
 373{
 374	int txq;
 375
 376	if (ndev->real_num_tx_queues == 1)
 377		return 0;
 378
 379	txq = sk_tx_queue_get(skb->sk);
 380
 381	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
 382		if (skb_rx_queue_recorded(skb))
 383			txq = skb_get_rx_queue(skb);
 384		else
 385			txq = mana_get_tx_queue(ndev, skb, txq);
 386	}
 387
 388	return txq;
 389}
 390
 391static const struct net_device_ops mana_devops = {
 392	.ndo_open		= mana_open,
 393	.ndo_stop		= mana_close,
 394	.ndo_select_queue	= mana_select_queue,
 395	.ndo_start_xmit		= mana_start_xmit,
 396	.ndo_validate_addr	= eth_validate_addr,
 397	.ndo_get_stats64	= mana_get_stats64,
 398	.ndo_bpf		= mana_bpf,
 399	.ndo_xdp_xmit		= mana_xdp_xmit,
 400};
 401
 402static void mana_cleanup_port_context(struct mana_port_context *apc)
 403{
 404	kfree(apc->rxqs);
 405	apc->rxqs = NULL;
 406}
 407
 408static int mana_init_port_context(struct mana_port_context *apc)
 409{
 410	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
 411			    GFP_KERNEL);
 412
 413	return !apc->rxqs ? -ENOMEM : 0;
 414}
 415
 416static int mana_send_request(struct mana_context *ac, void *in_buf,
 417			     u32 in_len, void *out_buf, u32 out_len)
 418{
 419	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 420	struct gdma_resp_hdr *resp = out_buf;
 421	struct gdma_req_hdr *req = in_buf;
 422	struct device *dev = gc->dev;
 423	static atomic_t activity_id;
 424	int err;
 425
 426	req->dev_id = gc->mana.dev_id;
 427	req->activity_id = atomic_inc_return(&activity_id);
 428
 429	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
 430				   out_buf);
 431	if (err || resp->status) {
 432		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
 433			err, resp->status);
 434		return err ? err : -EPROTO;
 435	}
 436
 437	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
 438	    req->activity_id != resp->activity_id) {
 439		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
 440			req->dev_id.as_uint32, resp->dev_id.as_uint32,
 441			req->activity_id, resp->activity_id);
 442		return -EPROTO;
 443	}
 444
 445	return 0;
 446}
 447
 448static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
 449				const enum mana_command_code expected_code,
 450				const u32 min_size)
 451{
 452	if (resp_hdr->response.msg_type != expected_code)
 453		return -EPROTO;
 454
 455	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
 456		return -EPROTO;
 457
 458	if (resp_hdr->response.msg_size < min_size)
 459		return -EPROTO;
 460
 461	return 0;
 462}
 463
 464static int mana_pf_register_hw_vport(struct mana_port_context *apc)
 465{
 466	struct mana_register_hw_vport_resp resp = {};
 467	struct mana_register_hw_vport_req req = {};
 468	int err;
 469
 470	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
 471			     sizeof(req), sizeof(resp));
 472	req.attached_gfid = 1;
 473	req.is_pf_default_vport = 1;
 474	req.allow_all_ether_types = 1;
 475
 476	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 477				sizeof(resp));
 478	if (err) {
 479		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
 480		return err;
 481	}
 482
 483	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
 484				   sizeof(resp));
 485	if (err || resp.hdr.status) {
 486		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
 487			   err, resp.hdr.status);
 488		return err ? err : -EPROTO;
 489	}
 490
 491	apc->port_handle = resp.hw_vport_handle;
 492	return 0;
 493}
 494
 495static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
 496{
 497	struct mana_deregister_hw_vport_resp resp = {};
 498	struct mana_deregister_hw_vport_req req = {};
 499	int err;
 500
 501	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
 502			     sizeof(req), sizeof(resp));
 503	req.hw_vport_handle = apc->port_handle;
 504
 505	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 506				sizeof(resp));
 507	if (err) {
 508		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
 509			   err);
 510		return;
 511	}
 512
 513	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
 514				   sizeof(resp));
 515	if (err || resp.hdr.status)
 516		netdev_err(apc->ndev,
 517			   "Failed to deregister hw vPort: %d, 0x%x\n",
 518			   err, resp.hdr.status);
 519}
 520
 521static int mana_pf_register_filter(struct mana_port_context *apc)
 522{
 523	struct mana_register_filter_resp resp = {};
 524	struct mana_register_filter_req req = {};
 525	int err;
 526
 527	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
 528			     sizeof(req), sizeof(resp));
 529	req.vport = apc->port_handle;
 530	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
 531
 532	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 533				sizeof(resp));
 534	if (err) {
 535		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
 536		return err;
 537	}
 538
 539	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
 540				   sizeof(resp));
 541	if (err || resp.hdr.status) {
 542		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
 543			   err, resp.hdr.status);
 544		return err ? err : -EPROTO;
 545	}
 546
 547	apc->pf_filter_handle = resp.filter_handle;
 548	return 0;
 549}
 550
 551static void mana_pf_deregister_filter(struct mana_port_context *apc)
 552{
 553	struct mana_deregister_filter_resp resp = {};
 554	struct mana_deregister_filter_req req = {};
 555	int err;
 556
 557	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
 558			     sizeof(req), sizeof(resp));
 559	req.filter_handle = apc->pf_filter_handle;
 560
 561	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 562				sizeof(resp));
 563	if (err) {
 564		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
 565			   err);
 566		return;
 567	}
 568
 569	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
 570				   sizeof(resp));
 571	if (err || resp.hdr.status)
 572		netdev_err(apc->ndev,
 573			   "Failed to deregister filter: %d, 0x%x\n",
 574			   err, resp.hdr.status);
 575}
 576
 577static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 578				 u32 proto_minor_ver, u32 proto_micro_ver,
 579				 u16 *max_num_vports)
 580{
 581	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 582	struct mana_query_device_cfg_resp resp = {};
 583	struct mana_query_device_cfg_req req = {};
 584	struct device *dev = gc->dev;
 585	int err = 0;
 586
 587	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 588			     sizeof(req), sizeof(resp));
 589	req.proto_major_ver = proto_major_ver;
 590	req.proto_minor_ver = proto_minor_ver;
 591	req.proto_micro_ver = proto_micro_ver;
 592
 593	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
 594	if (err) {
 595		dev_err(dev, "Failed to query config: %d\n", err);
 596		return err;
 597	}
 598
 599	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
 600				   sizeof(resp));
 601	if (err || resp.hdr.status) {
 602		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
 603			resp.hdr.status);
 604		if (!err)
 605			err = -EPROTO;
 606		return err;
 607	}
 608
 609	*max_num_vports = resp.max_num_vports;
 610
 611	return 0;
 612}
 613
 614static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
 615				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
 616{
 617	struct mana_query_vport_cfg_resp resp = {};
 618	struct mana_query_vport_cfg_req req = {};
 619	int err;
 620
 621	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
 622			     sizeof(req), sizeof(resp));
 623
 624	req.vport_index = vport_index;
 625
 626	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 627				sizeof(resp));
 628	if (err)
 629		return err;
 630
 631	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
 632				   sizeof(resp));
 633	if (err)
 634		return err;
 635
 636	if (resp.hdr.status)
 637		return -EPROTO;
 638
 639	*max_sq = resp.max_num_sq;
 640	*max_rq = resp.max_num_rq;
 641	*num_indir_entry = resp.num_indirection_ent;
 642
 643	apc->port_handle = resp.vport;
 644	ether_addr_copy(apc->mac_addr, resp.mac_addr);
 645
 646	return 0;
 647}
 648
 649void mana_uncfg_vport(struct mana_port_context *apc)
 650{
 651	mutex_lock(&apc->vport_mutex);
 652	apc->vport_use_count--;
 653	WARN_ON(apc->vport_use_count < 0);
 654	mutex_unlock(&apc->vport_mutex);
 655}
 656EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
 657
 658int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
 659		   u32 doorbell_pg_id)
 660{
 661	struct mana_config_vport_resp resp = {};
 662	struct mana_config_vport_req req = {};
 663	int err;
 664
 665	/* This function is used to program the Ethernet port in the hardware
 666	 * table. It can be called from the Ethernet driver or the RDMA driver.
 667	 *
 668	 * For Ethernet usage, the hardware supports only one active user on a
 669	 * physical port. The driver checks on the port usage before programming
 670	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
 671	 * device to kernel NET layer (Ethernet driver).
 672	 *
 673	 * Because the RDMA driver doesn't know in advance which QP type the
 674	 * user will create, it exposes the device with all its ports. The user
 675	 * may not be able to create a RAW QP on a port if this port is already
 676	 * in use by the kernel Ethernet driver.
 677	 *
 678	 * This physical port limitation only applies to the RAW QP. For RC QP,
 679	 * the hardware doesn't have this limitation. The user can create RC
 680	 * QPs on a physical port up to the hardware limits independent of the
 681	 * Ethernet usage on the same port.
 682	 */
 683	mutex_lock(&apc->vport_mutex);
 684	if (apc->vport_use_count > 0) {
 685		mutex_unlock(&apc->vport_mutex);
 686		return -EBUSY;
 687	}
 688	apc->vport_use_count++;
 689	mutex_unlock(&apc->vport_mutex);
 690
 691	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
 692			     sizeof(req), sizeof(resp));
 693	req.vport = apc->port_handle;
 694	req.pdid = protection_dom_id;
 695	req.doorbell_pageid = doorbell_pg_id;
 696
 697	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 698				sizeof(resp));
 699	if (err) {
 700		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
 701		goto out;
 702	}
 703
 704	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
 705				   sizeof(resp));
 706	if (err || resp.hdr.status) {
 707		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
 708			   err, resp.hdr.status);
 709		if (!err)
 710			err = -EPROTO;
 711
 712		goto out;
 713	}
 714
 715	apc->tx_shortform_allowed = resp.short_form_allowed;
 716	apc->tx_vp_offset = resp.tx_vport_offset;
 717
 718	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
 719		    apc->port_handle, protection_dom_id, doorbell_pg_id);
 720out:
 721	if (err)
 722		mana_uncfg_vport(apc);
 723
 724	return err;
 725}
 726EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
 727
 728static int mana_cfg_vport_steering(struct mana_port_context *apc,
 729				   enum TRI_STATE rx,
 730				   bool update_default_rxobj, bool update_key,
 731				   bool update_tab)
 732{
 733	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
 734	struct mana_cfg_rx_steer_req *req = NULL;
 735	struct mana_cfg_rx_steer_resp resp = {};
 736	struct net_device *ndev = apc->ndev;
 737	mana_handle_t *req_indir_tab;
 738	u32 req_buf_size;
 739	int err;
 740
 741	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
 742	req = kzalloc(req_buf_size, GFP_KERNEL);
 743	if (!req)
 744		return -ENOMEM;
 745
 746	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
 747			     sizeof(resp));
 748
 749	req->vport = apc->port_handle;
 750	req->num_indir_entries = num_entries;
 751	req->indir_tab_offset = sizeof(*req);
 752	req->rx_enable = rx;
 753	req->rss_enable = apc->rss_state;
 754	req->update_default_rxobj = update_default_rxobj;
 755	req->update_hashkey = update_key;
 756	req->update_indir_tab = update_tab;
 757	req->default_rxobj = apc->default_rxobj;
 758
 759	if (update_key)
 760		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
 761
 762	if (update_tab) {
 763		req_indir_tab = (mana_handle_t *)(req + 1);
 764		memcpy(req_indir_tab, apc->rxobj_table,
 765		       req->num_indir_entries * sizeof(mana_handle_t));
 766	}
 767
 768	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
 769				sizeof(resp));
 770	if (err) {
 771		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
 772		goto out;
 773	}
 774
 775	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
 776				   sizeof(resp));
 777	if (err) {
 778		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
 779		goto out;
 780	}
 781
 782	if (resp.hdr.status) {
 783		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
 784			   resp.hdr.status);
 785		err = -EPROTO;
 786	}
 787
 788	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
 789		    apc->port_handle, num_entries);
 790out:
 791	kfree(req);
 792	return err;
 793}
 794
 795int mana_create_wq_obj(struct mana_port_context *apc,
 796		       mana_handle_t vport,
 797		       u32 wq_type, struct mana_obj_spec *wq_spec,
 798		       struct mana_obj_spec *cq_spec,
 799		       mana_handle_t *wq_obj)
 800{
 801	struct mana_create_wqobj_resp resp = {};
 802	struct mana_create_wqobj_req req = {};
 803	struct net_device *ndev = apc->ndev;
 804	int err;
 805
 806	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
 807			     sizeof(req), sizeof(resp));
 808	req.vport = vport;
 809	req.wq_type = wq_type;
 810	req.wq_gdma_region = wq_spec->gdma_region;
 811	req.cq_gdma_region = cq_spec->gdma_region;
 812	req.wq_size = wq_spec->queue_size;
 813	req.cq_size = cq_spec->queue_size;
 814	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
 815	req.cq_parent_qid = cq_spec->attached_eq;
 816
 817	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 818				sizeof(resp));
 819	if (err) {
 820		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
 821		goto out;
 822	}
 823
 824	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
 825				   sizeof(resp));
 826	if (err || resp.hdr.status) {
 827		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
 828			   resp.hdr.status);
 829		if (!err)
 830			err = -EPROTO;
 831		goto out;
 832	}
 833
 834	if (resp.wq_obj == INVALID_MANA_HANDLE) {
 835		netdev_err(ndev, "Got an invalid WQ object handle\n");
 836		err = -EPROTO;
 837		goto out;
 838	}
 839
 840	*wq_obj = resp.wq_obj;
 841	wq_spec->queue_index = resp.wq_id;
 842	cq_spec->queue_index = resp.cq_id;
 843
 844	return 0;
 845out:
 846	return err;
 847}
 848EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
 849
 850void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
 851			 mana_handle_t wq_obj)
 852{
 853	struct mana_destroy_wqobj_resp resp = {};
 854	struct mana_destroy_wqobj_req req = {};
 855	struct net_device *ndev = apc->ndev;
 856	int err;
 857
 858	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
 859			     sizeof(req), sizeof(resp));
 860	req.wq_type = wq_type;
 861	req.wq_obj_handle = wq_obj;
 862
 863	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 864				sizeof(resp));
 865	if (err) {
 866		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
 867		return;
 868	}
 869
 870	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
 871				   sizeof(resp));
 872	if (err || resp.hdr.status)
 873		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
 874			   resp.hdr.status);
 875}
 876EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
 877
 878static void mana_destroy_eq(struct mana_context *ac)
 879{
 880	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 881	struct gdma_queue *eq;
 882	int i;
 883
 884	if (!ac->eqs)
 885		return;
 886
 887	for (i = 0; i < gc->max_num_queues; i++) {
 888		eq = ac->eqs[i].eq;
 889		if (!eq)
 890			continue;
 891
 892		mana_gd_destroy_queue(gc, eq);
 893	}
 894
 895	kfree(ac->eqs);
 896	ac->eqs = NULL;
 897}
 898
 899static int mana_create_eq(struct mana_context *ac)
 900{
 901	struct gdma_dev *gd = ac->gdma_dev;
 902	struct gdma_context *gc = gd->gdma_context;
 903	struct gdma_queue_spec spec = {};
 904	int err;
 905	int i;
 906
 907	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
 908			  GFP_KERNEL);
 909	if (!ac->eqs)
 910		return -ENOMEM;
 911
 912	spec.type = GDMA_EQ;
 913	spec.monitor_avl_buf = false;
 914	spec.queue_size = EQ_SIZE;
 915	spec.eq.callback = NULL;
 916	spec.eq.context = ac->eqs;
 917	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 918
 919	for (i = 0; i < gc->max_num_queues; i++) {
 920		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
 921		if (err)
 922			goto out;
 923	}
 924
 925	return 0;
 926out:
 927	mana_destroy_eq(ac);
 928	return err;
 929}
 930
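    /* Post a fence request on the RQ and wait up to 10 seconds for the
     * matching CQE_RX_OBJECT_FENCE completion.
     */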
 931static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
 932{
 933	struct mana_fence_rq_resp resp = {};
 934	struct mana_fence_rq_req req = {};
 935	int err;
 936
 937	init_completion(&rxq->fence_event);
 938
 939	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
 940			     sizeof(req), sizeof(resp));
 941	req.wq_obj_handle = rxq->rxobj;
 942
 943	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 944				sizeof(resp));
 945	if (err) {
 946		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
 947			   rxq->rxq_idx, err);
 948		return err;
 949	}
 950
 951	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
 952	if (err || resp.hdr.status) {
 953		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
 954			   rxq->rxq_idx, err, resp.hdr.status);
 955		if (!err)
 956			err = -EPROTO;
 957
 958		return err;
 959	}
 960
 961	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
 962		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
 963			   rxq->rxq_idx);
 964		return -ETIMEDOUT;
 965	}
 966
 967	return 0;
 968}
 969
 970static void mana_fence_rqs(struct mana_port_context *apc)
 971{
 972	unsigned int rxq_idx;
 973	struct mana_rxq *rxq;
 974	int err;
 975
 976	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
 977		rxq = apc->rxqs[rxq_idx];
 978		err = mana_fence_rq(apc, rxq);
 979
 980		/* If fencing the RQ failed, fall back to a fixed delay instead. */
 981		if (err)
 982			msleep(100);
 983	}
 984}
 985
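    /* Advance the work queue tail by num_units once the corresponding
     * completions have been processed.
     */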
 986static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 987{
 988	u32 used_space_old;
 989	u32 used_space_new;
 990
 991	used_space_old = wq->head - wq->tail;
 992	used_space_new = wq->head - (wq->tail + num_units);
 993
 994	if (WARN_ON_ONCE(used_space_new > used_space_old))
 995		return -ERANGE;
 996
 997	wq->tail += num_units;
 998	return 0;
 999}
1000
1001static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1002{
1003	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1004	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1005	struct device *dev = gc->dev;
1006	int i;
1007
1008	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
1009
1010	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1011		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1012			       DMA_TO_DEVICE);
1013}
1014
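    /* Poll TX completions: unmap and free the transmitted skbs, move the
     * SQ tail forward, and wake the netdev TX queue when enough space is
     * available again.
     */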
1015static void mana_poll_tx_cq(struct mana_cq *cq)
1016{
1017	struct gdma_comp *completions = cq->gdma_comp_buf;
1018	struct gdma_posted_wqe_info *wqe_info;
1019	unsigned int pkt_transmitted = 0;
1020	unsigned int wqe_unit_cnt = 0;
1021	struct mana_txq *txq = cq->txq;
1022	struct mana_port_context *apc;
1023	struct netdev_queue *net_txq;
1024	struct gdma_queue *gdma_wq;
1025	unsigned int avail_space;
1026	struct net_device *ndev;
1027	struct sk_buff *skb;
1028	bool txq_stopped;
1029	int comp_read;
1030	int i;
1031
1032	ndev = txq->ndev;
1033	apc = netdev_priv(ndev);
1034
1035	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1036				    CQE_POLLING_BUFFER);
1037
1038	if (comp_read < 1)
1039		return;
1040
1041	for (i = 0; i < comp_read; i++) {
1042		struct mana_tx_comp_oob *cqe_oob;
1043
1044		if (WARN_ON_ONCE(!completions[i].is_sq))
1045			return;
1046
1047		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1048		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1049				 MANA_CQE_COMPLETION))
1050			return;
1051
1052		switch (cqe_oob->cqe_hdr.cqe_type) {
1053		case CQE_TX_OKAY:
1054			break;
1055
1056		case CQE_TX_SA_DROP:
1057		case CQE_TX_MTU_DROP:
1058		case CQE_TX_INVALID_OOB:
1059		case CQE_TX_INVALID_ETH_TYPE:
1060		case CQE_TX_HDR_PROCESSING_ERROR:
1061		case CQE_TX_VF_DISABLED:
1062		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1063		case CQE_TX_VPORT_DISABLED:
1064		case CQE_TX_VLAN_TAGGING_VIOLATION:
1065			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
1066				  cqe_oob->cqe_hdr.cqe_type);
1067			break;
1068
1069		default:
1070			/* If the CQE type is unexpected, log an error, assert,
1071			 * and go through the error path.
1072			 */
1073			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
1074				  cqe_oob->cqe_hdr.cqe_type);
1075			return;
1076		}
1077
1078		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1079			return;
1080
1081		skb = skb_dequeue(&txq->pending_skbs);
1082		if (WARN_ON_ONCE(!skb))
1083			return;
1084
1085		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1086		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1087
1088		mana_unmap_skb(skb, apc);
1089
1090		napi_consume_skb(skb, cq->budget);
1091
1092		pkt_transmitted++;
1093	}
1094
1095	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1096		return;
1097
1098	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1099
1100	gdma_wq = txq->gdma_sq;
1101	avail_space = mana_gd_wq_avail_space(gdma_wq);
1102
1103	/* Ensure tail updated before checking q stop */
1104	smp_mb();
1105
1106	net_txq = txq->net_txq;
1107	txq_stopped = netif_tx_queue_stopped(net_txq);
1108
1109	/* Ensure checking txq_stopped before apc->port_is_up. */
1110	smp_rmb();
1111
1112	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1113		netif_tx_wake_queue(net_txq);
1114		apc->eth_stats.wake_queue++;
1115	}
1116
1117	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1118		WARN_ON_ONCE(1);
1119
1120	cq->work_done = pkt_transmitted;
1121}
1122
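    /* Repost the receive WQE for the current buffer slot and advance the
     * buffer index, wrapping at num_rx_buf.
     */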
1123static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1124{
1125	struct mana_recv_buf_oob *recv_buf_oob;
1126	u32 curr_index;
1127	int err;
1128
1129	curr_index = rxq->buf_index++;
1130	if (rxq->buf_index == rxq->num_rx_buf)
1131		rxq->buf_index = 0;
1132
1133	recv_buf_oob = &rxq->rx_oobs[curr_index];
1134
1135	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1136				    &recv_buf_oob->wqe_inf);
1137	if (WARN_ON_ONCE(err))
1138		return;
1139
1140	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1141}
1142
1143static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
1144				      struct xdp_buff *xdp)
1145{
1146	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
1147
1148	if (!skb)
1149		return NULL;
1150
1151	if (xdp->data_hard_start) {
1152		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1153		skb_put(skb, xdp->data_end - xdp->data);
1154	} else {
1155		skb_reserve(skb, XDP_PACKET_HEADROOM);
1156		skb_put(skb, pkt_len);
1157	}
1158
1159	return skb;
1160}
1161
1162static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
1163			struct mana_rxq *rxq)
1164{
1165	struct mana_stats_rx *rx_stats = &rxq->stats;
1166	struct net_device *ndev = rxq->ndev;
1167	uint pkt_len = cqe->ppi[0].pkt_len;
1168	u16 rxq_idx = rxq->rxq_idx;
1169	struct napi_struct *napi;
1170	struct xdp_buff xdp = {};
1171	struct sk_buff *skb;
1172	u32 hash_value;
1173	u32 act;
1174
1175	rxq->rx_cq.work_done++;
1176	napi = &rxq->rx_cq.napi;
1177
1178	if (!buf_va) {
1179		++ndev->stats.rx_dropped;
1180		return;
1181	}
1182
1183	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1184
1185	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1186		return;
1187
1188	if (act != XDP_PASS && act != XDP_TX)
1189		goto drop_xdp;
1190
1191	skb = mana_build_skb(buf_va, pkt_len, &xdp);
1192
1193	if (!skb)
1194		goto drop;
1195
1196	skb->dev = napi->dev;
1197
1198	skb->protocol = eth_type_trans(skb, ndev);
1199	skb_checksum_none_assert(skb);
1200	skb_record_rx_queue(skb, rxq_idx);
1201
1202	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1203		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1204			skb->ip_summed = CHECKSUM_UNNECESSARY;
1205	}
1206
1207	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1208		hash_value = cqe->ppi[0].pkt_hash;
1209
1210		if (cqe->rx_hashtype & MANA_HASH_L4)
1211			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1212		else
1213			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1214	}
1215
1216	u64_stats_update_begin(&rx_stats->syncp);
1217	rx_stats->packets++;
1218	rx_stats->bytes += pkt_len;
1219
1220	if (act == XDP_TX)
1221		rx_stats->xdp_tx++;
1222	u64_stats_update_end(&rx_stats->syncp);
1223
1224	if (act == XDP_TX) {
1225		skb_set_queue_mapping(skb, rxq_idx);
1226		mana_xdp_tx(skb, ndev);
1227		return;
1228	}
1229
1230	napi_gro_receive(napi, skb);
1231
1232	return;
1233
1234drop_xdp:
1235	u64_stats_update_begin(&rx_stats->syncp);
1236	rx_stats->xdp_drop++;
1237	u64_stats_update_end(&rx_stats->syncp);
1238
1239drop:
1240	WARN_ON_ONCE(rxq->xdp_save_page);
1241	rxq->xdp_save_page = virt_to_page(buf_va);
1242
1243	++ndev->stats.rx_dropped;
1244
1245	return;
1246}
1247
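    /* Handle one RX completion: allocate and map a replacement page for the
     * ring slot, hand the old buffer to the stack via mana_rx_skb(), then
     * repost the receive WQE.
     */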
1248static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1249				struct gdma_comp *cqe)
1250{
1251	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1252	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1253	struct net_device *ndev = rxq->ndev;
1254	struct mana_recv_buf_oob *rxbuf_oob;
1255	struct device *dev = gc->dev;
1256	void *new_buf, *old_buf;
1257	struct page *new_page;
1258	u32 curr, pktlen;
1259	dma_addr_t da;
1260
1261	switch (oob->cqe_hdr.cqe_type) {
1262	case CQE_RX_OKAY:
1263		break;
1264
1265	case CQE_RX_TRUNCATED:
1266		++ndev->stats.rx_dropped;
1267		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1268		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1269		goto drop;
1270
1271	case CQE_RX_COALESCED_4:
1272		netdev_err(ndev, "RX coalescing is unsupported\n");
1273		return;
1274
1275	case CQE_RX_OBJECT_FENCE:
1276		complete(&rxq->fence_event);
1277		return;
1278
1279	default:
1280		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1281			   oob->cqe_hdr.cqe_type);
1282		return;
1283	}
1284
1285	pktlen = oob->ppi[0].pkt_len;
1286
1287	if (pktlen == 0) {
1288		/* data packets should never have a packet length of zero */
1289		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1290			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1291		return;
1292	}
1293
1294	curr = rxq->buf_index;
1295	rxbuf_oob = &rxq->rx_oobs[curr];
1296	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1297
1298	/* Reuse XDP dropped page if available */
1299	if (rxq->xdp_save_page) {
1300		new_page = rxq->xdp_save_page;
1301		rxq->xdp_save_page = NULL;
1302	} else {
1303		new_page = alloc_page(GFP_ATOMIC);
1304	}
1305
1306	if (new_page) {
1307		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
1308				  DMA_FROM_DEVICE);
1309
1310		if (dma_mapping_error(dev, da)) {
1311			__free_page(new_page);
1312			new_page = NULL;
1313		}
1314	}
1315
1316	new_buf = new_page ? page_to_virt(new_page) : NULL;
1317
1318	if (new_buf) {
1319		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1320			       DMA_FROM_DEVICE);
1321
1322		old_buf = rxbuf_oob->buf_va;
1323
1324		/* refresh the rxbuf_oob with the new page */
1325		rxbuf_oob->buf_va = new_buf;
1326		rxbuf_oob->buf_dma_addr = da;
1327		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1328	} else {
1329		old_buf = NULL; /* drop the packet if no memory */
1330	}
1331
1332	mana_rx_skb(old_buf, oob, rxq);
1333
1334drop:
1335	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1336
1337	mana_post_pkt_rxq(rxq);
1338}
1339
1340static void mana_poll_rx_cq(struct mana_cq *cq)
1341{
1342	struct gdma_comp *comp = cq->gdma_comp_buf;
1343	struct mana_rxq *rxq = cq->rxq;
1344	int comp_read, i;
1345
1346	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1347	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1348
1349	rxq->xdp_flush = false;
1350
1351	for (i = 0; i < comp_read; i++) {
1352		if (WARN_ON_ONCE(comp[i].is_sq))
1353			return;
1354
1355		/* verify recv cqe references the right rxq */
1356		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1357			return;
1358
1359		mana_process_rx_cqe(rxq, cq, &comp[i]);
1360	}
1361
1362	if (rxq->xdp_flush)
1363		xdp_do_flush();
1364}
1365
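    /* Common CQ handler invoked from NAPI poll: process RX or TX
     * completions and re-arm the CQ only if the work done fit within the
     * NAPI budget.
     */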
1366static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1367{
1368	struct mana_cq *cq = context;
1369	u8 arm_bit;
1370	int w;
1371
1372	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1373
1374	if (cq->type == MANA_CQ_TYPE_RX)
1375		mana_poll_rx_cq(cq);
1376	else
1377		mana_poll_tx_cq(cq);
1378
1379	w = cq->work_done;
1380
1381	if (w < cq->budget &&
1382	    napi_complete_done(&cq->napi, w)) {
1383		arm_bit = SET_ARM_BIT;
1384	} else {
1385		arm_bit = 0;
1386	}
1387
1388	mana_gd_ring_cq(gdma_queue, arm_bit);
1389
1390	return w;
1391}
1392
1393static int mana_poll(struct napi_struct *napi, int budget)
1394{
1395	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1396	int w;
1397
1398	cq->work_done = 0;
1399	cq->budget = budget;
1400
1401	w = mana_cq_handler(cq, cq->gdma_cq);
1402
1403	return min(w, budget);
1404}
1405
1406static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1407{
1408	struct mana_cq *cq = context;
1409
1410	napi_schedule_irqoff(&cq->napi);
1411}
1412
1413static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1414{
1415	struct gdma_dev *gd = apc->ac->gdma_dev;
1416
1417	if (!cq->gdma_cq)
1418		return;
1419
1420	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1421}
1422
1423static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1424{
1425	struct gdma_dev *gd = apc->ac->gdma_dev;
1426
1427	if (!txq->gdma_sq)
1428		return;
1429
1430	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1431}
1432
1433static void mana_destroy_txq(struct mana_port_context *apc)
1434{
1435	struct napi_struct *napi;
1436	int i;
1437
1438	if (!apc->tx_qp)
1439		return;
1440
1441	for (i = 0; i < apc->num_queues; i++) {
1442		napi = &apc->tx_qp[i].tx_cq.napi;
1443		napi_synchronize(napi);
1444		napi_disable(napi);
1445		netif_napi_del(napi);
1446
1447		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1448
1449		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1450
1451		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1452	}
1453
1454	kfree(apc->tx_qp);
1455	apc->tx_qp = NULL;
1456}
1457
1458static int mana_create_txq(struct mana_port_context *apc,
1459			   struct net_device *net)
1460{
1461	struct mana_context *ac = apc->ac;
1462	struct gdma_dev *gd = ac->gdma_dev;
1463	struct mana_obj_spec wq_spec;
1464	struct mana_obj_spec cq_spec;
1465	struct gdma_queue_spec spec;
1466	struct gdma_context *gc;
1467	struct mana_txq *txq;
1468	struct mana_cq *cq;
1469	u32 txq_size;
1470	u32 cq_size;
1471	int err;
1472	int i;
1473
1474	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1475			     GFP_KERNEL);
1476	if (!apc->tx_qp)
1477		return -ENOMEM;
1478
1479	/*  The minimum size of the WQE is 32 bytes, hence
1480	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1481	 *  the SQ can store. This value is then used to size other queues
1482	 *  to prevent overflow.
1483	 */
1484	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1485	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1486
1487	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1488	cq_size = PAGE_ALIGN(cq_size);
1489
1490	gc = gd->gdma_context;
1491
1492	for (i = 0; i < apc->num_queues; i++) {
1493		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1494
1495		/* Create SQ */
1496		txq = &apc->tx_qp[i].txq;
1497
1498		u64_stats_init(&txq->stats.syncp);
1499		txq->ndev = net;
1500		txq->net_txq = netdev_get_tx_queue(net, i);
1501		txq->vp_offset = apc->tx_vp_offset;
1502		skb_queue_head_init(&txq->pending_skbs);
1503
1504		memset(&spec, 0, sizeof(spec));
1505		spec.type = GDMA_SQ;
1506		spec.monitor_avl_buf = true;
1507		spec.queue_size = txq_size;
1508		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1509		if (err)
1510			goto out;
1511
1512		/* Create SQ's CQ */
1513		cq = &apc->tx_qp[i].tx_cq;
1514		cq->type = MANA_CQ_TYPE_TX;
1515
1516		cq->txq = txq;
1517
1518		memset(&spec, 0, sizeof(spec));
1519		spec.type = GDMA_CQ;
1520		spec.monitor_avl_buf = false;
1521		spec.queue_size = cq_size;
1522		spec.cq.callback = mana_schedule_napi;
1523		spec.cq.parent_eq = ac->eqs[i].eq;
1524		spec.cq.context = cq;
1525		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1526		if (err)
1527			goto out;
1528
1529		memset(&wq_spec, 0, sizeof(wq_spec));
1530		memset(&cq_spec, 0, sizeof(cq_spec));
1531
1532		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
1533		wq_spec.queue_size = txq->gdma_sq->queue_size;
1534
1535		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
1536		cq_spec.queue_size = cq->gdma_cq->queue_size;
1537		cq_spec.modr_ctx_id = 0;
1538		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1539
1540		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1541					 &wq_spec, &cq_spec,
1542					 &apc->tx_qp[i].tx_object);
1543
1544		if (err)
1545			goto out;
1546
1547		txq->gdma_sq->id = wq_spec.queue_index;
1548		cq->gdma_cq->id = cq_spec.queue_index;
1549
1550		txq->gdma_sq->mem_info.dma_region_handle =
1551			GDMA_INVALID_DMA_REGION;
1552		cq->gdma_cq->mem_info.dma_region_handle =
1553			GDMA_INVALID_DMA_REGION;
1554
1555		txq->gdma_txq_id = txq->gdma_sq->id;
1556
1557		cq->gdma_id = cq->gdma_cq->id;
1558
1559		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1560			err = -EINVAL;
1561			goto out;
1562		}
1563
1564		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1565
1566		netif_napi_add_tx(net, &cq->napi, mana_poll);
1567		napi_enable(&cq->napi);
1568
1569		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1570	}
1571
1572	return 0;
1573out:
1574	mana_destroy_txq(apc);
1575	return err;
1576}
1577
1578static void mana_destroy_rxq(struct mana_port_context *apc,
1579			     struct mana_rxq *rxq, bool validate_state)
1580
1581{
1582	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1583	struct mana_recv_buf_oob *rx_oob;
1584	struct device *dev = gc->dev;
1585	struct napi_struct *napi;
1586	int i;
1587
1588	if (!rxq)
1589		return;
1590
1591	napi = &rxq->rx_cq.napi;
1592
1593	if (validate_state)
1594		napi_synchronize(napi);
1595
1596	napi_disable(napi);
1597
1598	xdp_rxq_info_unreg(&rxq->xdp_rxq);
1599
1600	netif_napi_del(napi);
1601
1602	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1603
1604	mana_deinit_cq(apc, &rxq->rx_cq);
1605
1606	if (rxq->xdp_save_page)
1607		__free_page(rxq->xdp_save_page);
1608
1609	for (i = 0; i < rxq->num_rx_buf; i++) {
1610		rx_oob = &rxq->rx_oobs[i];
1611
1612		if (!rx_oob->buf_va)
1613			continue;
1614
1615		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1616			       DMA_FROM_DEVICE);
1617
1618		free_page((unsigned long)rx_oob->buf_va);
1619		rx_oob->buf_va = NULL;
1620	}
1621
1622	if (rxq->gdma_rq)
1623		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1624
1625	kfree(rxq);
1626}
1627
1628#define MANA_WQE_HEADER_SIZE 16
1629#define MANA_WQE_SGE_SIZE 16
1630
1631static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1632			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1633{
1634	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1635	struct mana_recv_buf_oob *rx_oob;
1636	struct device *dev = gc->dev;
1637	struct page *page;
1638	dma_addr_t da;
1639	u32 buf_idx;
1640
1641	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1642
1643	*rxq_size = 0;
1644	*cq_size = 0;
1645
1646	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1647		rx_oob = &rxq->rx_oobs[buf_idx];
1648		memset(rx_oob, 0, sizeof(*rx_oob));
1649
1650		page = alloc_page(GFP_KERNEL);
1651		if (!page)
1652			return -ENOMEM;
1653
1654		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1655				  DMA_FROM_DEVICE);
1656
1657		if (dma_mapping_error(dev, da)) {
1658			__free_page(page);
1659			return -ENOMEM;
1660		}
1661
1662		rx_oob->buf_va = page_to_virt(page);
1663		rx_oob->buf_dma_addr = da;
1664
1665		rx_oob->num_sge = 1;
1666		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1667		rx_oob->sgl[0].size = rxq->datasize;
1668		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1669
1670		rx_oob->wqe_req.sgl = rx_oob->sgl;
1671		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1672		rx_oob->wqe_req.inline_oob_size = 0;
1673		rx_oob->wqe_req.inline_oob_data = NULL;
1674		rx_oob->wqe_req.flags = 0;
1675		rx_oob->wqe_req.client_data_unit = 0;
1676
1677		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1678				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1679		*cq_size += COMP_ENTRY_SIZE;
1680	}
1681
1682	return 0;
1683}
1684
1685static int mana_push_wqe(struct mana_rxq *rxq)
1686{
1687	struct mana_recv_buf_oob *rx_oob;
1688	u32 buf_idx;
1689	int err;
1690
1691	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1692		rx_oob = &rxq->rx_oobs[buf_idx];
1693
1694		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1695					    &rx_oob->wqe_inf);
1696		if (err)
1697			return -ENOSPC;
1698	}
1699
1700	return 0;
1701}
1702
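    /* Allocate an RXQ and its receive buffers, create the RQ/CQ pair in
     * hardware, register the queue for XDP, and enable NAPI on it.
     */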
1703static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1704					u32 rxq_idx, struct mana_eq *eq,
1705					struct net_device *ndev)
1706{
1707	struct gdma_dev *gd = apc->ac->gdma_dev;
1708	struct mana_obj_spec wq_spec;
1709	struct mana_obj_spec cq_spec;
1710	struct gdma_queue_spec spec;
1711	struct mana_cq *cq = NULL;
1712	struct gdma_context *gc;
1713	u32 cq_size, rq_size;
1714	struct mana_rxq *rxq;
1715	int err;
1716
1717	gc = gd->gdma_context;
1718
1719	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1720		      GFP_KERNEL);
1721	if (!rxq)
1722		return NULL;
1723
1724	rxq->ndev = ndev;
1725	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1726	rxq->rxq_idx = rxq_idx;
1727	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1728	rxq->rxobj = INVALID_MANA_HANDLE;
1729
1730	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1731	if (err)
1732		goto out;
1733
1734	rq_size = PAGE_ALIGN(rq_size);
1735	cq_size = PAGE_ALIGN(cq_size);
1736
1737	/* Create RQ */
1738	memset(&spec, 0, sizeof(spec));
1739	spec.type = GDMA_RQ;
1740	spec.monitor_avl_buf = true;
1741	spec.queue_size = rq_size;
1742	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1743	if (err)
1744		goto out;
1745
1746	/* Create RQ's CQ */
1747	cq = &rxq->rx_cq;
1748	cq->type = MANA_CQ_TYPE_RX;
1749	cq->rxq = rxq;
1750
1751	memset(&spec, 0, sizeof(spec));
1752	spec.type = GDMA_CQ;
1753	spec.monitor_avl_buf = false;
1754	spec.queue_size = cq_size;
1755	spec.cq.callback = mana_schedule_napi;
1756	spec.cq.parent_eq = eq->eq;
1757	spec.cq.context = cq;
1758	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1759	if (err)
1760		goto out;
1761
1762	memset(&wq_spec, 0, sizeof(wq_spec));
1763	memset(&cq_spec, 0, sizeof(cq_spec));
1764	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
1765	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1766
1767	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
1768	cq_spec.queue_size = cq->gdma_cq->queue_size;
1769	cq_spec.modr_ctx_id = 0;
1770	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1771
1772	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1773				 &wq_spec, &cq_spec, &rxq->rxobj);
1774	if (err)
1775		goto out;
1776
1777	rxq->gdma_rq->id = wq_spec.queue_index;
1778	cq->gdma_cq->id = cq_spec.queue_index;
1779
1780	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
1781	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
1782
1783	rxq->gdma_id = rxq->gdma_rq->id;
1784	cq->gdma_id = cq->gdma_cq->id;
1785
1786	err = mana_push_wqe(rxq);
1787	if (err)
1788		goto out;
1789
1790	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1791		err = -EINVAL;
1792		goto out;
1793	}
1794
1795	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1796
1797	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
1798
1799	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1800				 cq->napi.napi_id));
1801	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1802					   MEM_TYPE_PAGE_SHARED, NULL));
1803
1804	napi_enable(&cq->napi);
1805
1806	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1807out:
1808	if (!err)
1809		return rxq;
1810
1811	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1812
1813	mana_destroy_rxq(apc, rxq, false);
1814
1815	if (cq)
1816		mana_deinit_cq(apc, cq);
1817
1818	return NULL;
1819}
1820
1821static int mana_add_rx_queues(struct mana_port_context *apc,
1822			      struct net_device *ndev)
1823{
1824	struct mana_context *ac = apc->ac;
1825	struct mana_rxq *rxq;
1826	int err = 0;
1827	int i;
1828
1829	for (i = 0; i < apc->num_queues; i++) {
1830		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1831		if (!rxq) {
1832			err = -ENOMEM;
1833			goto out;
1834		}
1835
1836		u64_stats_init(&rxq->stats.syncp);
1837
1838		apc->rxqs[i] = rxq;
1839	}
1840
1841	apc->default_rxobj = apc->rxqs[0]->rxobj;
1842out:
1843	return err;
1844}
1845
1846static void mana_destroy_vport(struct mana_port_context *apc)
1847{
1848	struct gdma_dev *gd = apc->ac->gdma_dev;
1849	struct mana_rxq *rxq;
1850	u32 rxq_idx;
1851
1852	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1853		rxq = apc->rxqs[rxq_idx];
1854		if (!rxq)
1855			continue;
1856
1857		mana_destroy_rxq(apc, rxq, true);
1858		apc->rxqs[rxq_idx] = NULL;
1859	}
1860
1861	mana_destroy_txq(apc);
1862	mana_uncfg_vport(apc);
1863
1864	if (gd->gdma_context->is_pf)
1865		mana_pf_deregister_hw_vport(apc);
1866}
1867
1868static int mana_create_vport(struct mana_port_context *apc,
1869			     struct net_device *net)
1870{
1871	struct gdma_dev *gd = apc->ac->gdma_dev;
1872	int err;
1873
1874	apc->default_rxobj = INVALID_MANA_HANDLE;
1875
1876	if (gd->gdma_context->is_pf) {
1877		err = mana_pf_register_hw_vport(apc);
1878		if (err)
1879			return err;
1880	}
1881
1882	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1883	if (err)
1884		return err;
1885
1886	return mana_create_txq(apc, net);
1887}
1888
1889static void mana_rss_table_init(struct mana_port_context *apc)
1890{
1891	int i;
1892
1893	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1894		apc->indir_table[i] =
1895			ethtool_rxfh_indir_default(i, apc->num_queues);
1896}
1897
1898int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1899		    bool update_hash, bool update_tab)
1900{
1901	u32 queue_idx;
1902	int err;
1903	int i;
1904
1905	if (update_tab) {
1906		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1907			queue_idx = apc->indir_table[i];
1908			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1909		}
1910	}
1911
1912	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1913	if (err)
1914		return err;
1915
1916	mana_fence_rqs(apc);
1917
1918	return 0;
1919}
1920
1921static int mana_init_port(struct net_device *ndev)
1922{
1923	struct mana_port_context *apc = netdev_priv(ndev);
1924	u32 max_txq, max_rxq, max_queues;
1925	int port_idx = apc->port_idx;
1926	u32 num_indirect_entries;
1927	int err;
1928
1929	err = mana_init_port_context(apc);
1930	if (err)
1931		return err;
1932
1933	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1934				   &num_indirect_entries);
1935	if (err) {
1936		netdev_err(ndev, "Failed to query info for vPort %d\n",
1937			   port_idx);
1938		goto reset_apc;
1939	}
1940
1941	max_queues = min_t(u32, max_txq, max_rxq);
1942	if (apc->max_queues > max_queues)
1943		apc->max_queues = max_queues;
1944
1945	if (apc->num_queues > apc->max_queues)
1946		apc->num_queues = apc->max_queues;
1947
1948	eth_hw_addr_set(ndev, apc->mac_addr);
1949
1950	return 0;
1951
1952reset_apc:
1953	kfree(apc->rxqs);
1954	apc->rxqs = NULL;
1955	return err;
1956}
1957
1958int mana_alloc_queues(struct net_device *ndev)
1959{
1960	struct mana_port_context *apc = netdev_priv(ndev);
1961	struct gdma_dev *gd = apc->ac->gdma_dev;
1962	int err;
1963
1964	err = mana_create_vport(apc, ndev);
1965	if (err)
1966		return err;
1967
1968	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1969	if (err)
1970		goto destroy_vport;
1971
1972	err = mana_add_rx_queues(apc, ndev);
1973	if (err)
1974		goto destroy_vport;
1975
1976	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1977
1978	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1979	if (err)
1980		goto destroy_vport;
1981
1982	mana_rss_table_init(apc);
1983
1984	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1985	if (err)
1986		goto destroy_vport;
1987
1988	if (gd->gdma_context->is_pf) {
1989		err = mana_pf_register_filter(apc);
1990		if (err)
1991			goto destroy_vport;
1992	}
1993
1994	mana_chn_setxdp(apc, mana_xdp_get(apc));
1995
1996	return 0;
1997
1998destroy_vport:
1999	mana_destroy_vport(apc);
2000	return err;
2001}
2002
2003int mana_attach(struct net_device *ndev)
2004{
2005	struct mana_port_context *apc = netdev_priv(ndev);
2006	int err;
2007
2008	ASSERT_RTNL();
2009
2010	err = mana_init_port(ndev);
2011	if (err)
2012		return err;
2013
2014	if (apc->port_st_save) {
2015		err = mana_alloc_queues(ndev);
2016		if (err) {
2017			mana_cleanup_port_context(apc);
2018			return err;
2019		}
2020	}
2021
2022	apc->port_is_up = apc->port_st_save;
2023
2024	/* Ensure port state updated before txq state */
2025	smp_wmb();
2026
2027	if (apc->port_is_up)
2028		netif_carrier_on(ndev);
2029
2030	netif_device_attach(ndev);
2031
2032	return 0;
2033}
2034
2035static int mana_dealloc_queues(struct net_device *ndev)
2036{
2037	struct mana_port_context *apc = netdev_priv(ndev);
2038	struct gdma_dev *gd = apc->ac->gdma_dev;
2039	struct mana_txq *txq;
2040	int i, err;
2041
2042	if (apc->port_is_up)
2043		return -EINVAL;
2044
2045	mana_chn_setxdp(apc, NULL);
2046
2047	if (gd->gdma_context->is_pf)
2048		mana_pf_deregister_filter(apc);
2049
2050	/* No packet can be transmitted now since apc->port_is_up is false.
2051	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2052	 * a txq because it may not yet have seen apc->port_is_up being
2053	 * cleared to false, but that doesn't matter: mana_start_xmit() drops
2054	 * any new packets while apc->port_is_up is false.
2055	 *
2056	 * Drain all the in-flight TX packets.
2057	 */
2058	for (i = 0; i < apc->num_queues; i++) {
2059		txq = &apc->tx_qp[i].txq;
2060
2061		while (atomic_read(&txq->pending_sends) > 0)
2062			usleep_range(1000, 2000);
2063	}
2064
2065	/* At this point the queues can no longer be woken up, because
2066	 * mana_poll_tx_cq() cannot be running anymore.
2067	 */
2068
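	/* Disable RX and RSS before destroying the vPort */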
2069	apc->rss_state = TRI_STATE_FALSE;
2070	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2071	if (err) {
2072		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2073		return err;
2074	}
2075
2076	mana_destroy_vport(apc);
2077
2078	return 0;
2079}
2080
2081int mana_detach(struct net_device *ndev, bool from_close)
2082{
2083	struct mana_port_context *apc = netdev_priv(ndev);
2084	int err;
2085
2086	ASSERT_RTNL();
2087
2088	apc->port_st_save = apc->port_is_up;
2089	apc->port_is_up = false;
2090
2091	/* Ensure port state updated before txq state */
2092	smp_wmb();
2093
2094	netif_tx_disable(ndev);
2095	netif_carrier_off(ndev);
2096
2097	if (apc->port_st_save) {
2098		err = mana_dealloc_queues(ndev);
2099		if (err)
2100			return err;
2101	}
2102
2103	if (!from_close) {
2104		netif_device_detach(ndev);
2105		mana_cleanup_port_context(apc);
2106	}
2107
2108	return 0;
2109}
2110
2111static int mana_probe_port(struct mana_context *ac, int port_idx,
2112			   struct net_device **ndev_storage)
2113{
2114	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2115	struct mana_port_context *apc;
2116	struct net_device *ndev;
2117	int err;
2118
2119	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2120				 gc->max_num_queues);
2121	if (!ndev)
2122		return -ENOMEM;
2123
2124	*ndev_storage = ndev;
2125
2126	apc = netdev_priv(ndev);
2127	apc->ac = ac;
2128	apc->ndev = ndev;
2129	apc->max_queues = gc->max_num_queues;
2130	apc->num_queues = gc->max_num_queues;
2131	apc->port_handle = INVALID_MANA_HANDLE;
2132	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2133	apc->port_idx = port_idx;
2134
2135	mutex_init(&apc->vport_mutex);
2136	apc->vport_use_count = 0;
2137
2138	ndev->netdev_ops = &mana_devops;
2139	ndev->ethtool_ops = &mana_ethtool_ops;
2140	ndev->mtu = ETH_DATA_LEN;
2141	ndev->max_mtu = ndev->mtu;
2142	ndev->min_mtu = ndev->mtu;
2143	ndev->needed_headroom = MANA_HEADROOM;
2144	ndev->dev_port = port_idx;
2145	SET_NETDEV_DEV(ndev, gc->dev);
2146
2147	netif_carrier_off(ndev);
2148
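	/* Seed the RSS hash key with random bytes */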
2149	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2150
2151	err = mana_init_port(ndev);
2152	if (err)
2153		goto free_net;
2154
2155	netdev_lockdep_set_classes(ndev);
2156
2157	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2158	ndev->hw_features |= NETIF_F_RXCSUM;
2159	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2160	ndev->hw_features |= NETIF_F_RXHASH;
2161	ndev->features = ndev->hw_features;
2162	ndev->vlan_features = 0;
2163
2164	err = register_netdev(ndev);
2165	if (err) {
2166		netdev_err(ndev, "Unable to register netdev.\n");
2167		goto reset_apc;
2168	}
2169
2170	return 0;
2171
2172reset_apc:
2173	kfree(apc->rxqs);
2174	apc->rxqs = NULL;
2175free_net:
2176	*ndev_storage = NULL;
2177	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2178	free_netdev(ndev);
2179	return err;
2180}
2181
2182static void adev_release(struct device *dev)
2183{
2184	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2185
2186	kfree(madev);
2187}
2188
2189static void remove_adev(struct gdma_dev *gd)
2190{
2191	struct auxiliary_device *adev = gd->adev;
2192	int id = adev->id;
2193
2194	auxiliary_device_delete(adev);
2195	auxiliary_device_uninit(adev);
2196
2197	mana_adev_idx_free(id);
2198	gd->adev = NULL;
2199}
2200
2201static int add_adev(struct gdma_dev *gd)
2202{
2203	struct auxiliary_device *adev;
2204	struct mana_adev *madev;
2205	int ret;
2206
2207	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2208	if (!madev)
2209		return -ENOMEM;
2210
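	/* Expose the adapter as an auxiliary device for the MANA RDMA driver */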
2211	adev = &madev->adev;
2212	ret = mana_adev_idx_alloc();
2213	if (ret < 0)
2214		goto idx_fail;
2215	adev->id = ret;
2216
2217	adev->name = "rdma";
2218	adev->dev.parent = gd->gdma_context->dev;
2219	adev->dev.release = adev_release;
2220	madev->mdev = gd;
2221
2222	ret = auxiliary_device_init(adev);
2223	if (ret)
2224		goto init_fail;
2225
2226	ret = auxiliary_device_add(adev);
2227	if (ret)
2228		goto add_fail;
2229
2230	gd->adev = adev;
2231	return 0;
2232
2233add_fail:
2234	auxiliary_device_uninit(adev);
2235
2236init_fail:
2237	mana_adev_idx_free(adev->id);
2238
2239idx_fail:
2240	kfree(madev);
2241
2242	return ret;
2243}
2244
2245int mana_probe(struct gdma_dev *gd, bool resuming)
2246{
2247	struct gdma_context *gc = gd->gdma_context;
2248	struct mana_context *ac = gd->driver_data;
2249	struct device *dev = gc->dev;
2250	u16 num_ports = 0;
2251	int err;
2252	int i;
2253
2254	dev_info(dev,
2255		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2256		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2257
2258	err = mana_gd_register_device(gd);
2259	if (err)
2260		return err;
2261
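	/* Allocate the adapter context only on first probe; reuse it on resume */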
2262	if (!resuming) {
2263		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2264		if (!ac)
2265			return -ENOMEM;
2266
2267		ac->gdma_dev = gd;
2268		gd->driver_data = ac;
2269	}
2270
2271	err = mana_create_eq(ac);
2272	if (err)
2273		goto out;
2274
2275	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2276				    MANA_MICRO_VERSION, &num_ports);
2277	if (err)
2278		goto out;
2279
2280	if (!resuming) {
2281		ac->num_ports = num_ports;
2282	} else {
2283		if (ac->num_ports != num_ports) {
2284			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2285				ac->num_ports, num_ports);
2286			err = -EPROTO;
2287			goto out;
2288		}
2289	}
2290
2291	if (ac->num_ports == 0)
2292		dev_err(dev, "Failed to detect any vPort\n");
2293
2294	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2295		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2296
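	/* On first probe create a netdev per vPort; on resume re-attach them */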
2297	if (!resuming) {
2298		for (i = 0; i < ac->num_ports; i++) {
2299			err = mana_probe_port(ac, i, &ac->ports[i]);
2300			if (err)
2301				break;
2302		}
2303	} else {
2304		for (i = 0; i < ac->num_ports; i++) {
2305			rtnl_lock();
2306			err = mana_attach(ac->ports[i]);
2307			rtnl_unlock();
2308			if (err)
2309				break;
2310		}
2311	}
2312
2313	err = add_adev(gd);
2314out:
2315	if (err)
2316		mana_remove(gd, false);
2317
2318	return err;
2319}
2320
2321void mana_remove(struct gdma_dev *gd, bool suspending)
2322{
2323	struct gdma_context *gc = gd->gdma_context;
2324	struct mana_context *ac = gd->driver_data;
2325	struct device *dev = gc->dev;
2326	struct net_device *ndev;
2327	int err;
2328	int i;
2329
2330	/* adev currently doesn't support suspending, always remove it */
2331	if (gd->adev)
2332		remove_adev(gd);
2333
2334	for (i = 0; i < ac->num_ports; i++) {
2335		ndev = ac->ports[i];
2336		if (!ndev) {
2337			if (i == 0)
2338				dev_err(dev, "No net device to remove\n");
2339			goto out;
2340		}
2341
2342		/* All cleanup actions should stay after rtnl_lock(), otherwise
2343		 * other functions may access partially cleaned up data.
2344		 */
2345		rtnl_lock();
2346
2347		err = mana_detach(ndev, false);
2348		if (err)
2349			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2350				   i, err);
2351
2352		if (suspending) {
2353			/* No need to unregister the ndev. */
2354			rtnl_unlock();
2355			continue;
2356		}
2357
2358		unregister_netdevice(ndev);
2359
2360		rtnl_unlock();
2361
2362		free_netdev(ndev);
2363	}
2364
2365	mana_destroy_eq(ac);
2366out:
2367	mana_gd_deregister_device(gd);
2368
2369	if (suspending)
2370		return;
2371
2372	gd->driver_data = NULL;
2373	gd->gdma_context = NULL;
2374	kfree(ac);
2375}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/* Copyright (c) 2021, Microsoft Corporation. */
   3
 
 
   4#include <linux/inetdevice.h>
   5#include <linux/etherdevice.h>
   6#include <linux/ethtool.h>
 
   7#include <linux/mm.h>
   8
   9#include <net/checksum.h>
  10#include <net/ip6_checksum.h>
  11
  12#include "mana.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
  13
  14/* Microsoft Azure Network Adapter (MANA) functions */
  15
  16static int mana_open(struct net_device *ndev)
  17{
  18	struct mana_port_context *apc = netdev_priv(ndev);
  19	int err;
  20
  21	err = mana_alloc_queues(ndev);
  22	if (err)
  23		return err;
  24
  25	apc->port_is_up = true;
  26
  27	/* Ensure port state updated before txq state */
  28	smp_wmb();
  29
  30	netif_carrier_on(ndev);
  31	netif_tx_wake_all_queues(ndev);
  32
  33	return 0;
  34}
  35
  36static int mana_close(struct net_device *ndev)
  37{
  38	struct mana_port_context *apc = netdev_priv(ndev);
  39
  40	if (!apc->port_is_up)
  41		return 0;
  42
  43	return mana_detach(ndev, true);
  44}
  45
  46static bool mana_can_tx(struct gdma_queue *wq)
  47{
  48	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
  49}
  50
  51static unsigned int mana_checksum_info(struct sk_buff *skb)
  52{
  53	if (skb->protocol == htons(ETH_P_IP)) {
  54		struct iphdr *ip = ip_hdr(skb);
  55
  56		if (ip->protocol == IPPROTO_TCP)
  57			return IPPROTO_TCP;
  58
  59		if (ip->protocol == IPPROTO_UDP)
  60			return IPPROTO_UDP;
  61	} else if (skb->protocol == htons(ETH_P_IPV6)) {
  62		struct ipv6hdr *ip6 = ipv6_hdr(skb);
  63
  64		if (ip6->nexthdr == IPPROTO_TCP)
  65			return IPPROTO_TCP;
  66
  67		if (ip6->nexthdr == IPPROTO_UDP)
  68			return IPPROTO_UDP;
  69	}
  70
  71	/* No csum offloading */
  72	return 0;
  73}
  74
  75static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
  76			struct mana_tx_package *tp)
  77{
  78	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
  79	struct gdma_dev *gd = apc->ac->gdma_dev;
  80	struct gdma_context *gc;
  81	struct device *dev;
  82	skb_frag_t *frag;
  83	dma_addr_t da;
  84	int i;
  85
  86	gc = gd->gdma_context;
  87	dev = gc->dev;
  88	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
  89
  90	if (dma_mapping_error(dev, da))
  91		return -ENOMEM;
  92
  93	ash->dma_handle[0] = da;
  94	ash->size[0] = skb_headlen(skb);
  95
  96	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
  97	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
  98	tp->wqe_req.sgl[0].size = ash->size[0];
  99
 100	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 101		frag = &skb_shinfo(skb)->frags[i];
 102		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 103				      DMA_TO_DEVICE);
 104
 105		if (dma_mapping_error(dev, da))
 106			goto frag_err;
 107
 108		ash->dma_handle[i + 1] = da;
 109		ash->size[i + 1] = skb_frag_size(frag);
 110
 111		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
 112		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
 113		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
 114	}
 115
 116	return 0;
 117
 118frag_err:
 119	for (i = i - 1; i >= 0; i--)
 120		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
 121			       DMA_TO_DEVICE);
 122
 123	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
 124
 125	return -ENOMEM;
 126}
 127
 128static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 129{
 130	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
 131	struct mana_port_context *apc = netdev_priv(ndev);
 132	u16 txq_idx = skb_get_queue_mapping(skb);
 133	struct gdma_dev *gd = apc->ac->gdma_dev;
 134	bool ipv4 = false, ipv6 = false;
 135	struct mana_tx_package pkg = {};
 136	struct netdev_queue *net_txq;
 137	struct mana_stats *tx_stats;
 138	struct gdma_queue *gdma_sq;
 139	unsigned int csum_type;
 140	struct mana_txq *txq;
 141	struct mana_cq *cq;
 142	int err, len;
 143
 144	if (unlikely(!apc->port_is_up))
 145		goto tx_drop;
 146
 147	if (skb_cow_head(skb, MANA_HEADROOM))
 148		goto tx_drop_count;
 149
 150	txq = &apc->tx_qp[txq_idx].txq;
 151	gdma_sq = txq->gdma_sq;
 152	cq = &apc->tx_qp[txq_idx].tx_cq;
 153
 154	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 155	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 156
 157	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
 158		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
 159		pkt_fmt = MANA_LONG_PKT_FMT;
 160	} else {
 161		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 162	}
 163
 164	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 165
 166	if (pkt_fmt == MANA_SHORT_PKT_FMT)
 167		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
 168	else
 169		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
 170
 171	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
 172	pkg.wqe_req.flags = 0;
 173	pkg.wqe_req.client_data_unit = 0;
 174
 175	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
 176	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
 177
 178	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 179		pkg.wqe_req.sgl = pkg.sgl_array;
 180	} else {
 181		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
 182					    sizeof(struct gdma_sge),
 183					    GFP_ATOMIC);
 184		if (!pkg.sgl_ptr)
 185			goto tx_drop_count;
 186
 187		pkg.wqe_req.sgl = pkg.sgl_ptr;
 188	}
 189
 190	if (skb->protocol == htons(ETH_P_IP))
 191		ipv4 = true;
 192	else if (skb->protocol == htons(ETH_P_IPV6))
 193		ipv6 = true;
 194
 195	if (skb_is_gso(skb)) {
 196		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 197		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 198
 199		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
 200		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 201		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 202
 203		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
 204		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
 205		if (ipv4) {
 206			ip_hdr(skb)->tot_len = 0;
 207			ip_hdr(skb)->check = 0;
 208			tcp_hdr(skb)->check =
 209				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 210						   ip_hdr(skb)->daddr, 0,
 211						   IPPROTO_TCP, 0);
 212		} else {
 213			ipv6_hdr(skb)->payload_len = 0;
 214			tcp_hdr(skb)->check =
 215				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 216						 &ipv6_hdr(skb)->daddr, 0,
 217						 IPPROTO_TCP, 0);
 218		}
 219	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 220		csum_type = mana_checksum_info(skb);
 221
 222		if (csum_type == IPPROTO_TCP) {
 223			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 224			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 225
 226			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
 227			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 228
 229		} else if (csum_type == IPPROTO_UDP) {
 230			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 231			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
 232
 233			pkg.tx_oob.s_oob.comp_udp_csum = 1;
 234		} else {
 235			/* Can't do offload of this type of checksum */
 236			if (skb_checksum_help(skb))
 237				goto free_sgl_ptr;
 238		}
 239	}
 240
 241	if (mana_map_skb(skb, apc, &pkg))
 242		goto free_sgl_ptr;
 243
 244	skb_queue_tail(&txq->pending_skbs, skb);
 245
 246	len = skb->len;
 247	net_txq = netdev_get_tx_queue(ndev, txq_idx);
 248
 249	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
 250					(struct gdma_posted_wqe_info *)skb->cb);
 251	if (!mana_can_tx(gdma_sq)) {
 252		netif_tx_stop_queue(net_txq);
 253		apc->eth_stats.stop_queue++;
 254	}
 255
 256	if (err) {
 257		(void)skb_dequeue_tail(&txq->pending_skbs);
 258		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
 259		err = NETDEV_TX_BUSY;
 260		goto tx_busy;
 261	}
 262
 263	err = NETDEV_TX_OK;
 264	atomic_inc(&txq->pending_sends);
 265
 266	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
 267
 268	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
 269	skb = NULL;
 270
 271	tx_stats = &txq->stats;
 272	u64_stats_update_begin(&tx_stats->syncp);
 273	tx_stats->packets++;
 274	tx_stats->bytes += len;
 275	u64_stats_update_end(&tx_stats->syncp);
 276
 277tx_busy:
 278	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 279		netif_tx_wake_queue(net_txq);
 280		apc->eth_stats.wake_queue++;
 281	}
 282
 283	kfree(pkg.sgl_ptr);
 284	return err;
 285
 286free_sgl_ptr:
 287	kfree(pkg.sgl_ptr);
 288tx_drop_count:
 289	ndev->stats.tx_dropped++;
 290tx_drop:
 291	dev_kfree_skb_any(skb);
 292	return NETDEV_TX_OK;
 293}
 294
 295static void mana_get_stats64(struct net_device *ndev,
 296			     struct rtnl_link_stats64 *st)
 297{
 298	struct mana_port_context *apc = netdev_priv(ndev);
 299	unsigned int num_queues = apc->num_queues;
 300	struct mana_stats *stats;
 
 301	unsigned int start;
 302	u64 packets, bytes;
 303	int q;
 304
 305	if (!apc->port_is_up)
 306		return;
 307
 308	netdev_stats_to_stats64(st, &ndev->stats);
 309
 310	for (q = 0; q < num_queues; q++) {
 311		stats = &apc->rxqs[q]->stats;
 312
 313		do {
 314			start = u64_stats_fetch_begin_irq(&stats->syncp);
 315			packets = stats->packets;
 316			bytes = stats->bytes;
 317		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 318
 319		st->rx_packets += packets;
 320		st->rx_bytes += bytes;
 321	}
 322
 323	for (q = 0; q < num_queues; q++) {
 324		stats = &apc->tx_qp[q].txq.stats;
 325
 326		do {
 327			start = u64_stats_fetch_begin_irq(&stats->syncp);
 328			packets = stats->packets;
 329			bytes = stats->bytes;
 330		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 331
 332		st->tx_packets += packets;
 333		st->tx_bytes += bytes;
 334	}
 335}
 336
 337static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
 338			     int old_q)
 339{
 340	struct mana_port_context *apc = netdev_priv(ndev);
 341	u32 hash = skb_get_hash(skb);
 342	struct sock *sk = skb->sk;
 343	int txq;
 344
 345	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
 346
 347	if (txq != old_q && sk && sk_fullsock(sk) &&
 348	    rcu_access_pointer(sk->sk_dst_cache))
 349		sk_tx_queue_set(sk, txq);
 350
 351	return txq;
 352}
 353
 354static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
 355			     struct net_device *sb_dev)
 356{
 357	int txq;
 358
 359	if (ndev->real_num_tx_queues == 1)
 360		return 0;
 361
 362	txq = sk_tx_queue_get(skb->sk);
 363
 364	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
 365		if (skb_rx_queue_recorded(skb))
 366			txq = skb_get_rx_queue(skb);
 367		else
 368			txq = mana_get_tx_queue(ndev, skb, txq);
 369	}
 370
 371	return txq;
 372}
 373
 374static const struct net_device_ops mana_devops = {
 375	.ndo_open		= mana_open,
 376	.ndo_stop		= mana_close,
 377	.ndo_select_queue	= mana_select_queue,
 378	.ndo_start_xmit		= mana_start_xmit,
 379	.ndo_validate_addr	= eth_validate_addr,
 380	.ndo_get_stats64	= mana_get_stats64,
 
 
 381};
 382
 383static void mana_cleanup_port_context(struct mana_port_context *apc)
 384{
 385	kfree(apc->rxqs);
 386	apc->rxqs = NULL;
 387}
 388
 389static int mana_init_port_context(struct mana_port_context *apc)
 390{
 391	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
 392			    GFP_KERNEL);
 393
 394	return !apc->rxqs ? -ENOMEM : 0;
 395}
 396
 397static int mana_send_request(struct mana_context *ac, void *in_buf,
 398			     u32 in_len, void *out_buf, u32 out_len)
 399{
 400	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 401	struct gdma_resp_hdr *resp = out_buf;
 402	struct gdma_req_hdr *req = in_buf;
 403	struct device *dev = gc->dev;
 404	static atomic_t activity_id;
 405	int err;
 406
 407	req->dev_id = gc->mana.dev_id;
 408	req->activity_id = atomic_inc_return(&activity_id);
 409
 410	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
 411				   out_buf);
 412	if (err || resp->status) {
 413		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
 414			err, resp->status);
 415		return err ? err : -EPROTO;
 416	}
 417
 418	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
 419	    req->activity_id != resp->activity_id) {
 420		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
 421			req->dev_id.as_uint32, resp->dev_id.as_uint32,
 422			req->activity_id, resp->activity_id);
 423		return -EPROTO;
 424	}
 425
 426	return 0;
 427}
 428
 429static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
 430				const enum mana_command_code expected_code,
 431				const u32 min_size)
 432{
 433	if (resp_hdr->response.msg_type != expected_code)
 434		return -EPROTO;
 435
 436	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
 437		return -EPROTO;
 438
 439	if (resp_hdr->response.msg_size < min_size)
 440		return -EPROTO;
 441
 442	return 0;
 443}
 444
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 445static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 446				 u32 proto_minor_ver, u32 proto_micro_ver,
 447				 u16 *max_num_vports)
 448{
 449	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 450	struct mana_query_device_cfg_resp resp = {};
 451	struct mana_query_device_cfg_req req = {};
 452	struct device *dev = gc->dev;
 453	int err = 0;
 454
 455	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 456			     sizeof(req), sizeof(resp));
 457	req.proto_major_ver = proto_major_ver;
 458	req.proto_minor_ver = proto_minor_ver;
 459	req.proto_micro_ver = proto_micro_ver;
 460
 461	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
 462	if (err) {
 463		dev_err(dev, "Failed to query config: %d", err);
 464		return err;
 465	}
 466
 467	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
 468				   sizeof(resp));
 469	if (err || resp.hdr.status) {
 470		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
 471			resp.hdr.status);
 472		if (!err)
 473			err = -EPROTO;
 474		return err;
 475	}
 476
 477	*max_num_vports = resp.max_num_vports;
 478
 479	return 0;
 480}
 481
 482static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
 483				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
 484{
 485	struct mana_query_vport_cfg_resp resp = {};
 486	struct mana_query_vport_cfg_req req = {};
 487	int err;
 488
 489	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
 490			     sizeof(req), sizeof(resp));
 491
 492	req.vport_index = vport_index;
 493
 494	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 495				sizeof(resp));
 496	if (err)
 497		return err;
 498
 499	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
 500				   sizeof(resp));
 501	if (err)
 502		return err;
 503
 504	if (resp.hdr.status)
 505		return -EPROTO;
 506
 507	*max_sq = resp.max_num_sq;
 508	*max_rq = resp.max_num_rq;
 509	*num_indir_entry = resp.num_indirection_ent;
 510
 511	apc->port_handle = resp.vport;
 512	ether_addr_copy(apc->mac_addr, resp.mac_addr);
 513
 514	return 0;
 515}
 516
 517static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
 518			  u32 doorbell_pg_id)
 
 
 
 
 
 
 
 
 
 519{
 520	struct mana_config_vport_resp resp = {};
 521	struct mana_config_vport_req req = {};
 522	int err;
 523
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 524	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
 525			     sizeof(req), sizeof(resp));
 526	req.vport = apc->port_handle;
 527	req.pdid = protection_dom_id;
 528	req.doorbell_pageid = doorbell_pg_id;
 529
 530	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 531				sizeof(resp));
 532	if (err) {
 533		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
 534		goto out;
 535	}
 536
 537	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
 538				   sizeof(resp));
 539	if (err || resp.hdr.status) {
 540		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
 541			   err, resp.hdr.status);
 542		if (!err)
 543			err = -EPROTO;
 544
 545		goto out;
 546	}
 547
 548	apc->tx_shortform_allowed = resp.short_form_allowed;
 549	apc->tx_vp_offset = resp.tx_vport_offset;
 
 
 
 550out:
 
 
 
 551	return err;
 552}
 
 553
 554static int mana_cfg_vport_steering(struct mana_port_context *apc,
 555				   enum TRI_STATE rx,
 556				   bool update_default_rxobj, bool update_key,
 557				   bool update_tab)
 558{
 559	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
 560	struct mana_cfg_rx_steer_req *req = NULL;
 561	struct mana_cfg_rx_steer_resp resp = {};
 562	struct net_device *ndev = apc->ndev;
 563	mana_handle_t *req_indir_tab;
 564	u32 req_buf_size;
 565	int err;
 566
 567	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
 568	req = kzalloc(req_buf_size, GFP_KERNEL);
 569	if (!req)
 570		return -ENOMEM;
 571
 572	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
 573			     sizeof(resp));
 574
 575	req->vport = apc->port_handle;
 576	req->num_indir_entries = num_entries;
 577	req->indir_tab_offset = sizeof(*req);
 578	req->rx_enable = rx;
 579	req->rss_enable = apc->rss_state;
 580	req->update_default_rxobj = update_default_rxobj;
 581	req->update_hashkey = update_key;
 582	req->update_indir_tab = update_tab;
 583	req->default_rxobj = apc->default_rxobj;
 584
 585	if (update_key)
 586		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
 587
 588	if (update_tab) {
 589		req_indir_tab = (mana_handle_t *)(req + 1);
 590		memcpy(req_indir_tab, apc->rxobj_table,
 591		       req->num_indir_entries * sizeof(mana_handle_t));
 592	}
 593
 594	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
 595				sizeof(resp));
 596	if (err) {
 597		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
 598		goto out;
 599	}
 600
 601	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
 602				   sizeof(resp));
 603	if (err) {
 604		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
 605		goto out;
 606	}
 607
 608	if (resp.hdr.status) {
 609		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
 610			   resp.hdr.status);
 611		err = -EPROTO;
 612	}
 
 
 
 613out:
 614	kfree(req);
 615	return err;
 616}
 617
 618static int mana_create_wq_obj(struct mana_port_context *apc,
 619			      mana_handle_t vport,
 620			      u32 wq_type, struct mana_obj_spec *wq_spec,
 621			      struct mana_obj_spec *cq_spec,
 622			      mana_handle_t *wq_obj)
 623{
 624	struct mana_create_wqobj_resp resp = {};
 625	struct mana_create_wqobj_req req = {};
 626	struct net_device *ndev = apc->ndev;
 627	int err;
 628
 629	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
 630			     sizeof(req), sizeof(resp));
 631	req.vport = vport;
 632	req.wq_type = wq_type;
 633	req.wq_gdma_region = wq_spec->gdma_region;
 634	req.cq_gdma_region = cq_spec->gdma_region;
 635	req.wq_size = wq_spec->queue_size;
 636	req.cq_size = cq_spec->queue_size;
 637	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
 638	req.cq_parent_qid = cq_spec->attached_eq;
 639
 640	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 641				sizeof(resp));
 642	if (err) {
 643		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
 644		goto out;
 645	}
 646
 647	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
 648				   sizeof(resp));
 649	if (err || resp.hdr.status) {
 650		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
 651			   resp.hdr.status);
 652		if (!err)
 653			err = -EPROTO;
 654		goto out;
 655	}
 656
 657	if (resp.wq_obj == INVALID_MANA_HANDLE) {
 658		netdev_err(ndev, "Got an invalid WQ object handle\n");
 659		err = -EPROTO;
 660		goto out;
 661	}
 662
 663	*wq_obj = resp.wq_obj;
 664	wq_spec->queue_index = resp.wq_id;
 665	cq_spec->queue_index = resp.cq_id;
 666
 667	return 0;
 668out:
 669	return err;
 670}
 
 671
 672static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
 673				mana_handle_t wq_obj)
 674{
 675	struct mana_destroy_wqobj_resp resp = {};
 676	struct mana_destroy_wqobj_req req = {};
 677	struct net_device *ndev = apc->ndev;
 678	int err;
 679
 680	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
 681			     sizeof(req), sizeof(resp));
 682	req.wq_type = wq_type;
 683	req.wq_obj_handle = wq_obj;
 684
 685	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
 686				sizeof(resp));
 687	if (err) {
 688		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
 689		return;
 690	}
 691
 692	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
 693				   sizeof(resp));
 694	if (err || resp.hdr.status)
 695		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
 696			   resp.hdr.status);
 697}
 
 698
 699static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
 700{
 701	int i;
 702
 703	for (i = 0; i < CQE_POLLING_BUFFER; i++)
 704		memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
 705}
 706
 707static void mana_destroy_eq(struct gdma_context *gc,
 708			    struct mana_port_context *apc)
 709{
 
 710	struct gdma_queue *eq;
 711	int i;
 712
 713	if (!apc->eqs)
 714		return;
 715
 716	for (i = 0; i < apc->num_queues; i++) {
 717		eq = apc->eqs[i].eq;
 718		if (!eq)
 719			continue;
 720
 721		mana_gd_destroy_queue(gc, eq);
 722	}
 723
 724	kfree(apc->eqs);
 725	apc->eqs = NULL;
 726}
 727
 728static int mana_create_eq(struct mana_port_context *apc)
 729{
 730	struct gdma_dev *gd = apc->ac->gdma_dev;
 
 731	struct gdma_queue_spec spec = {};
 732	int err;
 733	int i;
 734
 735	apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
 736			   GFP_KERNEL);
 737	if (!apc->eqs)
 738		return -ENOMEM;
 739
 740	spec.type = GDMA_EQ;
 741	spec.monitor_avl_buf = false;
 742	spec.queue_size = EQ_SIZE;
 743	spec.eq.callback = NULL;
 744	spec.eq.context = apc->eqs;
 745	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
 746	spec.eq.ndev = apc->ndev;
 747
 748	for (i = 0; i < apc->num_queues; i++) {
 749		mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);
 750
 751		err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
 
 752		if (err)
 753			goto out;
 754	}
 755
 756	return 0;
 757out:
 758	mana_destroy_eq(gd->gdma_context, apc);
 759	return err;
 760}
 761
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 762static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 763{
 764	u32 used_space_old;
 765	u32 used_space_new;
 766
 767	used_space_old = wq->head - wq->tail;
 768	used_space_new = wq->head - (wq->tail + num_units);
 769
 770	if (WARN_ON_ONCE(used_space_new > used_space_old))
 771		return -ERANGE;
 772
 773	wq->tail += num_units;
 774	return 0;
 775}
 776
 777static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 778{
 779	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 780	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
 781	struct device *dev = gc->dev;
 782	int i;
 783
 784	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
 785
 786	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
 787		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
 788			       DMA_TO_DEVICE);
 789}
 790
 791static void mana_poll_tx_cq(struct mana_cq *cq)
 792{
 793	struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent;
 794	struct gdma_comp *completions = cq->gdma_comp_buf;
 795	struct gdma_posted_wqe_info *wqe_info;
 796	unsigned int pkt_transmitted = 0;
 797	unsigned int wqe_unit_cnt = 0;
 798	struct mana_txq *txq = cq->txq;
 799	struct mana_port_context *apc;
 800	struct netdev_queue *net_txq;
 801	struct gdma_queue *gdma_wq;
 802	unsigned int avail_space;
 803	struct net_device *ndev;
 804	struct sk_buff *skb;
 805	bool txq_stopped;
 806	int comp_read;
 807	int i;
 808
 809	ndev = txq->ndev;
 810	apc = netdev_priv(ndev);
 811
 812	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
 813				    CQE_POLLING_BUFFER);
 814
 
 
 
 815	for (i = 0; i < comp_read; i++) {
 816		struct mana_tx_comp_oob *cqe_oob;
 817
 818		if (WARN_ON_ONCE(!completions[i].is_sq))
 819			return;
 820
 821		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
 822		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
 823				 MANA_CQE_COMPLETION))
 824			return;
 825
 826		switch (cqe_oob->cqe_hdr.cqe_type) {
 827		case CQE_TX_OKAY:
 828			break;
 829
 830		case CQE_TX_SA_DROP:
 831		case CQE_TX_MTU_DROP:
 832		case CQE_TX_INVALID_OOB:
 833		case CQE_TX_INVALID_ETH_TYPE:
 834		case CQE_TX_HDR_PROCESSING_ERROR:
 835		case CQE_TX_VF_DISABLED:
 836		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
 837		case CQE_TX_VPORT_DISABLED:
 838		case CQE_TX_VLAN_TAGGING_VIOLATION:
 839			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
 840				  cqe_oob->cqe_hdr.cqe_type);
 841			break;
 842
 843		default:
 844			/* If the CQE type is unexpected, log an error, assert,
 845			 * and go through the error path.
 846			 */
 847			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
 848				  cqe_oob->cqe_hdr.cqe_type);
 849			return;
 850		}
 851
 852		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
 853			return;
 854
 855		skb = skb_dequeue(&txq->pending_skbs);
 856		if (WARN_ON_ONCE(!skb))
 857			return;
 858
 859		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
 860		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
 861
 862		mana_unmap_skb(skb, apc);
 863
 864		napi_consume_skb(skb, gdma_eq->eq.budget);
 865
 866		pkt_transmitted++;
 867	}
 868
 869	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
 870		return;
 871
 872	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
 873
 874	gdma_wq = txq->gdma_sq;
 875	avail_space = mana_gd_wq_avail_space(gdma_wq);
 876
 877	/* Ensure tail updated before checking q stop */
 878	smp_mb();
 879
 880	net_txq = txq->net_txq;
 881	txq_stopped = netif_tx_queue_stopped(net_txq);
 882
 883	/* Ensure checking txq_stopped before apc->port_is_up. */
 884	smp_rmb();
 885
 886	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
 887		netif_tx_wake_queue(net_txq);
 888		apc->eth_stats.wake_queue++;
 889	}
 890
 891	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
 892		WARN_ON_ONCE(1);
 
 
 893}
 894
 895static void mana_post_pkt_rxq(struct mana_rxq *rxq)
 896{
 897	struct mana_recv_buf_oob *recv_buf_oob;
 898	u32 curr_index;
 899	int err;
 900
 901	curr_index = rxq->buf_index++;
 902	if (rxq->buf_index == rxq->num_rx_buf)
 903		rxq->buf_index = 0;
 904
 905	recv_buf_oob = &rxq->rx_oobs[curr_index];
 906
 907	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
 908				    &recv_buf_oob->wqe_inf);
 909	if (WARN_ON_ONCE(err))
 910		return;
 911
 912	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
 913}
 914
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 915static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 916			struct mana_rxq *rxq)
 917{
 918	struct mana_stats *rx_stats = &rxq->stats;
 919	struct net_device *ndev = rxq->ndev;
 920	uint pkt_len = cqe->ppi[0].pkt_len;
 921	struct mana_port_context *apc;
 922	u16 rxq_idx = rxq->rxq_idx;
 923	struct napi_struct *napi;
 924	struct gdma_queue *eq;
 925	struct sk_buff *skb;
 926	u32 hash_value;
 
 927
 928	apc = netdev_priv(ndev);
 929	eq = apc->eqs[rxq_idx].eq;
 930	eq->eq.work_done++;
 931	napi = &eq->eq.napi;
 932
 933	if (!buf_va) {
 934		++ndev->stats.rx_dropped;
 935		return;
 936	}
 937
 938	skb = build_skb(buf_va, PAGE_SIZE);
 939
 940	if (!skb) {
 941		free_page((unsigned long)buf_va);
 942		++ndev->stats.rx_dropped;
 943		return;
 944	}
 945
 946	skb_put(skb, pkt_len);
 
 
 
 
 
 
 
 947	skb->dev = napi->dev;
 948
 949	skb->protocol = eth_type_trans(skb, ndev);
 950	skb_checksum_none_assert(skb);
 951	skb_record_rx_queue(skb, rxq_idx);
 952
 953	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
 954		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
 955			skb->ip_summed = CHECKSUM_UNNECESSARY;
 956	}
 957
 958	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
 959		hash_value = cqe->ppi[0].pkt_hash;
 960
 961		if (cqe->rx_hashtype & MANA_HASH_L4)
 962			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
 963		else
 964			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
 965	}
 966
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 967	napi_gro_receive(napi, skb);
 968
 
 
 
 969	u64_stats_update_begin(&rx_stats->syncp);
 970	rx_stats->packets++;
 971	rx_stats->bytes += pkt_len;
 972	u64_stats_update_end(&rx_stats->syncp);
 
 
 
 
 
 
 
 
 973}
 974
 975static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 976				struct gdma_comp *cqe)
 977{
 978	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
 979	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
 980	struct net_device *ndev = rxq->ndev;
 981	struct mana_recv_buf_oob *rxbuf_oob;
 982	struct device *dev = gc->dev;
 983	void *new_buf, *old_buf;
 984	struct page *new_page;
 985	u32 curr, pktlen;
 986	dma_addr_t da;
 987
 988	switch (oob->cqe_hdr.cqe_type) {
 989	case CQE_RX_OKAY:
 990		break;
 991
 992	case CQE_RX_TRUNCATED:
 993		netdev_err(ndev, "Dropped a truncated packet\n");
 994		return;
 
 
 995
 996	case CQE_RX_COALESCED_4:
 997		netdev_err(ndev, "RX coalescing is unsupported\n");
 998		return;
 999
1000	case CQE_RX_OBJECT_FENCE:
1001		netdev_err(ndev, "RX Fencing is unsupported\n");
1002		return;
1003
1004	default:
1005		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1006			   oob->cqe_hdr.cqe_type);
1007		return;
1008	}
1009
1010	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1011		return;
1012
1013	pktlen = oob->ppi[0].pkt_len;
1014
1015	if (pktlen == 0) {
1016		/* data packets should never have packetlength of zero */
1017		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1018			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1019		return;
1020	}
1021
1022	curr = rxq->buf_index;
1023	rxbuf_oob = &rxq->rx_oobs[curr];
1024	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1025
1026	new_page = alloc_page(GFP_ATOMIC);
 
 
 
 
 
 
1027
1028	if (new_page) {
1029		da = dma_map_page(dev, new_page, 0, rxq->datasize,
1030				  DMA_FROM_DEVICE);
1031
1032		if (dma_mapping_error(dev, da)) {
1033			__free_page(new_page);
1034			new_page = NULL;
1035		}
1036	}
1037
1038	new_buf = new_page ? page_to_virt(new_page) : NULL;
1039
1040	if (new_buf) {
1041		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1042			       DMA_FROM_DEVICE);
1043
1044		old_buf = rxbuf_oob->buf_va;
1045
1046		/* refresh the rxbuf_oob with the new page */
1047		rxbuf_oob->buf_va = new_buf;
1048		rxbuf_oob->buf_dma_addr = da;
1049		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1050	} else {
1051		old_buf = NULL; /* drop the packet if no memory */
1052	}
1053
1054	mana_rx_skb(old_buf, oob, rxq);
1055
 
1056	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1057
1058	mana_post_pkt_rxq(rxq);
1059}
1060
1061static void mana_poll_rx_cq(struct mana_cq *cq)
1062{
1063	struct gdma_comp *comp = cq->gdma_comp_buf;
 
1064	int comp_read, i;
1065
1066	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1067	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1068
 
 
1069	for (i = 0; i < comp_read; i++) {
1070		if (WARN_ON_ONCE(comp[i].is_sq))
1071			return;
1072
1073		/* verify recv cqe references the right rxq */
1074		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1075			return;
1076
1077		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1078	}
 
 
 
1079}
1080
1081static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1082{
1083	struct mana_cq *cq = context;
 
 
1084
1085	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1086
1087	if (cq->type == MANA_CQ_TYPE_RX)
1088		mana_poll_rx_cq(cq);
1089	else
1090		mana_poll_tx_cq(cq);
1091
1092	mana_gd_arm_cq(gdma_queue);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1093}
1094
1095static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1096{
1097	struct gdma_dev *gd = apc->ac->gdma_dev;
1098
1099	if (!cq->gdma_cq)
1100		return;
1101
1102	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1103}
1104
1105static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1106{
1107	struct gdma_dev *gd = apc->ac->gdma_dev;
1108
1109	if (!txq->gdma_sq)
1110		return;
1111
1112	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1113}
1114
1115static void mana_destroy_txq(struct mana_port_context *apc)
1116{
 
1117	int i;
1118
1119	if (!apc->tx_qp)
1120		return;
1121
1122	for (i = 0; i < apc->num_queues; i++) {
 
 
 
 
 
1123		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1124
1125		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1126
1127		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1128	}
1129
1130	kfree(apc->tx_qp);
1131	apc->tx_qp = NULL;
1132}
1133
1134static int mana_create_txq(struct mana_port_context *apc,
1135			   struct net_device *net)
1136{
1137	struct gdma_dev *gd = apc->ac->gdma_dev;
 
1138	struct mana_obj_spec wq_spec;
1139	struct mana_obj_spec cq_spec;
1140	struct gdma_queue_spec spec;
1141	struct gdma_context *gc;
1142	struct mana_txq *txq;
1143	struct mana_cq *cq;
1144	u32 txq_size;
1145	u32 cq_size;
1146	int err;
1147	int i;
1148
1149	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1150			     GFP_KERNEL);
1151	if (!apc->tx_qp)
1152		return -ENOMEM;
1153
1154	/*  The minimum size of the WQE is 32 bytes, hence
1155	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1156	 *  the SQ can store. This value is then used to size other queues
1157	 *  to prevent overflow.
1158	 */
1159	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1160	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1161
1162	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1163	cq_size = PAGE_ALIGN(cq_size);
1164
1165	gc = gd->gdma_context;
1166
1167	for (i = 0; i < apc->num_queues; i++) {
1168		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1169
1170		/* Create SQ */
1171		txq = &apc->tx_qp[i].txq;
1172
1173		u64_stats_init(&txq->stats.syncp);
1174		txq->ndev = net;
1175		txq->net_txq = netdev_get_tx_queue(net, i);
1176		txq->vp_offset = apc->tx_vp_offset;
1177		skb_queue_head_init(&txq->pending_skbs);
1178
1179		memset(&spec, 0, sizeof(spec));
1180		spec.type = GDMA_SQ;
1181		spec.monitor_avl_buf = true;
1182		spec.queue_size = txq_size;
1183		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1184		if (err)
1185			goto out;
1186
1187		/* Create SQ's CQ */
1188		cq = &apc->tx_qp[i].tx_cq;
1189		cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
1190		cq->type = MANA_CQ_TYPE_TX;
1191
1192		cq->txq = txq;
1193
1194		memset(&spec, 0, sizeof(spec));
1195		spec.type = GDMA_CQ;
1196		spec.monitor_avl_buf = false;
1197		spec.queue_size = cq_size;
1198		spec.cq.callback = mana_cq_handler;
1199		spec.cq.parent_eq = apc->eqs[i].eq;
1200		spec.cq.context = cq;
1201		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1202		if (err)
1203			goto out;
1204
1205		memset(&wq_spec, 0, sizeof(wq_spec));
1206		memset(&cq_spec, 0, sizeof(cq_spec));
1207
1208		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1209		wq_spec.queue_size = txq->gdma_sq->queue_size;
1210
1211		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1212		cq_spec.queue_size = cq->gdma_cq->queue_size;
1213		cq_spec.modr_ctx_id = 0;
1214		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1215
1216		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1217					 &wq_spec, &cq_spec,
1218					 &apc->tx_qp[i].tx_object);
1219
1220		if (err)
1221			goto out;
1222
1223		txq->gdma_sq->id = wq_spec.queue_index;
1224		cq->gdma_cq->id = cq_spec.queue_index;
1225
1226		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1227		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
 
 
1228
1229		txq->gdma_txq_id = txq->gdma_sq->id;
1230
1231		cq->gdma_id = cq->gdma_cq->id;
1232
1233		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1234			err = -EINVAL;
1235			goto out;
1236		}
1237
1238		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1239
1240		mana_gd_arm_cq(cq->gdma_cq);
 
 
 
1241	}
1242
1243	return 0;
1244out:
1245	mana_destroy_txq(apc);
1246	return err;
1247}
1248
1249static void mana_napi_sync_for_rx(struct mana_rxq *rxq)
1250{
1251	struct net_device *ndev = rxq->ndev;
1252	struct mana_port_context *apc;
1253	u16 rxq_idx = rxq->rxq_idx;
1254	struct napi_struct *napi;
1255	struct gdma_queue *eq;
1256
1257	apc = netdev_priv(ndev);
1258	eq = apc->eqs[rxq_idx].eq;
1259	napi = &eq->eq.napi;
1260
1261	napi_synchronize(napi);
1262}
1263
1264static void mana_destroy_rxq(struct mana_port_context *apc,
1265			     struct mana_rxq *rxq, bool validate_state)
1266
1267{
1268	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1269	struct mana_recv_buf_oob *rx_oob;
1270	struct device *dev = gc->dev;
 
1271	int i;
1272
1273	if (!rxq)
1274		return;
1275
 
 
1276	if (validate_state)
1277		mana_napi_sync_for_rx(rxq);
 
 
 
 
 
 
1278
1279	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1280
1281	mana_deinit_cq(apc, &rxq->rx_cq);
1282
 
 
 
1283	for (i = 0; i < rxq->num_rx_buf; i++) {
1284		rx_oob = &rxq->rx_oobs[i];
1285
1286		if (!rx_oob->buf_va)
1287			continue;
1288
1289		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1290			       DMA_FROM_DEVICE);
1291
1292		free_page((unsigned long)rx_oob->buf_va);
1293		rx_oob->buf_va = NULL;
1294	}
1295
1296	if (rxq->gdma_rq)
1297		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1298
1299	kfree(rxq);
1300}
1301
1302#define MANA_WQE_HEADER_SIZE 16
1303#define MANA_WQE_SGE_SIZE 16
1304
1305static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1306			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1307{
1308	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1309	struct mana_recv_buf_oob *rx_oob;
1310	struct device *dev = gc->dev;
1311	struct page *page;
1312	dma_addr_t da;
1313	u32 buf_idx;
1314
1315	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1316
1317	*rxq_size = 0;
1318	*cq_size = 0;
1319
1320	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1321		rx_oob = &rxq->rx_oobs[buf_idx];
1322		memset(rx_oob, 0, sizeof(*rx_oob));
1323
1324		page = alloc_page(GFP_KERNEL);
1325		if (!page)
1326			return -ENOMEM;
1327
1328		da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
 
1329
1330		if (dma_mapping_error(dev, da)) {
1331			__free_page(page);
1332			return -ENOMEM;
1333		}
1334
1335		rx_oob->buf_va = page_to_virt(page);
1336		rx_oob->buf_dma_addr = da;
1337
1338		rx_oob->num_sge = 1;
1339		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1340		rx_oob->sgl[0].size = rxq->datasize;
1341		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1342
1343		rx_oob->wqe_req.sgl = rx_oob->sgl;
1344		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1345		rx_oob->wqe_req.inline_oob_size = 0;
1346		rx_oob->wqe_req.inline_oob_data = NULL;
1347		rx_oob->wqe_req.flags = 0;
1348		rx_oob->wqe_req.client_data_unit = 0;
1349
1350		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1351				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1352		*cq_size += COMP_ENTRY_SIZE;
1353	}
1354
1355	return 0;
1356}
1357
1358static int mana_push_wqe(struct mana_rxq *rxq)
1359{
1360	struct mana_recv_buf_oob *rx_oob;
1361	u32 buf_idx;
1362	int err;
1363
1364	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1365		rx_oob = &rxq->rx_oobs[buf_idx];
1366
1367		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1368					    &rx_oob->wqe_inf);
1369		if (err)
1370			return -ENOSPC;
1371	}
1372
1373	return 0;
1374}
1375
1376static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1377					u32 rxq_idx, struct mana_eq *eq,
1378					struct net_device *ndev)
1379{
1380	struct gdma_dev *gd = apc->ac->gdma_dev;
1381	struct mana_obj_spec wq_spec;
1382	struct mana_obj_spec cq_spec;
1383	struct gdma_queue_spec spec;
1384	struct mana_cq *cq = NULL;
1385	struct gdma_context *gc;
1386	u32 cq_size, rq_size;
1387	struct mana_rxq *rxq;
1388	int err;
1389
1390	gc = gd->gdma_context;
1391
1392	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1393		      GFP_KERNEL);
1394	if (!rxq)
1395		return NULL;
1396
1397	rxq->ndev = ndev;
1398	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1399	rxq->rxq_idx = rxq_idx;
1400	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1401	rxq->rxobj = INVALID_MANA_HANDLE;
1402
1403	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1404	if (err)
1405		goto out;
1406
1407	rq_size = PAGE_ALIGN(rq_size);
1408	cq_size = PAGE_ALIGN(cq_size);
1409
1410	/* Create RQ */
1411	memset(&spec, 0, sizeof(spec));
1412	spec.type = GDMA_RQ;
1413	spec.monitor_avl_buf = true;
1414	spec.queue_size = rq_size;
1415	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1416	if (err)
1417		goto out;
1418
1419	/* Create RQ's CQ */
1420	cq = &rxq->rx_cq;
1421	cq->gdma_comp_buf = eq->cqe_poll;
1422	cq->type = MANA_CQ_TYPE_RX;
1423	cq->rxq = rxq;
1424
1425	memset(&spec, 0, sizeof(spec));
1426	spec.type = GDMA_CQ;
1427	spec.monitor_avl_buf = false;
1428	spec.queue_size = cq_size;
1429	spec.cq.callback = mana_cq_handler;
1430	spec.cq.parent_eq = eq->eq;
1431	spec.cq.context = cq;
1432	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1433	if (err)
1434		goto out;
1435
1436	memset(&wq_spec, 0, sizeof(wq_spec));
1437	memset(&cq_spec, 0, sizeof(cq_spec));
1438	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1439	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1440
1441	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1442	cq_spec.queue_size = cq->gdma_cq->queue_size;
1443	cq_spec.modr_ctx_id = 0;
1444	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1445
1446	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1447				 &wq_spec, &cq_spec, &rxq->rxobj);
1448	if (err)
1449		goto out;
1450
1451	rxq->gdma_rq->id = wq_spec.queue_index;
1452	cq->gdma_cq->id = cq_spec.queue_index;
1453
1454	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1455	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1456
1457	rxq->gdma_id = rxq->gdma_rq->id;
1458	cq->gdma_id = cq->gdma_cq->id;
1459
1460	err = mana_push_wqe(rxq);
1461	if (err)
1462		goto out;
1463
1464	if (cq->gdma_id >= gc->max_num_cqs)
 
1465		goto out;
 
1466
1467	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1468
1469	mana_gd_arm_cq(cq->gdma_cq);
 
 
 
 
 
 
 
 
 
1470out:
1471	if (!err)
1472		return rxq;
1473
1474	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1475
1476	mana_destroy_rxq(apc, rxq, false);
1477
1478	if (cq)
1479		mana_deinit_cq(apc, cq);
1480
1481	return NULL;
1482}
1483
1484static int mana_add_rx_queues(struct mana_port_context *apc,
1485			      struct net_device *ndev)
1486{
 
1487	struct mana_rxq *rxq;
1488	int err = 0;
1489	int i;
1490
1491	for (i = 0; i < apc->num_queues; i++) {
1492		rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
1493		if (!rxq) {
1494			err = -ENOMEM;
1495			goto out;
1496		}
1497
1498		u64_stats_init(&rxq->stats.syncp);
1499
1500		apc->rxqs[i] = rxq;
1501	}
1502
1503	apc->default_rxobj = apc->rxqs[0]->rxobj;
1504out:
1505	return err;
1506}
1507
1508static void mana_destroy_vport(struct mana_port_context *apc)
1509{
 
1510	struct mana_rxq *rxq;
1511	u32 rxq_idx;
1512
1513	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1514		rxq = apc->rxqs[rxq_idx];
1515		if (!rxq)
1516			continue;
1517
1518		mana_destroy_rxq(apc, rxq, true);
1519		apc->rxqs[rxq_idx] = NULL;
1520	}
1521
1522	mana_destroy_txq(apc);
 
 
 
 
1523}
1524
1525static int mana_create_vport(struct mana_port_context *apc,
1526			     struct net_device *net)
1527{
1528	struct gdma_dev *gd = apc->ac->gdma_dev;
1529	int err;
1530
1531	apc->default_rxobj = INVALID_MANA_HANDLE;
1532
 
 
 
 
 
 
1533	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1534	if (err)
1535		return err;
1536
1537	return mana_create_txq(apc, net);
1538}
1539
1540static void mana_rss_table_init(struct mana_port_context *apc)
1541{
1542	int i;
1543
1544	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1545		apc->indir_table[i] =
1546			ethtool_rxfh_indir_default(i, apc->num_queues);
1547}
1548
1549int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1550		    bool update_hash, bool update_tab)
1551{
1552	u32 queue_idx;
 
1553	int i;
1554
1555	if (update_tab) {
1556		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1557			queue_idx = apc->indir_table[i];
1558			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1559		}
1560	}
1561
1562	return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
 
 
 
 
 
 
1563}
1564
1565static int mana_init_port(struct net_device *ndev)
1566{
1567	struct mana_port_context *apc = netdev_priv(ndev);
1568	u32 max_txq, max_rxq, max_queues;
1569	int port_idx = apc->port_idx;
1570	u32 num_indirect_entries;
1571	int err;
1572
1573	err = mana_init_port_context(apc);
1574	if (err)
1575		return err;
1576
1577	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1578				   &num_indirect_entries);
1579	if (err) {
1580		netdev_err(ndev, "Failed to query info for vPort 0\n");
 
1581		goto reset_apc;
1582	}
1583
1584	max_queues = min_t(u32, max_txq, max_rxq);
1585	if (apc->max_queues > max_queues)
1586		apc->max_queues = max_queues;
1587
1588	if (apc->num_queues > apc->max_queues)
1589		apc->num_queues = apc->max_queues;
1590
1591	ether_addr_copy(ndev->dev_addr, apc->mac_addr);
1592
1593	return 0;
1594
1595reset_apc:
1596	kfree(apc->rxqs);
1597	apc->rxqs = NULL;
1598	return err;
1599}
1600
1601int mana_alloc_queues(struct net_device *ndev)
1602{
1603	struct mana_port_context *apc = netdev_priv(ndev);
1604	struct gdma_dev *gd = apc->ac->gdma_dev;
1605	int err;
1606
1607	err = mana_create_eq(apc);
1608	if (err)
1609		return err;
1610
1611	err = mana_create_vport(apc, ndev);
1612	if (err)
1613		goto destroy_eq;
1614
1615	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1616	if (err)
1617		goto destroy_vport;
1618
1619	err = mana_add_rx_queues(apc, ndev);
1620	if (err)
1621		goto destroy_vport;
1622
1623	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1624
1625	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1626	if (err)
1627		goto destroy_vport;
1628
1629	mana_rss_table_init(apc);
1630
1631	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1632	if (err)
1633		goto destroy_vport;
1634
 
 
 
 
 
 
 
 
1635	return 0;
1636
1637destroy_vport:
1638	mana_destroy_vport(apc);
1639destroy_eq:
1640	mana_destroy_eq(gd->gdma_context, apc);
1641	return err;
1642}
1643
1644int mana_attach(struct net_device *ndev)
1645{
1646	struct mana_port_context *apc = netdev_priv(ndev);
1647	int err;
1648
1649	ASSERT_RTNL();
1650
1651	err = mana_init_port(ndev);
1652	if (err)
1653		return err;
1654
1655	err = mana_alloc_queues(ndev);
1656	if (err) {
1657		kfree(apc->rxqs);
1658		apc->rxqs = NULL;
1659		return err;
 
1660	}
1661
1662	netif_device_attach(ndev);
1663
1664	apc->port_is_up = apc->port_st_save;
1665
1666	/* Ensure port state updated before txq state */
1667	smp_wmb();
1668
1669	if (apc->port_is_up) {
1670		netif_carrier_on(ndev);
1671		netif_tx_wake_all_queues(ndev);
1672	}
1673
1674	return 0;
1675}
1676
1677static int mana_dealloc_queues(struct net_device *ndev)
1678{
1679	struct mana_port_context *apc = netdev_priv(ndev);
 
1680	struct mana_txq *txq;
1681	int i, err;
1682
1683	if (apc->port_is_up)
1684		return -EINVAL;
1685
 
 
 
 
 
1686	/* No packet can be transmitted now since apc->port_is_up is false.
1687	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1688	 * a txq because it may not timely see apc->port_is_up being cleared
1689	 * to false, but it doesn't matter since mana_start_xmit() drops any
1690	 * new packets due to apc->port_is_up being false.
1691	 *
1692	 * Drain all the in-flight TX packets
1693	 */
1694	for (i = 0; i < apc->num_queues; i++) {
1695		txq = &apc->tx_qp[i].txq;
1696
1697		while (atomic_read(&txq->pending_sends) > 0)
1698			usleep_range(1000, 2000);
1699	}
1700
1701	/* We're 100% sure the queues can no longer be woken up, because
1702	 * we're sure now mana_poll_tx_cq() can't be running.
1703	 */
1704
1705	apc->rss_state = TRI_STATE_FALSE;
1706	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1707	if (err) {
1708		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1709		return err;
1710	}
1711
1712	/* TODO: Implement RX fencing */
1713	ssleep(1);
1714
1715	mana_destroy_vport(apc);
1716
1717	mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
1718
1719	return 0;
1720}
1721
1722int mana_detach(struct net_device *ndev, bool from_close)
1723{
1724	struct mana_port_context *apc = netdev_priv(ndev);
1725	int err;
1726
1727	ASSERT_RTNL();
1728
1729	apc->port_st_save = apc->port_is_up;
1730	apc->port_is_up = false;
1731
1732	/* Ensure port state updated before txq state */
1733	smp_wmb();
1734
1735	netif_tx_disable(ndev);
1736	netif_carrier_off(ndev);
1737
1738	if (apc->port_st_save) {
1739		err = mana_dealloc_queues(ndev);
1740		if (err)
1741			return err;
1742	}
1743
1744	if (!from_close) {
1745		netif_device_detach(ndev);
1746		mana_cleanup_port_context(apc);
1747	}
1748
1749	return 0;
1750}
1751
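/* Allocate, initialize and register the net_device for one port: set the
 * queue counts, MTU limits, offload features and RSS hash key.
 */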
1752static int mana_probe_port(struct mana_context *ac, int port_idx,
1753			   struct net_device **ndev_storage)
1754{
1755	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1756	struct mana_port_context *apc;
1757	struct net_device *ndev;
1758	int err;
1759
1760	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1761				 gc->max_num_queues);
1762	if (!ndev)
1763		return -ENOMEM;
1764
1765	*ndev_storage = ndev;
1766
1767	apc = netdev_priv(ndev);
1768	apc->ac = ac;
1769	apc->ndev = ndev;
1770	apc->max_queues = gc->max_num_queues;
1771	apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
1772	apc->port_handle = INVALID_MANA_HANDLE;
1773	apc->port_idx = port_idx;
1774
1775	ndev->netdev_ops = &mana_devops;
1776	ndev->ethtool_ops = &mana_ethtool_ops;
1777	ndev->mtu = ETH_DATA_LEN;
1778	ndev->max_mtu = ndev->mtu;
1779	ndev->min_mtu = ndev->mtu;
1780	ndev->needed_headroom = MANA_HEADROOM;
1781	SET_NETDEV_DEV(ndev, gc->dev);
1782
1783	netif_carrier_off(ndev);
1784
1785	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1786
1787	err = mana_init_port(ndev);
1788	if (err)
1789		goto free_net;
1790
1791	netdev_lockdep_set_classes(ndev);
1792
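	/* Advertise checksum, TSO and receive hashing offloads */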
1793	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1794	ndev->hw_features |= NETIF_F_RXCSUM;
1795	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1796	ndev->hw_features |= NETIF_F_RXHASH;
1797	ndev->features = ndev->hw_features;
1798	ndev->vlan_features = 0;
1799
1800	err = register_netdev(ndev);
1801	if (err) {
1802		netdev_err(ndev, "Unable to register netdev.\n");
1803		goto reset_apc;
1804	}
1805
1806	return 0;
1807
1808reset_apc:
1809	kfree(apc->rxqs);
1810	apc->rxqs = NULL;
1811free_net:
1812	*ndev_storage = NULL;
1813	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1814	free_netdev(ndev);
1815	return err;
1816}
1817
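/* Device-level probe: register with the GDMA layer, query the device
 * configuration for the number of ports, and create a net_device for each
 * port.
 */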
1818int mana_probe(struct gdma_dev *gd)
1819{
1820	struct gdma_context *gc = gd->gdma_context;
1821	struct device *dev = gc->dev;
1822	struct mana_context *ac;
1823	int err;
1824	int i;
1825
1826	dev_info(dev,
1827		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1828		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1829
1830	err = mana_gd_register_device(gd);
1831	if (err)
1832		return err;
1833
1834	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1835	if (!ac)
1836		return -ENOMEM;
1837
1838	ac->gdma_dev = gd;
1839	ac->num_ports = 1;
1840	gd->driver_data = ac;
1841
1842	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1843				    MANA_MICRO_VERSION, &ac->num_ports);
1844	if (err)
1845		goto out;
1846
1847	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
1848		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
1849
1850	for (i = 0; i < ac->num_ports; i++) {
1851		err = mana_probe_port(ac, i, &ac->ports[i]);
1852		if (err)
1853			break;
1854	}
1855out:
1856	if (err)
1857		mana_remove(gd);
1858
1859	return err;
1860}
1861
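/* Undo mana_probe(): detach and unregister each port's net_device under the
 * RTNL lock, then deregister from the GDMA layer and free the device context.
 */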
1862void mana_remove(struct gdma_dev *gd)
1863{
1864	struct gdma_context *gc = gd->gdma_context;
1865	struct mana_context *ac = gd->driver_data;
1866	struct device *dev = gc->dev;
1867	struct net_device *ndev;
1868	int i;
1869
1870	for (i = 0; i < ac->num_ports; i++) {
1871		ndev = ac->ports[i];
1872		if (!ndev) {
1873			if (i == 0)
1874				dev_err(dev, "No net device to remove\n");
1875			goto out;
1876		}
1877
1878		/* All cleanup actions must happen after rtnl_lock(); otherwise
1879		 * other functions may access partially cleaned up data.
1880		 */
1881		rtnl_lock();
1882
1883		mana_detach(ndev, false);
1884
1885		unregister_netdevice(ndev);
1886
1887		rtnl_unlock();
1888
1889		free_netdev(ndev);
1890	}
1891out:
1892	mana_gd_deregister_device(gd);
1893	gd->driver_data = NULL;
1894	gd->gdma_context = NULL;
1895	kfree(ac);
1896}