v6.9.4
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
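
/*
 * Worked example of the mapping above (illustrative; assumes
 * IXGBE_82599_VMDQ_4Q_MASK is 0x7C and that __ALIGN_MASK(x, m) expands to
 * ((x + m) & ~m)): with tcs = 2 the 32-pool/4-queue VMDq layout is used,
 * so __ALIGN_MASK(1, ~vmdq->mask) = (1 + ~0x7C) & 0x7C = 4 queues per
 * pool.  With vmdq->offset = 0, the Rx rings land on register indices
 * 0, 1, 4, 5, 8, 9, ...: once the two TC queues of a pool are consumed,
 * (reg_idx & ~vmdq->mask) >= tcs fires, pool is bumped, and reg_idx is
 * aligned up to the next multiple of 4.  Only pool 0 keeps a netdev
 * backpointer; rings belonging to the other pools get NULL.
 */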

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
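
/*
 * Example, read straight off the tables above: on 82599 with num_tcs = 8,
 * tc = 4 gives *rx = 4 << 4 = 64 and *tx = (4 + 2) << 4 = 96; with
 * num_tcs = 4, tc = 2 gives *rx = 2 << 5 = 64 and *tx = (2 + 4) << 4 = 96.
 */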

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}
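
/*
 * Illustration (assuming a DCB config of num_tcs = 4 with rss_i = 16):
 * TC1 owns rings 16..31, which ixgbe_get_first_reg_idx() maps to Rx
 * registers 32..47 (1 << 5) and Tx registers 64..79 (1 << 6), and every
 * ring in that block is tagged with dcb_tc = 1.
 */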

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
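
/*
 * Sketch of the resulting layout (assuming the 32-pool VMDq mode where
 * each pool owns 4 queue registers and rss->indices = 2): the PF's Rx
 * rings are placed at register indices off, off + 1, off + 4, off + 5, ...
 * where off = vmdq->offset * 4, i.e. two RSS queues per pool with the
 * remaining two registers of each pool skipped by the alignment step.
 */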

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	int queues;

	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
	return adapter->xdp_prog ? queues : 0;
}
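
/*
 * Example: on a 12-CPU system with an XDP program attached this returns
 * min(IXGBE_MAX_XDP_QS, 12) XDP queues; with no program loaded it always
 * returns 0, so no Tx resources are reserved for XDP.
 */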

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues - Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
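
/*
 * Worked example (assuming MAX_TX_QUEUES is 128): with tcs = 4, a VMDq
 * limit of 8 and offset 0, vmdq_i = min(8, 128 / 4, 32) = 8 pools in the
 * 32-pool/4-queue mode, so fcoe_i = 128 / 4 - 8 = 24 queues remain
 * available for FCoE, and the PF ends up with 8 * 4 = 32 Rx and Tx
 * queues plus whatever FCoE claims below.
 */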

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
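
/*
 * Example: on a non-82598 part with tcs = 4 and dev->num_tx_queues = 64,
 * rss_i = 64 / 4 = 16 queues per TC (further capped by f->limit), the 16Q
 * RSS mask is used, and netdev_set_tc_queue() maps TC n to the 16-queue
 * block starting at queue 16 * n, for 64 Tx and 64 Rx queues in total.
 */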

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}
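
/*
 * Worked example: with 40 VF pools already reserved (offset = 40) and a
 * VMDq limit of 4, vmdq_i = 44 > 32 selects the 64-pool/2-queue mode and
 * rss_i is capped at 2; fcoe_i = 128 - 44 * 2 = 40 queues stay free for
 * FCoE, and after the offset is removed the PF keeps vmdq_i = 4 pools,
 * i.e. 4 * 2 = 8 Rx and Tx queues.
 */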

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}
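
/*
 * Example: with an RSS limit of 16 and ATR sampling enabled, rss_i is
 * raised to the Flow Director feature limit (f->limit of RING_F_FDIR),
 * the FDIR hash flag is set, and the adapter ends up with rss_i Rx and
 * Tx queues plus the XDP queues reported by ixgbe_xdp_queues().
 */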

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the largest number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is the minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}
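
/*
 * Worked example (assuming NON_Q_VECTORS is 1): with 16 Rx and 16 Tx
 * queues on a 12-CPU system, vectors = max(16, 16) = 16, trimmed to 12
 * by the CPU count, plus 1 for the non-queue interrupt = 13 requested.
 * If pci_enable_msix_range() grants all 13, num_q_vectors becomes
 * min(12, adapter->max_q_vectors).
 */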

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);
		spin_lock_init(&ring->tx_lock);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	__netif_napi_del(&q_vector->napi);

	/*
	 * after a call to __netif_napi_del() napi may still be used and
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * so we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
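
/*
 * Distribution example: with q_vectors = 8, 16 Rx, 16 Tx and no XDP
 * queues, the rings outnumber the vectors, so the second loop runs and
 * each vector receives DIV_ROUND_UP(16, 8) = 2 Tx and 2 Rx rings,
 * interleaved across the adapter ring arrays by the v_count stride in
 * ixgbe_alloc_q_vector().
 */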

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt-specific resources and reset the
 * structure to pre-load conditions.
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}
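
/*
 * Ring-wrap example: with tx_ring->count = 512 and next_to_use = 511,
 * the context descriptor is written to slot 511 and next_to_use wraps
 * back to 0; the DEXT/DTYP_CTXT bits in type_tucmd are what mark the
 * slot as a context descriptor rather than a data descriptor.
 */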
1302
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#include "ixgbe.h"
   5#include "ixgbe_sriov.h"
   6
   7#ifdef CONFIG_IXGBE_DCB
   8/**
   9 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
  10 * @adapter: board private structure to initialize
  11 *
  12 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
  13 * will also try to cache the proper offsets if RSS/FCoE are enabled along
  14 * with VMDq.
  15 *
  16 **/
  17static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
  18{
  19#ifdef IXGBE_FCOE
  20	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
  21#endif /* IXGBE_FCOE */
  22	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
  23	int i;
  24	u16 reg_idx, pool;
  25	u8 tcs = adapter->hw_tcs;
  26
  27	/* verify we have DCB queueing enabled before proceeding */
  28	if (tcs <= 1)
  29		return false;
  30
  31	/* verify we have VMDq enabled before proceeding */
  32	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  33		return false;
  34
  35	/* start at VMDq register offset for SR-IOV enabled setups */
  36	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  37	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
  38		/* If we are greater than indices move to next pool */
  39		if ((reg_idx & ~vmdq->mask) >= tcs) {
  40			pool++;
  41			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  42		}
  43		adapter->rx_ring[i]->reg_idx = reg_idx;
  44		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
  45	}
  46
  47	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
  48	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
  49		/* If we are greater than indices move to next pool */
  50		if ((reg_idx & ~vmdq->mask) >= tcs)
  51			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
  52		adapter->tx_ring[i]->reg_idx = reg_idx;
  53	}
  54
  55#ifdef IXGBE_FCOE
  56	/* nothing to do if FCoE is disabled */
  57	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
  58		return true;
  59
  60	/* The work is already done if the FCoE ring is shared */
  61	if (fcoe->offset < tcs)
  62		return true;
  63
  64	/* The FCoE rings exist separately, we need to move their reg_idx */
  65	if (fcoe->indices) {
  66		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
  67		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
  68
  69		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
  70		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
  71			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
  72			adapter->rx_ring[i]->reg_idx = reg_idx;
  73			adapter->rx_ring[i]->netdev = adapter->netdev;
  74			reg_idx++;
  75		}
  76
  77		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
  78		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
  79			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
  80			adapter->tx_ring[i]->reg_idx = reg_idx;
  81			reg_idx++;
  82		}
  83	}
  84
  85#endif /* IXGBE_FCOE */
  86	return true;
  87}
  88
  89/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
  90static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  91				    unsigned int *tx, unsigned int *rx)
  92{
  93	struct ixgbe_hw *hw = &adapter->hw;
  94	u8 num_tcs = adapter->hw_tcs;
  95
  96	*tx = 0;
  97	*rx = 0;
  98
  99	switch (hw->mac.type) {
 100	case ixgbe_mac_82598EB:
 101		/* TxQs/TC: 4	RxQs/TC: 8 */
 102		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
 103		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
 104		break;
 105	case ixgbe_mac_82599EB:
 106	case ixgbe_mac_X540:
 107	case ixgbe_mac_X550:
 108	case ixgbe_mac_X550EM_x:
 109	case ixgbe_mac_x550em_a:
 110		if (num_tcs > 4) {
 111			/*
 112			 * TCs    : TC0/1 TC2/3 TC4-7
 113			 * TxQs/TC:    32    16     8
 114			 * RxQs/TC:    16    16    16
 115			 */
 116			*rx = tc << 4;
 117			if (tc < 3)
 118				*tx = tc << 5;		/*   0,  32,  64 */
 119			else if (tc < 5)
 120				*tx = (tc + 2) << 4;	/*  80,  96 */
 121			else
 122				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
 123		} else {
 124			/*
 125			 * TCs    : TC0 TC1 TC2/3
 126			 * TxQs/TC:  64  32    16
 127			 * RxQs/TC:  32  32    32
 128			 */
 129			*rx = tc << 5;
 130			if (tc < 2)
 131				*tx = tc << 6;		/*  0,  64 */
 132			else
 133				*tx = (tc + 4) << 4;	/* 96, 112 */
 134		}
 135		break;
 136	default:
 137		break;
 138	}
 139}
 140
 141/**
 142 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 143 * @adapter: board private structure to initialize
 144 *
 145 * Cache the descriptor ring offsets for DCB to the assigned rings.
 146 *
 147 **/
 148static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 149{
 150	u8 num_tcs = adapter->hw_tcs;
 151	unsigned int tx_idx, rx_idx;
 152	int tc, offset, rss_i, i;
 153
 154	/* verify we have DCB queueing enabled before proceeding */
 155	if (num_tcs <= 1)
 156		return false;
 157
 158	rss_i = adapter->ring_feature[RING_F_RSS].indices;
 159
 160	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
 161		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
 162		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
 163			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
 164			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
 165			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
 166			adapter->tx_ring[offset + i]->dcb_tc = tc;
 167			adapter->rx_ring[offset + i]->dcb_tc = tc;
 168		}
 169	}
 170
 171	return true;
 172}
 173
 174#endif
 175/**
 176 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 177 * @adapter: board private structure to initialize
 178 *
 179 * SR-IOV doesn't use any descriptor rings but changes the default if
 180 * no other mapping is used.
 181 *
 182 */
 183static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
 184{
 185#ifdef IXGBE_FCOE
 186	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
 187#endif /* IXGBE_FCOE */
 188	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 189	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
 190	u16 reg_idx, pool;
 191	int i;
 192
 193	/* only proceed if VMDq is enabled */
 194	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
 195		return false;
 196
 197	/* start at VMDq register offset for SR-IOV enabled setups */
 198	pool = 0;
 199	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 200	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
 201#ifdef IXGBE_FCOE
 202		/* Allow first FCoE queue to be mapped as RSS */
 203		if (fcoe->offset && (i > fcoe->offset))
 204			break;
 205#endif
 206		/* If we are greater than indices move to next pool */
 207		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
 208			pool++;
 209			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
 210		}
 211		adapter->rx_ring[i]->reg_idx = reg_idx;
 212		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
 213	}
 214
 215#ifdef IXGBE_FCOE
 216	/* FCoE uses a linear block of queues so just assigning 1:1 */
 217	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
 218		adapter->rx_ring[i]->reg_idx = reg_idx;
 219		adapter->rx_ring[i]->netdev = adapter->netdev;
 220	}
 221
 222#endif
 223	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
 224	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
 225#ifdef IXGBE_FCOE
 226		/* Allow first FCoE queue to be mapped as RSS */
 227		if (fcoe->offset && (i > fcoe->offset))
 228			break;
 229#endif
 230		/* If we are greater than indices move to next pool */
 231		if ((reg_idx & rss->mask) >= rss->indices)
 232			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
 233		adapter->tx_ring[i]->reg_idx = reg_idx;
 234	}
 235
 236#ifdef IXGBE_FCOE
 237	/* FCoE uses a linear block of queues so just assigning 1:1 */
 238	for (; i < adapter->num_tx_queues; i++, reg_idx++)
 239		adapter->tx_ring[i]->reg_idx = reg_idx;
 240
 241#endif
 242
 243	return true;
 244}
 245
 246/**
 247 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 248 * @adapter: board private structure to initialize
 249 *
 250 * Cache the descriptor ring offsets for RSS to the assigned rings.
 251 *
 252 **/
 253static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 254{
 255	int i, reg_idx;
 256
 257	for (i = 0; i < adapter->num_rx_queues; i++) {
 258		adapter->rx_ring[i]->reg_idx = i;
 259		adapter->rx_ring[i]->netdev = adapter->netdev;
 260	}
 261	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
 262		adapter->tx_ring[i]->reg_idx = reg_idx;
 263	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
 264		adapter->xdp_ring[i]->reg_idx = reg_idx;
 265
 266	return true;
 267}
 268
 269/**
 270 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 271 * @adapter: board private structure to initialize
 272 *
 273 * Once we know the feature-set enabled for the device, we'll cache
 274 * the register offset the descriptor ring is assigned to.
 275 *
 276 * Note, the order the various feature calls is important.  It must start with
 277 * the "most" features enabled at the same time, then trickle down to the
 278 * least amount of features turned on at once.
 279 **/
 280static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 281{
 282	/* start with default case */
 283	adapter->rx_ring[0]->reg_idx = 0;
 284	adapter->tx_ring[0]->reg_idx = 0;
 285
 286#ifdef CONFIG_IXGBE_DCB
 287	if (ixgbe_cache_ring_dcb_sriov(adapter))
 288		return;
 289
 290	if (ixgbe_cache_ring_dcb(adapter))
 291		return;
 292
 293#endif
 294	if (ixgbe_cache_ring_sriov(adapter))
 295		return;
 296
 297	ixgbe_cache_ring_rss(adapter);
 298}
 299
 300static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
 301{
 302	int queues;
 303
 304	queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
 305	return adapter->xdp_prog ? queues : 0;
 306}
 307
 308#define IXGBE_RSS_64Q_MASK	0x3F
 309#define IXGBE_RSS_16Q_MASK	0xF
 310#define IXGBE_RSS_8Q_MASK	0x7
 311#define IXGBE_RSS_4Q_MASK	0x3
 312#define IXGBE_RSS_2Q_MASK	0x1
 313#define IXGBE_RSS_DISABLED_MASK	0x0
 314
 315#ifdef CONFIG_IXGBE_DCB
 316/**
 317 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 318 * @adapter: board private structure to initialize
 319 *
 320 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
 321 * and VM pools where appropriate.  Also assign queues based on DCB
 322 * priorities and map accordingly..
 323 *
 324 **/
 325static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 326{
 327	int i;
 328	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
 329	u16 vmdq_m = 0;
 330#ifdef IXGBE_FCOE
 331	u16 fcoe_i = 0;
 332#endif
 333	u8 tcs = adapter->hw_tcs;
 334
 335	/* verify we have DCB queueing enabled before proceeding */
 336	if (tcs <= 1)
 337		return false;
 338
 339	/* verify we have VMDq enabled before proceeding */
 340	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 341		return false;
 342
 343	/* limit VMDq instances on the PF by number of Tx queues */
 344	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
 345
 346	/* Add starting offset to total pool count */
 347	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 348
 349	/* 16 pools w/ 8 TC per pool */
 350	if (tcs > 4) {
 351		vmdq_i = min_t(u16, vmdq_i, 16);
 352		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
 353	/* 32 pools w/ 4 TC per pool */
 354	} else {
 355		vmdq_i = min_t(u16, vmdq_i, 32);
 356		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
 357	}
 358
 359#ifdef IXGBE_FCOE
 360	/* queues in the remaining pools are available for FCoE */
 361	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
 362
 363#endif
 364	/* remove the starting offset from the pool count */
 365	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
 366
 367	/* save features for later use */
 368	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
 369	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
 370
 371	/*
 372	 * We do not support DCB, VMDq, and RSS all simultaneously
 373	 * so we will disable RSS since it is the lowest priority
 374	 */
 375	adapter->ring_feature[RING_F_RSS].indices = 1;
 376	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
 377
 378	/* disable ATR as it is not supported when VMDq is enabled */
 379	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 380
 381	adapter->num_rx_pools = vmdq_i;
 382	adapter->num_rx_queues_per_pool = tcs;
 383
 384	adapter->num_tx_queues = vmdq_i * tcs;
 385	adapter->num_xdp_queues = 0;
 386	adapter->num_rx_queues = vmdq_i * tcs;
 387
 388#ifdef IXGBE_FCOE
 389	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 390		struct ixgbe_ring_feature *fcoe;
 391
 392		fcoe = &adapter->ring_feature[RING_F_FCOE];
 393
 394		/* limit ourselves based on feature limits */
 395		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 396
 397		if (fcoe_i) {
 398			/* alloc queues for FCoE separately */
 399			fcoe->indices = fcoe_i;
 400			fcoe->offset = vmdq_i * tcs;
 401
 402			/* add queues to adapter */
 403			adapter->num_tx_queues += fcoe_i;
 404			adapter->num_rx_queues += fcoe_i;
 405		} else if (tcs > 1) {
 406			/* use queue belonging to FcoE TC */
 407			fcoe->indices = 1;
 408			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
 409		} else {
 410			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
 411
 412			fcoe->indices = 0;
 413			fcoe->offset = 0;
 414		}
 415	}
 416
 417#endif /* IXGBE_FCOE */
 418	/* configure TC to queue mapping */
 419	for (i = 0; i < tcs; i++)
 420		netdev_set_tc_queue(adapter->netdev, i, 1, i);
 421
 422	return true;
 423}
 424
 425static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 426{
 427	struct net_device *dev = adapter->netdev;
 428	struct ixgbe_ring_feature *f;
 429	int rss_i, rss_m, i;
 430	int tcs;
 431
 432	/* Map queue offset and counts onto allocated tx queues */
 433	tcs = adapter->hw_tcs;
 434
 435	/* verify we have DCB queueing enabled before proceeding */
 436	if (tcs <= 1)
 437		return false;
 438
 439	/* determine the upper limit for our current DCB mode */
 440	rss_i = dev->num_tx_queues / tcs;
 441	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
 442		/* 8 TC w/ 4 queues per TC */
 443		rss_i = min_t(u16, rss_i, 4);
 444		rss_m = IXGBE_RSS_4Q_MASK;
 445	} else if (tcs > 4) {
 446		/* 8 TC w/ 8 queues per TC */
 447		rss_i = min_t(u16, rss_i, 8);
 448		rss_m = IXGBE_RSS_8Q_MASK;
 449	} else {
 450		/* 4 TC w/ 16 queues per TC */
 451		rss_i = min_t(u16, rss_i, 16);
 452		rss_m = IXGBE_RSS_16Q_MASK;
 453	}
 454
 455	/* set RSS mask and indices */
 456	f = &adapter->ring_feature[RING_F_RSS];
 457	rss_i = min_t(int, rss_i, f->limit);
 458	f->indices = rss_i;
 459	f->mask = rss_m;
 460
 461	/* disable ATR as it is not supported when multiple TCs are enabled */
 462	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 463
 464#ifdef IXGBE_FCOE
  465	/* FCoE-enabled queues require special configuration indexed
  466	 * by feature-specific indices and offset. Here we map FCoE
  467	 * indices onto the DCB queue pairs, allowing FCoE to own
 468	 * configuration later.
 469	 */
 470	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 471		u8 tc = ixgbe_fcoe_get_tc(adapter);
 472
 473		f = &adapter->ring_feature[RING_F_FCOE];
 474		f->indices = min_t(u16, rss_i, f->limit);
 475		f->offset = rss_i * tc;
 476	}
 477
 478#endif /* IXGBE_FCOE */
 479	for (i = 0; i < tcs; i++)
 480		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
 481
 482	adapter->num_tx_queues = rss_i * tcs;
 483	adapter->num_xdp_queues = 0;
 484	adapter->num_rx_queues = rss_i * tcs;
 485
 486	return true;
 487}
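The closing loop above hands each traffic class a contiguous block of rss_i queues starting at offset rss_i * tc, which is what netdev_set_tc_queue() publishes to the stack. A standalone sketch of the resulting layout, using the 4 TC / 16 queue case from the function as example numbers:

#include <stdio.h>

int main(void)
{
	unsigned int tcs = 4, rss_i = 16;	/* 4 TC w/ 16 queues per TC */
	unsigned int tc;

	/* mirrors: netdev_set_tc_queue(dev, tc, rss_i, rss_i * tc) */
	for (tc = 0; tc < tcs; tc++)
		printf("TC%u -> queues [%u..%u]\n",
		       tc, rss_i * tc, rss_i * tc + rss_i - 1);
	return 0;
}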
 488
 489#endif
 490/**
 491 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 492 * @adapter: board private structure to initialize
 493 *
  494 * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
  495 * and VM pools where appropriate.  If RSS is available, then also try to
  496 * enable RSS and map accordingly.
 497 *
 498 **/
 499static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 500{
 501	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
 502	u16 vmdq_m = 0;
 503	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
 504	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
 505#ifdef IXGBE_FCOE
 506	u16 fcoe_i = 0;
 507#endif
 508
 509	/* only proceed if SR-IOV is enabled */
 510	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 511		return false;
 512
 513	/* limit l2fwd RSS based on total Tx queue limit */
 514	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
 515
 516	/* Add starting offset to total pool count */
 517	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 518
 519	/* double check we are limited to maximum pools */
 520	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 521
 522	/* 64 pool mode with 2 queues per pool */
 523	if (vmdq_i > 32) {
 524		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 525		rss_m = IXGBE_RSS_2Q_MASK;
 526		rss_i = min_t(u16, rss_i, 2);
 527	/* 32 pool mode with up to 4 queues per pool */
 528	} else {
 529		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
 530		rss_m = IXGBE_RSS_4Q_MASK;
 531		/* We can support 4, 2, or 1 queues */
 532		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
 533	}
 534
 535#ifdef IXGBE_FCOE
 536	/* queues in the remaining pools are available for FCoE */
 537	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
 538
 539#endif
 540	/* remove the starting offset from the pool count */
 541	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
 542
 543	/* save features for later use */
 544	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
 545	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
 546
 547	/* limit RSS based on user input and save for later use */
 548	adapter->ring_feature[RING_F_RSS].indices = rss_i;
 549	adapter->ring_feature[RING_F_RSS].mask = rss_m;
 550
 551	adapter->num_rx_pools = vmdq_i;
 552	adapter->num_rx_queues_per_pool = rss_i;
 553
 554	adapter->num_rx_queues = vmdq_i * rss_i;
 555	adapter->num_tx_queues = vmdq_i * rss_i;
 556	adapter->num_xdp_queues = 0;
 557
 558	/* disable ATR as it is not supported when VMDq is enabled */
 559	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 560
 561#ifdef IXGBE_FCOE
 562	/*
  563	 * FCoE can use rings from adjacent buffers to allow RSS-like
  564	 * behavior.  To account for this we need to add the
 565	 * FCoE indices to the total ring count.
 566	 */
 567	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 568		struct ixgbe_ring_feature *fcoe;
 569
 570		fcoe = &adapter->ring_feature[RING_F_FCOE];
 571
 572		/* limit ourselves based on feature limits */
 573		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
 574
 575		if (vmdq_i > 1 && fcoe_i) {
 576			/* alloc queues for FCoE separately */
 577			fcoe->indices = fcoe_i;
 578			fcoe->offset = vmdq_i * rss_i;
 579		} else {
 580			/* merge FCoE queues with RSS queues */
 581			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
 582
 583			/* limit indices to rss_i if MSI-X is disabled */
 584			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 585				fcoe_i = rss_i;
 586
 587			/* attempt to reserve some queues for just FCoE */
 588			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
 589			fcoe->offset = fcoe_i - fcoe->indices;
 590
 591			fcoe_i -= rss_i;
 592		}
 593
 594		/* add queues to adapter */
 595		adapter->num_tx_queues += fcoe_i;
 596		adapter->num_rx_queues += fcoe_i;
 597	}
 598
 599#endif
 600	/* To support macvlan offload we have to use num_tc to
 601	 * restrict the queues that can be used by the device.
 602	 * By doing this we can avoid reporting a false number of
 603	 * queues.
 604	 */
 605	if (vmdq_i > 1)
 606		netdev_set_num_tc(adapter->netdev, 1);
 607
 608	/* populate TC0 for use by pool 0 */
 609	netdev_set_tc_queue(adapter->netdev, 0,
 610			    adapter->num_rx_queues_per_pool, 0);
 611
 612	return true;
 613}
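In the 32-pool branch above, the per-pool RSS width has to land on a size the hardware masks can express, so the chained conditional rounds the requested value down to 4, 2, or 1. Written out as a plain helper (a sketch, not driver code):

/* Round a requested per-pool queue count down to a supported
 * width (4, 2, or 1), equivalent to the chained ?: above.
 */
static unsigned int pool_rss_width(unsigned int requested)
{
	if (requested >= 4)
		return 4;
	if (requested >= 2)
		return 2;
	return 1;
}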
 614
 615/**
 616 * ixgbe_set_rss_queues - Allocate queues for RSS
 617 * @adapter: board private structure to initialize
 618 *
 619 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 620 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 621 *
 622 **/
 623static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 624{
 625	struct ixgbe_hw *hw = &adapter->hw;
 626	struct ixgbe_ring_feature *f;
 627	u16 rss_i;
 628
  629	/* set mask for RSS queue limit: 16 below X550, 64 from X550 on */
 630	f = &adapter->ring_feature[RING_F_RSS];
 631	rss_i = f->limit;
 632
 633	f->indices = rss_i;
 634
 635	if (hw->mac.type < ixgbe_mac_X550)
 636		f->mask = IXGBE_RSS_16Q_MASK;
 637	else
 638		f->mask = IXGBE_RSS_64Q_MASK;
 639
 640	/* disable ATR by default, it will be configured below */
 641	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 642
 643	/*
 644	 * Use Flow Director in addition to RSS to ensure the best
 645	 * distribution of flows across cores, even when an FDIR flow
 646	 * isn't matched.
 647	 */
 648	if (rss_i > 1 && adapter->atr_sample_rate) {
 649		f = &adapter->ring_feature[RING_F_FDIR];
 650
 651		rss_i = f->indices = f->limit;
 652
 653		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
 654			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
 655	}
 656
 657#ifdef IXGBE_FCOE
 658	/*
  659	 * FCoE can exist on the same rings as standard network traffic;
  660	 * however, it is preferred to avoid that if possible.  In order
 661	 * to get the best performance we allocate as many FCoE queues
 662	 * as we can and we place them at the end of the ring array to
 663	 * avoid sharing queues with standard RSS on systems with 24 or
 664	 * more CPUs.
 665	 */
 666	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 667		struct net_device *dev = adapter->netdev;
 668		u16 fcoe_i;
 669
 670		f = &adapter->ring_feature[RING_F_FCOE];
 671
 672		/* merge FCoE queues with RSS queues */
 673		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
 674		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
 675
 676		/* limit indices to rss_i if MSI-X is disabled */
 677		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 678			fcoe_i = rss_i;
 679
 680		/* attempt to reserve some queues for just FCoE */
 681		f->indices = min_t(u16, fcoe_i, f->limit);
 682		f->offset = fcoe_i - f->indices;
 683		rss_i = max_t(u16, fcoe_i, rss_i);
 684	}
 685
 686#endif /* IXGBE_FCOE */
 687	adapter->num_rx_queues = rss_i;
 688	adapter->num_tx_queues = rss_i;
 689	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
 690
 691	return true;
 692}
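Worked through with illustrative numbers: with rss_i = 16, an FCoE limit of 8, and 32 online CPUs, fcoe_i = min(8 + 16, 32) = 24, so 8 FCoE indices start at offset 16 and rss_i grows to 24; queues 16..23 then belong to FCoE alone while 0..15 remain pure RSS. As a self-contained sketch of that reservation:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int rss_i = 16, fcoe_limit = 8, online_cpus = 32;
	unsigned int fcoe_i, indices, offset;

	fcoe_i  = min_u(fcoe_limit + rss_i, online_cpus);	/* 24 */
	indices = min_u(fcoe_i, fcoe_limit);			/* 8  */
	offset  = fcoe_i - indices;				/* 16 */
	rss_i   = max_u(fcoe_i, rss_i);				/* 24 */

	printf("FCoE owns queues [%u..%u], %u rings total\n",
	       offset, offset + indices - 1, rss_i);
	return 0;
}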
 693
 694/**
 695 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 696 * @adapter: board private structure to initialize
 697 *
  698 * This is the top-level queue allocation routine.  The order here is very
  699 * important, starting with the largest set of features turned on at once,
 700 * and ending with the smallest set of features.  This way large combinations
 701 * can be allocated if they're turned on, and smaller combinations are the
 702 * fallthrough conditions.
 703 *
 704 **/
 705static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 706{
 707	/* Start with base case */
 708	adapter->num_rx_queues = 1;
 709	adapter->num_tx_queues = 1;
 710	adapter->num_xdp_queues = 0;
 711	adapter->num_rx_pools = 1;
 712	adapter->num_rx_queues_per_pool = 1;
 713
 714#ifdef CONFIG_IXGBE_DCB
 715	if (ixgbe_set_dcb_sriov_queues(adapter))
 716		return;
 717
 718	if (ixgbe_set_dcb_queues(adapter))
 719		return;
 720
 721#endif
 722	if (ixgbe_set_sriov_queues(adapter))
 723		return;
 724
 725	ixgbe_set_rss_queues(adapter);
 726}
 727
 728/**
 729 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 730 * @adapter: board private structure
 731 *
 732 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 733 * return a negative error code if unable to acquire MSI-X vectors for any
 734 * reason.
 735 */
 736static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
 737{
 738	struct ixgbe_hw *hw = &adapter->hw;
 739	int i, vectors, vector_threshold;
 740
 741	/* We start by asking for one vector per queue pair with XDP queues
 742	 * being stacked with TX queues.
 743	 */
 744	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
 745	vectors = max(vectors, adapter->num_xdp_queues);
 746
 747	/* It is easy to be greedy for MSI-X vectors. However, it really
 748	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
 749	 * be somewhat conservative and only ask for (roughly) the same number
 750	 * of vectors as there are CPUs.
 751	 */
 752	vectors = min_t(int, vectors, num_online_cpus());
 753
 754	/* Some vectors are necessary for non-queue interrupts */
 755	vectors += NON_Q_VECTORS;
 756
 757	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
 758	 * With features such as RSS and VMDq, we can easily surpass the
 759	 * number of Rx and Tx descriptor queues supported by our device.
 760	 * Thus, we cap the maximum in the rare cases where the CPU count also
 761	 * exceeds our vector limit
  762	 * exceeds our vector limit.
 763	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
 764
 765	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
 766	 * handler, and (2) an Other (Link Status Change, etc.) handler.
 767	 */
 768	vector_threshold = MIN_MSIX_COUNT;
 769
 770	adapter->msix_entries = kcalloc(vectors,
 771					sizeof(struct msix_entry),
 772					GFP_KERNEL);
 773	if (!adapter->msix_entries)
 774		return -ENOMEM;
 775
 776	for (i = 0; i < vectors; i++)
 777		adapter->msix_entries[i].entry = i;
 778
 779	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
 780					vector_threshold, vectors);
 781
 782	if (vectors < 0) {
  783		/* A negative count of allocated vectors indicates an error
  784		 * acquiring MSI-X vectors within the specified range
 785		 */
 786		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
 787			   vectors);
 788
 789		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
 790		kfree(adapter->msix_entries);
 791		adapter->msix_entries = NULL;
 792
 793		return vectors;
 794	}
 795
 796	/* we successfully allocated some number of vectors within our
 797	 * requested range.
 798	 */
 799	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
 800
  801	/* Adjust for only the vectors we'll use, which is the minimum
  802	 * of max_q_vectors and the number of vectors we were allocated.
 803	 */
 804	vectors -= NON_Q_VECTORS;
 805	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
 806
 807	return 0;
 808}
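pci_enable_msix_range() takes the entry table plus a [minvec, maxvec] window and returns either the number of vectors actually granted (anywhere in that window) or a negative errno, so callers have to be ready to run with fewer vectors than requested. The general shape of the call, sketched for a hypothetical driver (request_msix and 'wanted' are made-up names; MIN_MSIX_COUNT stands in for the driver's floor):

#include <linux/pci.h>

/* Hypothetical sketch of the MSI-X range pattern used above. */
static int request_msix(struct pci_dev *pdev, struct msix_entry *entries,
			int wanted)
{
	int got, i;

	for (i = 0; i < wanted; i++)
		entries[i].entry = i;

	/* accept anything between the floor and what we asked for */
	got = pci_enable_msix_range(pdev, entries, MIN_MSIX_COUNT, wanted);
	if (got < 0)
		return got;	/* negative errno: fall back to MSI/legacy */

	return got;		/* may be less than 'wanted' */
}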
 809
 810static void ixgbe_add_ring(struct ixgbe_ring *ring,
 811			   struct ixgbe_ring_container *head)
 812{
 813	ring->next = head->ring;
 814	head->ring = ring;
 815	head->count++;
 816	head->next_update = jiffies + 1;
 817}
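ixgbe_add_ring() is a plain LIFO push onto the container's singly linked list: the new ring becomes head->ring and points at the previous head, so walking the ->next chain visits rings in reverse insertion order. A minimal sketch of the same push and walk on plain C types:

#include <stdio.h>

struct ring { int id; struct ring *next; };
struct container { struct ring *head; int count; };

static void push(struct container *c, struct ring *r)
{
	r->next = c->head;	/* old head slides in behind us */
	c->head = r;		/* newest ring becomes the head */
	c->count++;
}

int main(void)
{
	struct ring r0 = { 0 }, r1 = { 1 }, r2 = { 2 };
	struct container c = { 0 };
	struct ring *r;

	push(&c, &r0); push(&c, &r1); push(&c, &r2);
	for (r = c.head; r; r = r->next)
		printf("%d ", r->id);	/* prints: 2 1 0 */
	printf("\n");
	return 0;
}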
 818
 819/**
 820 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 821 * @adapter: board private structure to initialize
 822 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 823 * @v_idx: index of vector in adapter struct
 824 * @txr_count: total number of Tx rings to allocate
 825 * @txr_idx: index of first Tx ring to allocate
 826 * @xdp_count: total number of XDP rings to allocate
 827 * @xdp_idx: index of first XDP ring to allocate
 828 * @rxr_count: total number of Rx rings to allocate
 829 * @rxr_idx: index of first Rx ring to allocate
 830 *
 831 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 832 **/
 833static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 834				int v_count, int v_idx,
 835				int txr_count, int txr_idx,
 836				int xdp_count, int xdp_idx,
 837				int rxr_count, int rxr_idx)
 838{
 839	int node = dev_to_node(&adapter->pdev->dev);
 840	struct ixgbe_q_vector *q_vector;
 841	struct ixgbe_ring *ring;
 842	int cpu = -1;
 843	int ring_count;
 844	u8 tcs = adapter->hw_tcs;
 845
 846	ring_count = txr_count + rxr_count + xdp_count;
 847
 848	/* customize cpu for Flow Director mapping */
 849	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
 850		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 851		if (rss_i > 1 && adapter->atr_sample_rate) {
 852			cpu = cpumask_local_spread(v_idx, node);
 853			node = cpu_to_node(cpu);
 854		}
 855	}
 856
 857	/* allocate q_vector and rings */
 858	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
 859				GFP_KERNEL, node);
 860	if (!q_vector)
 861		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
 862				   GFP_KERNEL);
 863	if (!q_vector)
 864		return -ENOMEM;
 865
 866	/* setup affinity mask and node */
 867	if (cpu != -1)
 868		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
 869	q_vector->numa_node = node;
 870
 871#ifdef CONFIG_IXGBE_DCA
 872	/* initialize CPU for DCA */
 873	q_vector->cpu = -1;
 874
 875#endif
 876	/* initialize NAPI */
 877	netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
 878
 879	/* tie q_vector and adapter together */
 880	adapter->q_vector[v_idx] = q_vector;
 881	q_vector->adapter = adapter;
 882	q_vector->v_idx = v_idx;
 883
 884	/* initialize work limits */
 885	q_vector->tx.work_limit = adapter->tx_work_limit;
 886
 887	/* Initialize setting for adaptive ITR */
 888	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
 889			   IXGBE_ITR_ADAPTIVE_LATENCY;
 890	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
 891			   IXGBE_ITR_ADAPTIVE_LATENCY;
 892
  893	/* initialize ITR */
 894	if (txr_count && !rxr_count) {
 895		/* tx only vector */
 896		if (adapter->tx_itr_setting == 1)
 897			q_vector->itr = IXGBE_12K_ITR;
 898		else
 899			q_vector->itr = adapter->tx_itr_setting;
 900	} else {
 901		/* rx or rx/tx vector */
 902		if (adapter->rx_itr_setting == 1)
 903			q_vector->itr = IXGBE_20K_ITR;
 904		else
 905			q_vector->itr = adapter->rx_itr_setting;
 906	}
 907
 908	/* initialize pointer to rings */
 909	ring = q_vector->ring;
 910
 911	while (txr_count) {
 912		/* assign generic ring traits */
 913		ring->dev = &adapter->pdev->dev;
 914		ring->netdev = adapter->netdev;
 915
 916		/* configure backlink on ring */
 917		ring->q_vector = q_vector;
 918
 919		/* update q_vector Tx values */
 920		ixgbe_add_ring(ring, &q_vector->tx);
 921
 922		/* apply Tx specific ring traits */
 923		ring->count = adapter->tx_ring_count;
 924		ring->queue_index = txr_idx;
 925
 926		/* assign ring to adapter */
 927		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
 928
 929		/* update count and index */
 930		txr_count--;
 931		txr_idx += v_count;
 932
 933		/* push pointer to next ring */
 934		ring++;
 935	}
 936
 937	while (xdp_count) {
 938		/* assign generic ring traits */
 939		ring->dev = &adapter->pdev->dev;
 940		ring->netdev = adapter->netdev;
 941
 942		/* configure backlink on ring */
 943		ring->q_vector = q_vector;
 944
 945		/* update q_vector Tx values */
 946		ixgbe_add_ring(ring, &q_vector->tx);
 947
 948		/* apply Tx specific ring traits */
 949		ring->count = adapter->tx_ring_count;
 950		ring->queue_index = xdp_idx;
 951		set_ring_xdp(ring);
 952		spin_lock_init(&ring->tx_lock);
 953
 954		/* assign ring to adapter */
 955		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
 956
 957		/* update count and index */
 958		xdp_count--;
 959		xdp_idx++;
 960
 961		/* push pointer to next ring */
 962		ring++;
 963	}
 964
 965	while (rxr_count) {
 966		/* assign generic ring traits */
 967		ring->dev = &adapter->pdev->dev;
 968		ring->netdev = adapter->netdev;
 969
 970		/* configure backlink on ring */
 971		ring->q_vector = q_vector;
 972
 973		/* update q_vector Rx values */
 974		ixgbe_add_ring(ring, &q_vector->rx);
 975
 976		/*
 977		 * 82599 errata, UDP frames with a 0 checksum
 978		 * can be marked as checksum errors.
 979		 */
 980		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 981			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
 982
 983#ifdef IXGBE_FCOE
 984		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
 985			struct ixgbe_ring_feature *f;
 986			f = &adapter->ring_feature[RING_F_FCOE];
 987			if ((rxr_idx >= f->offset) &&
 988			    (rxr_idx < f->offset + f->indices))
 989				set_bit(__IXGBE_RX_FCOE, &ring->state);
 990		}
 991
 992#endif /* IXGBE_FCOE */
 993		/* apply Rx specific ring traits */
 994		ring->count = adapter->rx_ring_count;
 995		ring->queue_index = rxr_idx;
 996
 997		/* assign ring to adapter */
 998		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
 999
1000		/* update count and index */
1001		rxr_count--;
1002		rxr_idx += v_count;
1003
1004		/* push pointer to next ring */
1005		ring++;
1006	}
1007
1008	return 0;
1009}
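Note how the Tx and Rx loops advance their queue index by v_count rather than 1: when rings outnumber vectors, vector v ends up owning queues v, v + v_count, v + 2*v_count, ..., so a queue's vector is simply its index modulo v_count. A small sketch of that striding with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int v_count = 4;	/* q_vectors on the adapter */
	unsigned int num_queues = 8;	/* rings spread across them */
	unsigned int v_idx, q;

	for (v_idx = 0; v_idx < v_count; v_idx++)
		for (q = v_idx; q < num_queues; q += v_count)
			printf("vector %u <- queue %u\n", v_idx, q);
	/* queue q is always served by vector q % v_count */
	return 0;
}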
1010
1011/**
1012 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
1013 * @adapter: board private structure to initialize
1014 * @v_idx: Index of vector to be freed
1015 *
 1016 * This function frees the memory allocated to the q_vector.  In addition, if
 1017 * NAPI is enabled, it will delete any references to the NAPI struct prior
1018 * to freeing the q_vector.
1019 **/
1020static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
1021{
1022	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
1023	struct ixgbe_ring *ring;
1024
1025	ixgbe_for_each_ring(ring, q_vector->tx) {
1026		if (ring_is_xdp(ring))
1027			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
1028		else
1029			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
1030	}
1031
1032	ixgbe_for_each_ring(ring, q_vector->rx)
1033		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
1034
1035	adapter->q_vector[v_idx] = NULL;
1036	__netif_napi_del(&q_vector->napi);
1037
1038	/*
 1039	 * after a call to __netif_napi_del() the napi struct may still be
 1040	 * in use and ixgbe_get_stats64() might access the rings on this
 1041	 * vector, so we must wait an RCU grace period before freeing it.
1042	 */
1043	kfree_rcu(q_vector, rcu);
1044}
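kfree_rcu() defers the actual kfree() until after an RCU grace period has elapsed, which is why struct ixgbe_q_vector embeds a struct rcu_head (the 'rcu' member named in the call). The general pattern, sketched for a hypothetical object type:

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct obj {
	int data;
	struct rcu_head rcu;	/* storage kfree_rcu() uses for deferral */
};

static void retire(struct obj *o)
{
	/*
	 * Readers that found the object under rcu_read_lock() may still
	 * be dereferencing it; free only after they are all done.
	 */
	kfree_rcu(o, rcu);
}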
1045
1046/**
1047 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
1048 * @adapter: board private structure to initialize
1049 *
1050 * We allocate one q_vector per queue interrupt.  If allocation fails we
1051 * return -ENOMEM.
1052 **/
1053static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
1054{
1055	int q_vectors = adapter->num_q_vectors;
1056	int rxr_remaining = adapter->num_rx_queues;
1057	int txr_remaining = adapter->num_tx_queues;
1058	int xdp_remaining = adapter->num_xdp_queues;
1059	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
1060	int err, i;
1061
1062	/* only one q_vector if MSI-X is disabled. */
1063	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1064		q_vectors = 1;
1065
1066	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
1067		for (; rxr_remaining; v_idx++) {
1068			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1069						   0, 0, 0, 0, 1, rxr_idx);
1070
1071			if (err)
1072				goto err_out;
1073
1074			/* update counts and index */
1075			rxr_remaining--;
1076			rxr_idx++;
1077		}
1078	}
1079
1080	for (; v_idx < q_vectors; v_idx++) {
1081		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1082		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1083		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);
1084
1085		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
1086					   tqpv, txr_idx,
1087					   xqpv, xdp_idx,
1088					   rqpv, rxr_idx);
1089
1090		if (err)
1091			goto err_out;
1092
1093		/* update counts and index */
1094		rxr_remaining -= rqpv;
1095		txr_remaining -= tqpv;
1096		xdp_remaining -= xqpv;
1097		rxr_idx++;
1098		txr_idx++;
1099		xdp_idx += xqpv;
1100	}
1101
1102	for (i = 0; i < adapter->num_rx_queues; i++) {
1103		if (adapter->rx_ring[i])
1104			adapter->rx_ring[i]->ring_idx = i;
1105	}
1106
1107	for (i = 0; i < adapter->num_tx_queues; i++) {
1108		if (adapter->tx_ring[i])
1109			adapter->tx_ring[i]->ring_idx = i;
1110	}
1111
1112	for (i = 0; i < adapter->num_xdp_queues; i++) {
1113		if (adapter->xdp_ring[i])
1114			adapter->xdp_ring[i]->ring_idx = i;
1115	}
1116
1117	return 0;
1118
1119err_out:
1120	adapter->num_tx_queues = 0;
1121	adapter->num_xdp_queues = 0;
1122	adapter->num_rx_queues = 0;
1123	adapter->num_q_vectors = 0;
1124
1125	while (v_idx--)
1126		ixgbe_free_q_vector(adapter, v_idx);
1127
1128	return -ENOMEM;
1129}
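The distribution loop above recomputes DIV_ROUND_UP(remaining, vectors_left) on every pass, which splits R rings over V vectors into groups whose sizes differ by at most one. A standalone sketch of that apportioning:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int remaining = 10, vectors = 4, v;

	for (v = 0; v < vectors; v++) {
		unsigned int share = DIV_ROUND_UP(remaining, vectors - v);

		printf("vector %u gets %u rings\n", v, share);
		remaining -= share;
	}
	/* 10 rings over 4 vectors -> 3, 3, 2, 2 */
	return 0;
}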
1130
1131/**
1132 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
1133 * @adapter: board private structure to initialize
1134 *
 1135 * This function frees the memory allocated to the q_vectors.  In addition, if
 1136 * NAPI is enabled, it will delete any references to the NAPI struct prior
1137 * to freeing the q_vector.
1138 **/
1139static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
1140{
1141	int v_idx = adapter->num_q_vectors;
1142
1143	adapter->num_tx_queues = 0;
1144	adapter->num_xdp_queues = 0;
1145	adapter->num_rx_queues = 0;
1146	adapter->num_q_vectors = 0;
1147
1148	while (v_idx--)
1149		ixgbe_free_q_vector(adapter, v_idx);
1150}
1151
1152static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
1153{
1154	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1155		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1156		pci_disable_msix(adapter->pdev);
1157		kfree(adapter->msix_entries);
1158		adapter->msix_entries = NULL;
1159	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1160		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1161		pci_disable_msi(adapter->pdev);
1162	}
1163}
1164
1165/**
1166 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
1167 * @adapter: board private structure to initialize
1168 *
1169 * Attempt to configure the interrupts using the best available
1170 * capabilities of the hardware and the kernel.
1171 **/
1172static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1173{
1174	int err;
1175
1176	/* We will try to get MSI-X interrupts first */
1177	if (!ixgbe_acquire_msix_vectors(adapter))
1178		return;
1179
 1180	/* At this point, we do not have MSI-X capabilities. We need to
 1181	 * reconfigure or disable the various features that require
 1182	 * MSI-X.
1183	 */
1184
1185	/* Disable DCB unless we only have a single traffic class */
1186	if (adapter->hw_tcs > 1) {
1187		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
1188		netdev_reset_tc(adapter->netdev);
1189
1190		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1191			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
1192
1193		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1194		adapter->temp_dcb_cfg.pfc_mode_enable = false;
1195		adapter->dcb_cfg.pfc_mode_enable = false;
1196	}
1197
1198	adapter->hw_tcs = 0;
1199	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
1200	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
1201
1202	/* Disable SR-IOV support */
1203	e_dev_warn("Disabling SR-IOV support\n");
1204	ixgbe_disable_sriov(adapter);
1205
1206	/* Disable RSS */
1207	e_dev_warn("Disabling RSS support\n");
1208	adapter->ring_feature[RING_F_RSS].limit = 1;
1209
1210	/* recalculate number of queues now that many features have been
1211	 * changed or disabled.
1212	 */
1213	ixgbe_set_num_queues(adapter);
1214	adapter->num_q_vectors = 1;
1215
1216	err = pci_enable_msi(adapter->pdev);
1217	if (err)
1218		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
1219			   err);
1220	else
1221		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
1222}
1223
1224/**
1225 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
1226 * @adapter: board private structure to initialize
1227 *
1228 * We determine which interrupt scheme to use based on...
1229 * - Kernel support (MSI, MSI-X)
1230 *   - which can be user-defined (via MODULE_PARAM)
1231 * - Hardware queue count (num_*_queues)
1232 *   - defined by miscellaneous hardware support/features (RSS, etc.)
1233 **/
1234int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
1235{
1236	int err;
1237
1238	/* Number of supported queues */
1239	ixgbe_set_num_queues(adapter);
1240
1241	/* Set interrupt mode */
1242	ixgbe_set_interrupt_capability(adapter);
1243
1244	err = ixgbe_alloc_q_vectors(adapter);
1245	if (err) {
1246		e_dev_err("Unable to allocate memory for queue vectors\n");
1247		goto err_alloc_q_vectors;
1248	}
1249
1250	ixgbe_cache_ring_register(adapter);
1251
 1252	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
1253		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
1254		   adapter->num_rx_queues, adapter->num_tx_queues,
1255		   adapter->num_xdp_queues);
1256
1257	set_bit(__IXGBE_DOWN, &adapter->state);
1258
1259	return 0;
1260
1261err_alloc_q_vectors:
1262	ixgbe_reset_interrupt_capability(adapter);
1263	return err;
1264}
1265
1266/**
1267 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
1268 * @adapter: board private structure to clear interrupt scheme on
1269 *
1270 * We go through and clear interrupt specific resources and reset the structure
1271 * to pre-load conditions
1272 **/
1273void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
1274{
1275	adapter->num_tx_queues = 0;
1276	adapter->num_xdp_queues = 0;
1277	adapter->num_rx_queues = 0;
1278
1279	ixgbe_free_q_vectors(adapter);
1280	ixgbe_reset_interrupt_capability(adapter);
1281}
1282
1283void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
1284		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
1285{
1286	struct ixgbe_adv_tx_context_desc *context_desc;
1287	u16 i = tx_ring->next_to_use;
1288
1289	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
1290
1291	i++;
1292	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1293
1294	/* set bits to identify this as an advanced context descriptor */
1295	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1296
1297	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
1298	context_desc->fceof_saidx	= cpu_to_le32(fceof_saidx);
1299	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
1300	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
1301}
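The next_to_use update above is the standard descriptor-ring advance: bump the cursor and fold it back to 0 when it reaches the ring size, equivalent to (i + 1) % count but without the division. As a tiny helper (sketch, hypothetical name):

/* Advance a ring cursor with wrap at 'count', as the ?: above does. */
static unsigned short ring_next(unsigned short i, unsigned short count)
{
	i++;
	return (i < count) ? i : 0;
}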
1302