// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx, pool;
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
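	/* Worked example (illustrative only): with the 4-queues-per-pool
	 * VMDq mask (IXGBE_82599_VMDQ_4Q_MASK, 0x7C), __ALIGN_MASK(1,
	 * ~vmdq->mask) evaluates to 4 queues per pool, so a pool offset of
	 * 2 starts at reg_idx 8.  The __ALIGN_MASK(reg_idx, ~vmdq->mask)
	 * calls below round reg_idx up to the next pool boundary in the
	 * same way.
	 */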
	for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			adapter->rx_ring[i]->netdev = adapter->netdev;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = adapter->hw_tcs;

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}
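
/* Illustrative example of the mapping above: with 8 TCs on 82599, tc = 6
 * yields *rx = 6 << 4 = 96 and *tx = (6 + 8) << 3 = 112, matching the
 * RxQs/TC = 16 and TxQs/TC = 8 layout described in the switch body.
 */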

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	u8 num_tcs = adapter->hw_tcs;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->rx_ring[offset + i]->netdev = adapter->netdev;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	u16 reg_idx, pool;
	int i;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	pool = 0;
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
			pool++;
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		}
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++) {
		adapter->rx_ring[i]->reg_idx = reg_idx;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i]->reg_idx = i;
		adapter->rx_ring[i]->netdev = adapter->netdev;
	}
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}
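
/* Illustrative trace (values assumed): with 16 Rx/Tx queues and 8 XDP
 * queues, the Rx and Tx rings above take reg_idx 0-15 and the XDP rings
 * continue on the Tx register space at reg_idx 16-23, since reg_idx
 * carries over from the Tx loop.
 */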

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order the various feature calls is important. It must start with
 * the "most" features enabled at the same time, then trickle down to the
 * least amount of features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate. Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit VMDq instances on the PF by number of Tx queues */
	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
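	/* Worked example (illustrative only): with the 8Q mask there are
	 * 128 / 8 = 16 pools, so vmdq_i = 16 leaves fcoe_i = 0; with the
	 * 4Q mask there are 32 pools, so vmdq_i = 20 leaves
	 * fcoe_i = 32 - 20 = 12 pools for FCoE.
	 */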

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = adapter->hw_tcs;

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}
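	/* Example (illustrative, assuming an 82599-class device exposing
	 * 64 Tx queues): with 4 TCs, rss_i = 64 / 4 = 16 and stays at 16
	 * (16Q mask); with 8 TCs it becomes min(64 / 8, 8) = 8 queues
	 * per TC.
	 */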

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate. If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* limit l2fwd RSS based on total Tx queue limit */
	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if (vmdq_i > 32) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}
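	/* Example (illustrative): a request for 40 pools takes the 64-pool
	 * branch (2 queues per pool, rss_i capped at 2), while 20 pools
	 * take the 32-pool branch, where the quantization above turns an
	 * rss_i limit of 3 into 2.
	 */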

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	/* To support macvlan offload we have to use num_tc to
	 * restrict the queues that can be used by the device.
	 * By doing this we can avoid reporting a false number of
	 * queues.
	 */
	if (vmdq_i > 1)
		netdev_set_num_tc(adapter->netdev, 1);

	/* populate TC0 for use by pool 0 */
	netdev_set_tc_queue(adapter->netdev, 0,
			    adapter->num_rx_queues_per_pool, 0);

	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible. In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = 1;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
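	/* Illustrative trace (numbers assumed): 16 queue pairs on an 8-CPU
	 * system gives vectors = min(16, 8) = 8, plus NON_Q_VECTORS for the
	 * non-queue interrupt, clamped to the device's MSI-X table size
	 * before any allocation is attempted.
	 */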

	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_q_vectors, or the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
	head->next_update = jiffies + 1;
}
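
/* Note: ixgbe_add_ring() pushes onto a singly linked list, so after adding
 * ring A and then ring B to the same container, head->ring == B and
 * B->next == A; iteration visits rings in reverse order of addition.
 */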

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	int node = dev_to_node(&adapter->pdev->dev);
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int cpu = -1;
	int ring_count;
	u8 tcs = adapter->hw_tcs;

	ring_count = txr_count + rxr_count + xdp_count;

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			cpu = cpumask_local_spread(v_idx, node);
			node = cpu_to_node(cpu);
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count),
				GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* Initialize setting for adaptive ITR */
	q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;
	q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
			   IXGBE_ITR_ADAPTIVE_LATENCY;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	/* initialize pointer to rings */
	ring = q_vector->ring;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
		else
			WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err, i;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (adapter->rx_ring[i])
			adapter->rx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i])
			adapter->tx_ring[i]->ring_idx = i;
	}

	for (i = 0; i < adapter->num_xdp_queues; i++) {
		if (adapter->xdp_ring[i])
			adapter->xdp_ring[i]->ring_idx = i;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (adapter->hw_tcs > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->hw_tcs = 0;
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fceof_saidx, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
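	/* Example (illustrative): with a 512-entry ring and next_to_use at
	 * 511, i increments to 512 == count, so next_to_use wraps to 0 and
	 * the ring index never exceeds count - 1.
	 */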

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->fceof_saidx = cpu_to_le32(fceof_saidx);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include "ixgbe_sriov.h"
31
32#ifdef CONFIG_IXGBE_DCB
33/**
34 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
35 * @adapter: board private structure to initialize
36 *
37 * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
38 * will also try to cache the proper offsets if RSS/FCoE are enabled along
39 * with VMDq.
40 *
41 **/
42static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
43{
44#ifdef IXGBE_FCOE
45 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
46#endif /* IXGBE_FCOE */
47 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
48 int i;
49 u16 reg_idx;
50 u8 tcs = netdev_get_num_tc(adapter->netdev);
51
52 /* verify we have DCB queueing enabled before proceeding */
53 if (tcs <= 1)
54 return false;
55
56 /* verify we have VMDq enabled before proceeding */
57 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
58 return false;
59
60 /* start at VMDq register offset for SR-IOV enabled setups */
61 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
62 for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
63 /* If we are greater than indices move to next pool */
64 if ((reg_idx & ~vmdq->mask) >= tcs)
65 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
66 adapter->rx_ring[i]->reg_idx = reg_idx;
67 }
68
69 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
70 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
71 /* If we are greater than indices move to next pool */
72 if ((reg_idx & ~vmdq->mask) >= tcs)
73 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
74 adapter->tx_ring[i]->reg_idx = reg_idx;
75 }
76
77#ifdef IXGBE_FCOE
78 /* nothing to do if FCoE is disabled */
79 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
80 return true;
81
82 /* The work is already done if the FCoE ring is shared */
83 if (fcoe->offset < tcs)
84 return true;
85
86 /* The FCoE rings exist separately, we need to move their reg_idx */
87 if (fcoe->indices) {
88 u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
89 u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
90
91 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
92 for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
93 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
94 adapter->rx_ring[i]->reg_idx = reg_idx;
95 reg_idx++;
96 }
97
98 reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
99 for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
100 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
101 adapter->tx_ring[i]->reg_idx = reg_idx;
102 reg_idx++;
103 }
104 }
105
106#endif /* IXGBE_FCOE */
107 return true;
108}
109
110/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
111static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
112 unsigned int *tx, unsigned int *rx)
113{
114 struct net_device *dev = adapter->netdev;
115 struct ixgbe_hw *hw = &adapter->hw;
116 u8 num_tcs = netdev_get_num_tc(dev);
117
118 *tx = 0;
119 *rx = 0;
120
121 switch (hw->mac.type) {
122 case ixgbe_mac_82598EB:
123 /* TxQs/TC: 4 RxQs/TC: 8 */
124 *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
125 *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
126 break;
127 case ixgbe_mac_82599EB:
128 case ixgbe_mac_X540:
129 case ixgbe_mac_X550:
130 case ixgbe_mac_X550EM_x:
131 case ixgbe_mac_x550em_a:
132 if (num_tcs > 4) {
133 /*
134 * TCs : TC0/1 TC2/3 TC4-7
135 * TxQs/TC: 32 16 8
136 * RxQs/TC: 16 16 16
137 */
138 *rx = tc << 4;
139 if (tc < 3)
140 *tx = tc << 5; /* 0, 32, 64 */
141 else if (tc < 5)
142 *tx = (tc + 2) << 4; /* 80, 96 */
143 else
144 *tx = (tc + 8) << 3; /* 104, 112, 120 */
145 } else {
146 /*
147 * TCs : TC0 TC1 TC2/3
148 * TxQs/TC: 64 32 16
149 * RxQs/TC: 32 32 32
150 */
151 *rx = tc << 5;
152 if (tc < 2)
153 *tx = tc << 6; /* 0, 64 */
154 else
155 *tx = (tc + 4) << 4; /* 96, 112 */
156 }
157 default:
158 break;
159 }
160}
161
162/**
163 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
164 * @adapter: board private structure to initialize
165 *
166 * Cache the descriptor ring offsets for DCB to the assigned rings.
167 *
168 **/
169static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
170{
171 struct net_device *dev = adapter->netdev;
172 unsigned int tx_idx, rx_idx;
173 int tc, offset, rss_i, i;
174 u8 num_tcs = netdev_get_num_tc(dev);
175
176 /* verify we have DCB queueing enabled before proceeding */
177 if (num_tcs <= 1)
178 return false;
179
180 rss_i = adapter->ring_feature[RING_F_RSS].indices;
181
182 for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
183 ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
184 for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
185 adapter->tx_ring[offset + i]->reg_idx = tx_idx;
186 adapter->rx_ring[offset + i]->reg_idx = rx_idx;
187 adapter->tx_ring[offset + i]->dcb_tc = tc;
188 adapter->rx_ring[offset + i]->dcb_tc = tc;
189 }
190 }
191
192 return true;
193}
194
195#endif
196/**
197 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
198 * @adapter: board private structure to initialize
199 *
200 * SR-IOV doesn't use any descriptor rings but changes the default if
201 * no other mapping is used.
202 *
203 */
204static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
205{
206#ifdef IXGBE_FCOE
207 struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
208#endif /* IXGBE_FCOE */
209 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
210 struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
211 int i;
212 u16 reg_idx;
213
214 /* only proceed if VMDq is enabled */
215 if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
216 return false;
217
218 /* start at VMDq register offset for SR-IOV enabled setups */
219 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
220 for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
221#ifdef IXGBE_FCOE
222 /* Allow first FCoE queue to be mapped as RSS */
223 if (fcoe->offset && (i > fcoe->offset))
224 break;
225#endif
226 /* If we are greater than indices move to next pool */
227 if ((reg_idx & ~vmdq->mask) >= rss->indices)
228 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
229 adapter->rx_ring[i]->reg_idx = reg_idx;
230 }
231
232#ifdef IXGBE_FCOE
233 /* FCoE uses a linear block of queues so just assigning 1:1 */
234 for (; i < adapter->num_rx_queues; i++, reg_idx++)
235 adapter->rx_ring[i]->reg_idx = reg_idx;
236
237#endif
238 reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
239 for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
240#ifdef IXGBE_FCOE
241 /* Allow first FCoE queue to be mapped as RSS */
242 if (fcoe->offset && (i > fcoe->offset))
243 break;
244#endif
245 /* If we are greater than indices move to next pool */
246 if ((reg_idx & rss->mask) >= rss->indices)
247 reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
248 adapter->tx_ring[i]->reg_idx = reg_idx;
249 }
250
251#ifdef IXGBE_FCOE
252 /* FCoE uses a linear block of queues so just assigning 1:1 */
253 for (; i < adapter->num_tx_queues; i++, reg_idx++)
254 adapter->tx_ring[i]->reg_idx = reg_idx;
255
256#endif
257
258 return true;
259}
260
261/**
262 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
263 * @adapter: board private structure to initialize
264 *
265 * Cache the descriptor ring offsets for RSS to the assigned rings.
266 *
267 **/
268static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
269{
270 int i;
271
272 for (i = 0; i < adapter->num_rx_queues; i++)
273 adapter->rx_ring[i]->reg_idx = i;
274 for (i = 0; i < adapter->num_tx_queues; i++)
275 adapter->tx_ring[i]->reg_idx = i;
276
277 return true;
278}
279
280/**
281 * ixgbe_cache_ring_register - Descriptor ring to register mapping
282 * @adapter: board private structure to initialize
283 *
284 * Once we know the feature-set enabled for the device, we'll cache
285 * the register offset the descriptor ring is assigned to.
286 *
287 * Note, the order the various feature calls is important. It must start with
288 * the "most" features enabled at the same time, then trickle down to the
289 * least amount of features turned on at once.
290 **/
291static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
292{
293 /* start with default case */
294 adapter->rx_ring[0]->reg_idx = 0;
295 adapter->tx_ring[0]->reg_idx = 0;
296
297#ifdef CONFIG_IXGBE_DCB
298 if (ixgbe_cache_ring_dcb_sriov(adapter))
299 return;
300
301 if (ixgbe_cache_ring_dcb(adapter))
302 return;
303
304#endif
305 if (ixgbe_cache_ring_sriov(adapter))
306 return;
307
308 ixgbe_cache_ring_rss(adapter);
309}
310
311#define IXGBE_RSS_16Q_MASK 0xF
312#define IXGBE_RSS_8Q_MASK 0x7
313#define IXGBE_RSS_4Q_MASK 0x3
314#define IXGBE_RSS_2Q_MASK 0x1
315#define IXGBE_RSS_DISABLED_MASK 0x0
316
317#ifdef CONFIG_IXGBE_DCB
318/**
319 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
320 * @adapter: board private structure to initialize
321 *
322 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
323 * and VM pools where appropriate. Also assign queues based on DCB
324 * priorities and map accordingly..
325 *
326 **/
327static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
328{
329 int i;
330 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
331 u16 vmdq_m = 0;
332#ifdef IXGBE_FCOE
333 u16 fcoe_i = 0;
334#endif
335 u8 tcs = netdev_get_num_tc(adapter->netdev);
336
337 /* verify we have DCB queueing enabled before proceeding */
338 if (tcs <= 1)
339 return false;
340
341 /* verify we have VMDq enabled before proceeding */
342 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
343 return false;
344
345 /* Add starting offset to total pool count */
346 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
347
348 /* 16 pools w/ 8 TC per pool */
349 if (tcs > 4) {
350 vmdq_i = min_t(u16, vmdq_i, 16);
351 vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
352 /* 32 pools w/ 4 TC per pool */
353 } else {
354 vmdq_i = min_t(u16, vmdq_i, 32);
355 vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
356 }
357
358#ifdef IXGBE_FCOE
359 /* queues in the remaining pools are available for FCoE */
360 fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
361
362#endif
363 /* remove the starting offset from the pool count */
364 vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
365
366 /* save features for later use */
367 adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
368 adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
369
370 /*
371 * We do not support DCB, VMDq, and RSS all simultaneously
372 * so we will disable RSS since it is the lowest priority
373 */
374 adapter->ring_feature[RING_F_RSS].indices = 1;
375 adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
376
377 /* disable ATR as it is not supported when VMDq is enabled */
378 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
379
380 adapter->num_rx_pools = vmdq_i;
381 adapter->num_rx_queues_per_pool = tcs;
382
383 adapter->num_tx_queues = vmdq_i * tcs;
384 adapter->num_rx_queues = vmdq_i * tcs;
385
386#ifdef IXGBE_FCOE
387 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
388 struct ixgbe_ring_feature *fcoe;
389
390 fcoe = &adapter->ring_feature[RING_F_FCOE];
391
392 /* limit ourselves based on feature limits */
393 fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
394
395 if (fcoe_i) {
396 /* alloc queues for FCoE separately */
397 fcoe->indices = fcoe_i;
398 fcoe->offset = vmdq_i * tcs;
399
400 /* add queues to adapter */
401 adapter->num_tx_queues += fcoe_i;
402 adapter->num_rx_queues += fcoe_i;
403 } else if (tcs > 1) {
404 /* use queue belonging to FcoE TC */
405 fcoe->indices = 1;
406 fcoe->offset = ixgbe_fcoe_get_tc(adapter);
407 } else {
408 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
409
410 fcoe->indices = 0;
411 fcoe->offset = 0;
412 }
413 }
414
415#endif /* IXGBE_FCOE */
416 /* configure TC to queue mapping */
417 for (i = 0; i < tcs; i++)
418 netdev_set_tc_queue(adapter->netdev, i, 1, i);
419
420 return true;
421}
422
423static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
424{
425 struct net_device *dev = adapter->netdev;
426 struct ixgbe_ring_feature *f;
427 int rss_i, rss_m, i;
428 int tcs;
429
430 /* Map queue offset and counts onto allocated tx queues */
431 tcs = netdev_get_num_tc(dev);
432
433 /* verify we have DCB queueing enabled before proceeding */
434 if (tcs <= 1)
435 return false;
436
437 /* determine the upper limit for our current DCB mode */
438 rss_i = dev->num_tx_queues / tcs;
439 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
440 /* 8 TC w/ 4 queues per TC */
441 rss_i = min_t(u16, rss_i, 4);
442 rss_m = IXGBE_RSS_4Q_MASK;
443 } else if (tcs > 4) {
444 /* 8 TC w/ 8 queues per TC */
445 rss_i = min_t(u16, rss_i, 8);
446 rss_m = IXGBE_RSS_8Q_MASK;
447 } else {
448 /* 4 TC w/ 16 queues per TC */
449 rss_i = min_t(u16, rss_i, 16);
450 rss_m = IXGBE_RSS_16Q_MASK;
451 }
452
453 /* set RSS mask and indices */
454 f = &adapter->ring_feature[RING_F_RSS];
455 rss_i = min_t(int, rss_i, f->limit);
456 f->indices = rss_i;
457 f->mask = rss_m;
458
459 /* disable ATR as it is not supported when multiple TCs are enabled */
460 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
461
462#ifdef IXGBE_FCOE
463 /* FCoE enabled queues require special configuration indexed
464 * by feature specific indices and offset. Here we map FCoE
465 * indices onto the DCB queue pairs allowing FCoE to own
466 * configuration later.
467 */
468 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
469 u8 tc = ixgbe_fcoe_get_tc(adapter);
470
471 f = &adapter->ring_feature[RING_F_FCOE];
472 f->indices = min_t(u16, rss_i, f->limit);
473 f->offset = rss_i * tc;
474 }
475
476#endif /* IXGBE_FCOE */
477 for (i = 0; i < tcs; i++)
478 netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
479
480 adapter->num_tx_queues = rss_i * tcs;
481 adapter->num_rx_queues = rss_i * tcs;
482
483 return true;
484}
485
486#endif
487/**
488 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
489 * @adapter: board private structure to initialize
490 *
491 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
492 * and VM pools where appropriate. If RSS is available, then also try and
493 * enable RSS and map accordingly.
494 *
495 **/
496static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
497{
498 u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
499 u16 vmdq_m = 0;
500 u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
501 u16 rss_m = IXGBE_RSS_DISABLED_MASK;
502#ifdef IXGBE_FCOE
503 u16 fcoe_i = 0;
504#endif
505 bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
506
507 /* only proceed if SR-IOV is enabled */
508 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
509 return false;
510
511 /* Add starting offset to total pool count */
512 vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
513
514 /* double check we are limited to maximum pools */
515 vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
516
517 /* 64 pool mode with 2 queues per pool */
518 if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
519 vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
520 rss_m = IXGBE_RSS_2Q_MASK;
521 rss_i = min_t(u16, rss_i, 2);
522 /* 32 pool mode with up to 4 queues per pool */
523 } else {
524 vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
525 rss_m = IXGBE_RSS_4Q_MASK;
526 /* We can support 4, 2, or 1 queues */
527 rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
528 }

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent pools to allow RSS-like
	 * behavior. To account for this we need to add the FCoE
	 * indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}
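	/* Note that rss_i is grown to the Flow Director feature limit here,
	 * which may exceed the 16-queue RSS ceiling set above; ATR
	 * (hash-based Flow Director) is only enabled when perfect filters
	 * are not in use.
	 */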

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic;
	 * however, it is preferred to avoid that if possible. In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}
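	/* Example of the reservation math above (values are illustrative):
	 * with rss_i = 16, f->limit = 8 and 24 online CPUs, fcoe_i =
	 * min(8 + 16, 24) = 24, so f->indices = 8 and f->offset = 16.
	 * FCoE then owns dedicated queues 16-23 while RSS keeps 0-15,
	 * matching the "24 or more CPUs" case described above.
	 */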

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine. The order here is very
 * important, starting with the largest set of features turned on at once,
 * and ending with the smallest set of features. This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw->mac.max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);
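	/* Budget example: 16 queue pairs on an 8-CPU system request
	 * min(16, 8) + NON_Q_VECTORS vectors, further clamped to the
	 * device's max_msix_vectors if that is smaller.
	 */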
736
737 /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
738 * handler, and (2) an Other (Link Status Change, etc.) handler.
739 */
740 vector_threshold = MIN_MSIX_COUNT;
741
742 adapter->msix_entries = kcalloc(vectors,
743 sizeof(struct msix_entry),
744 GFP_KERNEL);
745 if (!adapter->msix_entries)
746 return -ENOMEM;
747
748 for (i = 0; i < vectors; i++)
749 adapter->msix_entries[i].entry = i;
750
751 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
752 vector_threshold, vectors);
753
754 if (vectors < 0) {
755 /* A negative count of allocated vectors indicates an error in
756 * acquiring within the specified range of MSI-X vectors
757 */
758 e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
759 vectors);
760
761 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
762 kfree(adapter->msix_entries);
763 adapter->msix_entries = NULL;
764
765 return vectors;
766 }
767
768 /* we successfully allocated some number of vectors within our
769 * requested range.
770 */
771 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
772
773 /* Adjust for only the vectors we'll use, which is minimum
774 * of max_q_vectors, or the number of vectors we were allocated.
775 */
776 vectors -= NON_Q_VECTORS;
777 adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);
778
779 return 0;
780}

static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);
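	/* The q_vector and all of its rings come from a single allocation:
	 * the ring structures sit in a flexible array directly behind the
	 * q_vector, so q_vector->ring below indexes into that trailing
	 * array and everything shares the same NUMA node.
	 */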

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}
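	/* ATR steers a flow's Rx traffic back to the queue of the CPU that
	 * transmitted it, so pinning vector v_idx to CPU v_idx (when that
	 * CPU is online) keeps the vector-to-CPU mapping stable for Flow
	 * Director.
	 */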

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
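	/* 64 is the conventional NAPI poll weight (NAPI_POLL_WEIGHT), the
	 * budget handed to ixgbe_poll on each invocation.
	 */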

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* initialize busy poll */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);

#endif
	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}
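	/* Ring indices advance by v_count, so rings are interleaved across
	 * vectors rather than handed out in contiguous blocks: with
	 * v_count = 4, vector 0 takes Tx rings 0, 4, 8, ... while vector 1
	 * takes 1, 5, 9, ... The Rx loop below strides the same way.
	 */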

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	ixgbe_for_each_ring(ring, q_vector->tx)
		adapter->tx_ring[ring->queue_index] = NULL;

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * so we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}
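	/* The loop above gives each remaining vector a fair share,
	 * DIV_ROUND_UP(remaining, vectors left): e.g. 10 Rx + 10 Tx rings
	 * spread over 8 vectors yields per-vector ring counts of
	 * 2, 2, 1, 1, 1, 1, 1, 1. rxr_idx/txr_idx only advance by one per
	 * vector because ixgbe_alloc_q_vector() strides indices by
	 * q_vectors.
	 */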

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
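	/* next_to_use wraps back to slot 0 once this context descriptor
	 * consumes the last entry in the ring.
	 */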

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}