// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	u32 regval;
	u16 pf_q;
	int err;
	/* which Rx queue this is within the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

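	/* the ring base address is programmed to hardware in 128-byte
	 * units, hence the shift by 7
	 */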
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether the VLAN in the inner L2 header is stripped:
	 * if enabled, the inner VLAN is stripped and reported in the receive
	 * descriptor.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increase context priority to pick up profile ID; the
		 * default is 0x01; setting it to 0x03 ensures the profile
		 * gets programmed even if the previous context has the same
		 * priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);
	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));

	return 0;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is the VF number between 0-255
	 * for vmvf_type = VM, it is the VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 * @rxq_idx: Rx queue index
 */
#ifndef CONFIG_PCI_IOV
static
#endif /* !CONFIG_PCI_IOV */
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	/* wait for the change to finish */
	ret = ice_pf_rxq_wait(pf, pf_q, ena);
	if (ret)
		dev_err(&pf->pdev->dev,
			"VSI idx %d Rx ring %d %sable timeout\n",
			vsi->idx, pf_q, (ena ? "en" : "dis"));

	return ret;
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int i, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				    sizeof(*vsi->txq_map), GFP_KERNEL);

	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(&pf->pdev->dev, vsi->txq_map);
err_txq_map:
	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
err_rings:
	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* fall through */
	case ICE_VSI_LB:
		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(&vsi->back->pdev->dev,
			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
				       num_online_cpus());

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			vsi->alloc_rxq = 1;
		else
			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
					       num_online_cpus());

		pf->num_lan_rx = vsi->alloc_rxq;

		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_vf_msix includes (VF miscellaneous vector +
		 * data queue interrupts). Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count
		 */
		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is used to keep the functionality generic. This lets us use this
 * function on any array of pointers.
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

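	/* first try the slot immediately after the search hint */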
	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
			vsi->vsi_num);

	devm_kfree(&pf->pdev->dev, ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;

	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(&pf->pdev->dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
			vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);
	/* updates the PF for this cleared VSI */

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(&pf->pdev->dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);

	vsi->idx = pf->next_vsi;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(&pf->pdev->dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
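	/* roll back the queue bits we set before running out of space */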
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->tx_mapping_mode
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = vsi->rx_mapping_mode
	};
	int ret = 0;

	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	pf = vsi->back;

	if (vsi->rss_hkey_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table.
		 * For the VSI LUT type, the LUT size is 64 bytes.
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
	 * packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support; outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for the queue offset for TC0, and the next
	 * 4 bits for the number of queues allocated to TC0. The number of
	 * queues is a power-of-2.
	 *
	 * If a TC is not enabled, its queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

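		/* e.g. an offset of 8 with pow = 2 encodes four queues
		 * (2^2) starting at queue 8 for this TC
		 */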
		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was calculated correctly from the
	 * enabled TCs for this VSI. Otherwise qcount_rx is always correct
	 * and non-zero, because it is based on the VSI's allocated Rx
	 * queues, which is at least 1.
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct ice_pf *pf;

	pf = vsi->back;

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_LB:
		dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
		return;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
			       ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	int ret = 0;

	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		devm_kfree(&pf->pdev->dev, ctxt);
		return -ENODEV;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_set_rss_vsi_ctx(ctxt, vsi);

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);

	/* Enable MAC Antispoof with new VSI being initialized or updated */
	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		ctxt->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Add VSI failed, err %d\n", ret);
		devm_kfree(&pf->pdev->dev, ctxt);
		return -EIO;
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

	devm_kfree(&pf->pdev->dev, ctxt);
	return ret;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;

	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
			v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(&pf->pdev->dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int v_idx = 0, num_q_vectors;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			vsi->vsi_num);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(&pf->pdev->dev,
		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u16 num_q_vectors;

	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs */
	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
				       vsi->idx);
	if (vsi->base_vector < 0) {
		dev_err(&pf->pdev->dev,
			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, vsi->base_vector);
		return -ENOENT;
	}
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);

		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initialize the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

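	/* rings are spread across vectors as evenly as possible: e.g. five
	 * Tx rings over three vectors map as 2/2/1
	 */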
	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * On a disable request, this function zeroes out the RSS LUT; on an enable
 * request, it reconfigures the RSS LUT.
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
			   GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	devm_kfree(&vsi->back->pdev->dev, lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	int err = 0;
	u8 *lut;

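	/* never spread RSS across more queues than the VSI actually has */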
	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);

	if (status) {
		dev_err(&pf->pdev->dev,
			"set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);

	if (status) {
		dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
			status);
		err = -EIO;
	}

	devm_kfree(&pf->pdev->dev, key);
ice_vsi_cfg_rss_exit:
	devm_kfree(&pf->pdev->dev, lut);
	return err;
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}
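
/* Usage sketch (hypothetical caller, not part of this file): build a
 * temporary list, hand it to the switch code, then free the entries:
 *
 *	LIST_HEAD(add_list);
 *
 *	if (!ice_add_mac_to_list(vsi, &add_list, mac))
 *		ice_add_mac(&pf->hw, &add_list);
 *	ice_free_fltr_list(&pf->pdev->dev, &add_list);
 */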

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
			vid, vsi->vsi_num);
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	int err = 0;

	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&pf->pdev->dev,
			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
			vid, vsi->vsi_num, status);
	} else if (status) {
		dev_err(&pf->pdev->dev,
1667 "Error removing VLAN %d on vsi %i error: %d\n",
			vid, vsi->vsi_num, status);
		err = -EIO;
	}

	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

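	/* the frame must fit the MTU plus L2 header, FCS, and one VLAN tag */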
	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
		vsi->max_frame = vsi->netdev->mtu +
			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = ICE_RXBUF_2048;

	vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @tc_q_idx: queue index within given TC
 * @qg_buf: queue group buffer
 * @tc: TC that Tx ring belongs to
 */
static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
{
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	u8 buf_len = sizeof(*qg_buf);
	enum ice_status status;
	u16 pf_q;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the transmit
	 * comm scheduler queue doorbell.
	 */
	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = tc_q_idx;

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(&pf->pdev->dev,
			"Failed to set LAN Tx queue context, error: %d\n",
			status);
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @offset: offset within vsi->txq_map
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_pf *pf = vsi->back;
	u16 q_idx = 0, i;
	int err = 0;
	u8 tc;

	qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	/* set up and configure the Tx queues for each enabled TC */
	ice_for_each_traffic_class(tc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
			break;

		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
			err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
					      qg_buf, tc);
			if (err)
				goto err_cfg_txqs;

			q_idx++;
		}
	}
err_cfg_txqs:
	devm_kfree(&pf->pdev->dev, qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
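	/* e.g. intrl = 10 us with gran = 2 us programs a limit value of 5 */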
1835 u32 val = intrl / gran;
1836
1837 if (val)
1838 return val | GLINT_RATE_INTRL_ENA_M;
1839 return 0;
1840}
1841
1842/**
1843 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
1844 * @hw: board specific structure
1845 */
1846static void ice_cfg_itr_gran(struct ice_hw *hw)
1847{
1848 u32 regval = rd32(hw, GLINT_CTL);
1849
1850 /* no need to update global register if ITR gran is already set */
1851 if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
1852 (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
1853 GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
1854 (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
1855 GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
1856 (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
1857 GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
1858 (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
1859 GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
1860 return;
1861
1862 regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
1863 GLINT_CTL_ITR_GRAN_200_M) |
1864 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
1865 GLINT_CTL_ITR_GRAN_100_M) |
1866 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
1867 GLINT_CTL_ITR_GRAN_50_M) |
1868 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
1869 GLINT_CTL_ITR_GRAN_25_M);
1870 wr32(hw, GLINT_CTL, regval);
1871}
1872
1873/**
1874 * ice_cfg_itr - configure the initial interrupt throttle values
1875 * @hw: pointer to the HW structure
1876 * @q_vector: interrupt vector that's being configured
1877 *
1878 * Configure interrupt throttling values for the ring containers that are
1879 * associated with the interrupt vector passed in.
1880 */
1881static void
1882ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
1883{
1884 ice_cfg_itr_gran(hw);
1885
1886 if (q_vector->num_ring_rx) {
1887 struct ice_ring_container *rc = &q_vector->rx;
1888
1889 /* if this value is set then don't overwrite with default */
1890 if (!rc->itr_setting)
1891 rc->itr_setting = ICE_DFLT_RX_ITR;
1892
1893 rc->target_itr = ITR_TO_REG(rc->itr_setting);
1894 rc->next_update = jiffies + 1;
1895 rc->current_itr = rc->target_itr;
1896 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1897 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1898 }
1899
1900 if (q_vector->num_ring_tx) {
1901 struct ice_ring_container *rc = &q_vector->tx;
1902
1903 /* if this value is set then don't overwrite with default */
1904 if (!rc->itr_setting)
1905 rc->itr_setting = ICE_DFLT_TX_ITR;
1906
1907 rc->target_itr = ITR_TO_REG(rc->itr_setting);
1908 rc->next_update = jiffies + 1;
1909 rc->current_itr = rc->target_itr;
1910 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1911 ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1912 }
1913}
1914
1915/**
1916 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
1917 * @vsi: the VSI being configured
1918 * @txq: Tx queue being mapped to MSI-X vector
1919 * @msix_idx: MSI-X vector index within the function
1920 * @itr_idx: ITR index of the interrupt cause
1921 *
1922 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
1923 * within the function space.
1924 */
1925#ifdef CONFIG_PCI_IOV
1926void
1927ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1928#else
1929static void
1930ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1931#endif /* CONFIG_PCI_IOV */
1932{
1933 struct ice_pf *pf = vsi->back;
1934 struct ice_hw *hw = &pf->hw;
1935 u32 val;
1936
1937 itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
1938
1939 val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
1940 ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
1941
1942 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1943}
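
/* Illustrative sketch (not part of the driver): mapping Tx queue 0 to
 * MSI-X vector 5 with ITR index 1 composes the cause-control value just
 * as the function above does, i.e.:
 *
 *	val = QINT_TQCTL_CAUSE_ENA_M |
 *	      ((1 << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M) |
 *	      ((5 << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
 *	wr32(hw, QINT_TQCTL(vsi->txq_map[0]), val);
 *
 * ice_cfg_rxq_interrupt() builds the analogous QINT_RQCTL value.
 */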
1944
1945/**
1946 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
1947 * @vsi: the VSI being configured
1948 * @rxq: Rx queue being mapped to MSI-X vector
1949 * @msix_idx: MSI-X vector index within the function
1950 * @itr_idx: ITR index of the interrupt cause
1951 *
1952 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
1953 * within the function space.
1954 */
1955#ifdef CONFIG_PCI_IOV
1956void
1957ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1958#else
1959static void
1960ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1961#endif /* CONFIG_PCI_IOV */
1962{
1963 struct ice_pf *pf = vsi->back;
1964 struct ice_hw *hw = &pf->hw;
1965 u32 val;
1966
1967 itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
1968
1969 val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
1970 ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
1971
1972 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1973
1974 ice_flush(hw);
1975}
1976
1977/**
1978 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1979 * @vsi: the VSI being configured
1980 *
1981 * This configures MSIX mode interrupts for the PF VSI, and should not be used
1982 * for the VF VSI.
1983 */
1984void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1985{
1986 struct ice_pf *pf = vsi->back;
1987 struct ice_hw *hw = &pf->hw;
1988 u32 txq = 0, rxq = 0;
1989 int i, q;
1990
1991 for (i = 0; i < vsi->num_q_vectors; i++) {
1992 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1993 u16 reg_idx = q_vector->reg_idx;
1994
1995 ice_cfg_itr(hw, q_vector);
1996
1997 wr32(hw, GLINT_RATE(reg_idx),
1998 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1999
2000 /* Both the Transmit Queue Interrupt Cause Control register
2001 * and the Receive Queue Interrupt Cause Control register
2002 * expect the MSIX_INDX field to be the vector index within
2003 * the function space, not the absolute vector index across
2004 * the PF or across the device.
2005 * For SR-IOV VF VSIs the queue vector index always starts
2006 * at 1, since the first vector index (0) is used for the
2007 * OICR in VF space. Since VMDq and other PF VSIs are within
2008 * the PF function space, use the vector index that is
2009 * tracked for this PF.
2010 */
2011 for (q = 0; q < q_vector->num_ring_tx; q++) {
2012 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
2013 q_vector->tx.itr_idx);
2014 txq++;
2015 }
2016
2017 for (q = 0; q < q_vector->num_ring_rx; q++) {
2018 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
2019 q_vector->rx.itr_idx);
2020 rxq++;
2021 }
2022 }
2023}
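
/* Illustrative sketch (not part of the driver): with two q_vectors that
 * each carry two Tx and two Rx rings, the loops above walk the queues
 * linearly (reg_idx values R0/R1 are hypothetical):
 *
 *	vector 0 (reg_idx R0): txq 0-1, rxq 0-1
 *	vector 1 (reg_idx R1): txq 2-3, rxq 2-3
 *
 * so every queue's cause-control register points at its vector's reg_idx.
 */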
2024
2025/**
2026 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
2027 * @vsi: the VSI being changed
2028 */
2029int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
2030{
2031 struct device *dev = &vsi->back->pdev->dev;
2032 struct ice_hw *hw = &vsi->back->hw;
2033 struct ice_vsi_ctx *ctxt;
2034 enum ice_status status;
2035 int ret = 0;
2036
2037 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2038 if (!ctxt)
2039 return -ENOMEM;
2040
2041 /* Here we are configuring the VSI to let the driver add VLAN tags by
2042 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
2043 * insertion happens in the Tx hot path, in ice_tx_map.
2044 */
2045 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
2046
2047 /* Preserve existing VLAN strip setting */
2048 ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
2049 ICE_AQ_VSI_VLAN_EMOD_M);
2050
2051 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
2052
2053 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2054 if (status) {
2055 dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
2056 status, hw->adminq.sq_last_status);
2057 ret = -EIO;
2058 goto out;
2059 }
2060
2061 vsi->info.vlan_flags = ctxt->info.vlan_flags;
2062out:
2063 devm_kfree(dev, ctxt);
2064 return ret;
2065}
2066
2067/**
2068 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
2069 * @vsi: the VSI being changed
2070 * @ena: boolean value indicating if this is an enable or disable request
2071 */
2072int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
2073{
2074 struct device *dev = &vsi->back->pdev->dev;
2075 struct ice_hw *hw = &vsi->back->hw;
2076 struct ice_vsi_ctx *ctxt;
2077 enum ice_status status;
2078 int ret = 0;
2079
2080 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2081 if (!ctxt)
2082 return -ENOMEM;
2083
2084 /* Here we are configuring what the VSI should do with the VLAN tag in
2085 * the Rx packet. We can either leave the tag in the packet or put it in
2086 * the Rx descriptor.
2087 */
2088 if (ena)
2089 /* Strip VLAN tag from Rx packet and put it in the desc */
2090 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2091 else
2092 /* Disable stripping. Leave tag in packet */
2093 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2094
2095 /* Allow all packets untagged/tagged */
2096 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
2097
2098 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
2099
2100 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2101 if (status) {
2102 dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
2103 ena, status, hw->adminq.sq_last_status);
2104 ret = -EIO;
2105 goto out;
2106 }
2107
2108 vsi->info.vlan_flags = ctxt->info.vlan_flags;
2109out:
2110 devm_kfree(dev, ctxt);
2111 return ret;
2112}
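
/* Illustrative sketch (not part of the driver): a hypothetical caller in
 * the ndo_set_features path could toggle stripping based on the netdev
 * feature bit; ret and features are assumed to exist in that caller:
 *
 *	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 *		ret = ice_vsi_manage_vlan_stripping(vsi, true);
 *	else
 *		ret = ice_vsi_manage_vlan_stripping(vsi, false);
 */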
2113
2114/**
2115 * ice_vsi_start_rx_rings - start VSI's Rx rings
2116 * @vsi: the VSI whose rings are to be started
2117 *
2118 * Returns 0 on success and a negative value on error
2119 */
2120int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
2121{
2122 return ice_vsi_ctrl_rx_rings(vsi, true);
2123}
2124
2125/**
2126 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
2127 * @vsi: the VSI
2128 *
2129 * Returns 0 on success and a negative value on error
2130 */
2131int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
2132{
2133 return ice_vsi_ctrl_rx_rings(vsi, false);
2134}
2135
2136/**
2137 * ice_trigger_sw_intr - trigger a software interrupt
2138 * @hw: pointer to the HW structure
2139 * @q_vector: interrupt vector to trigger the software interrupt for
2140 */
2141void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
2142{
2143 wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
2144 (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
2145 GLINT_DYN_CTL_SWINT_TRIG_M |
2146 GLINT_DYN_CTL_INTENA_M);
2147}
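
/* Illustrative sketch (not part of the driver): assuming ICE_ITR_NONE == 3
 * and the usual GLINT_DYN_CTL bit layout (INTENA at bit 0, SWINT_TRIG at
 * bit 2, ITR_INDX at shift 3), the value written above would be:
 *
 *	(3 << 3) | BIT(2) | BIT(0) == 0x1d
 *
 * which fires a software interrupt without touching any ITR timer.
 */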
2148
2149/**
2150 * ice_vsi_stop_tx_ring - Disable single Tx ring
2151 * @vsi: the VSI being configured
2152 * @rst_src: reset source
2153 * @rel_vmvf_num: Relative ID of VF/VM
2154 * @ring: Tx ring to be stopped
2155 * @txq_meta: Meta data of Tx ring to be stopped
2156 */
2157#ifndef CONFIG_PCI_IOV
2158static
2159#endif /* !CONFIG_PCI_IOV */
2160int
2161ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2162 u16 rel_vmvf_num, struct ice_ring *ring,
2163 struct ice_txq_meta *txq_meta)
2164{
2165 struct ice_pf *pf = vsi->back;
2166 struct ice_q_vector *q_vector;
2167 struct ice_hw *hw = &pf->hw;
2168 enum ice_status status;
2169 u32 val;
2170
2171 /* clear cause_ena bit for disabled queues */
2172 val = rd32(hw, QINT_TQCTL(ring->reg_idx));
2173 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2174 wr32(hw, QINT_TQCTL(ring->reg_idx), val);
2175
2176 /* software is expected to wait for 100 ns */
2177 ndelay(100);
2178
2179 /* trigger a software interrupt for the vector
2180 * associated to the queue to schedule NAPI handler
2181 */
2182 q_vector = ring->q_vector;
2183 if (q_vector)
2184 ice_trigger_sw_intr(hw, q_vector);
2185
2186 status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
2187 txq_meta->tc, 1, &txq_meta->q_handle,
2188 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
2189 rel_vmvf_num, NULL);
2190
2191 /* if the disable queue command was exercised during an
2192 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
2193 * This is not an error as the reset operation disables
2194 * queues at the hardware level anyway.
2195 */
2196 if (status == ICE_ERR_RESET_ONGOING) {
2197 dev_dbg(&vsi->back->pdev->dev,
2198 "Reset in progress. LAN Tx queues already disabled\n");
2199 } else if (status == ICE_ERR_DOES_NOT_EXIST) {
2200 dev_dbg(&vsi->back->pdev->dev,
2201 "LAN Tx queues do not exist, nothing to disable\n");
2202 } else if (status) {
2203 dev_err(&vsi->back->pdev->dev,
2204 "Failed to disable LAN Tx queues, error: %d\n", status);
2205 return -ENODEV;
2206 }
2207
2208 return 0;
2209}
2210
2211/**
2212 * ice_fill_txq_meta - Prepare the Tx queue's meta data
2213 * @vsi: VSI that the ring belongs to
2214 * @ring: ring that txq_meta will be based on
2215 * @txq_meta: a helper struct that wraps Tx queue's information
2216 *
2217 * Set up a helper struct that will contain all the necessary fields
2218 * needed to stop the Tx queue
2219 */
2220#ifndef CONFIG_PCI_IOV
2221static
2222#endif /* !CONFIG_PCI_IOV */
2223void
2224ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
2225 struct ice_txq_meta *txq_meta)
2226{
2227 u8 tc = 0;
2228
2229#ifdef CONFIG_DCB
2230 tc = ring->dcb_tc;
2231#endif /* CONFIG_DCB */
2232 txq_meta->q_id = ring->reg_idx;
2233 txq_meta->q_teid = ring->txq_teid;
2234 txq_meta->q_handle = ring->q_handle;
2235 txq_meta->vsi_idx = vsi->idx;
2236 txq_meta->tc = tc;
2237}
2238
2239/**
2240 * ice_vsi_stop_tx_rings - Disable Tx rings
2241 * @vsi: the VSI being configured
2242 * @rst_src: reset source
2243 * @rel_vmvf_num: Relative ID of VF/VM
2244 * @rings: Tx ring array to be stopped
2245 */
2246static int
2247ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2248 u16 rel_vmvf_num, struct ice_ring **rings)
2249{
2250 u16 i, q_idx = 0;
2251 int status;
2252 u8 tc;
2253
2254 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2255 return -EINVAL;
2256
2257 /* set up the Tx queue list to be disabled for each enabled TC */
2258 ice_for_each_traffic_class(tc) {
2259 if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
2260 break;
2261
2262 for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
2263 struct ice_txq_meta txq_meta = { };
2264
2265 if (!rings || !rings[q_idx])
2266 return -EINVAL;
2267
2268 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2269 status = ice_vsi_stop_tx_ring(vsi, rst_src,
2270 rel_vmvf_num,
2271 rings[q_idx], &txq_meta);
2272
2273 if (status)
2274 return status;
2275
2276 q_idx++;
2277 }
2278 }
2279
2280 return 0;
2281}
2282
2283/**
2284 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2285 * @vsi: the VSI being configured
2286 * @rst_src: reset source
2287 * @rel_vmvf_num: Relative ID of VF/VM
2288 */
2289int
2290ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2291 u16 rel_vmvf_num)
2292{
2293 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
2294}
2295
2296/**
2297 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
2298 * @vsi: VSI to enable or disable VLAN pruning on
2299 * @ena: set to true to enable VLAN pruning and false to disable it
2300 * @vlan_promisc: when true (VLAN promiscuous mode), do not mark the security/switch sections as valid
2301 *
2302 * returns 0 if VSI is updated, negative otherwise
2303 */
2304int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
2305{
2306 struct ice_vsi_ctx *ctxt;
2307 struct device *dev;
2308 struct ice_pf *pf;
2309 int status;
2310
2311 if (!vsi)
2312 return -EINVAL;
2313
2314 pf = vsi->back;
2315 dev = &pf->pdev->dev;
2316 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2317 if (!ctxt)
2318 return -ENOMEM;
2319
2320 ctxt->info = vsi->info;
2321
2322 if (ena) {
2323 ctxt->info.sec_flags |=
2324 ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2325 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2326 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2327 } else {
2328 ctxt->info.sec_flags &=
2329 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2330 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2331 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2332 }
2333
2334 if (!vlan_promisc)
2335 ctxt->info.valid_sections =
2336 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
2337 ICE_AQ_VSI_PROP_SW_VALID);
2338
2339 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
2340 if (status) {
2341 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
2342 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
2343 pf->hw.adminq.sq_last_status);
2344 goto err_out;
2345 }
2346
2347 vsi->info.sec_flags = ctxt->info.sec_flags;
2348 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
2349
2350 devm_kfree(dev, ctxt);
2351 return 0;
2352
2353err_out:
2354 devm_kfree(dev, ctxt);
2355 return -EIO;
2356}
2357
2358static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2359{
2360 struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
2361
2362 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
2363 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
2364}
2365
2366/**
2367 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
2368 * @vsi: VSI to set the q_vectors register index on
2369 */
2370static int
2371ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2372{
2373 u16 i;
2374
2375 if (!vsi || !vsi->q_vectors)
2376 return -EINVAL;
2377
2378 ice_for_each_q_vector(vsi, i) {
2379 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2380
2381 if (!q_vector) {
2382 dev_err(&vsi->back->pdev->dev,
2383 "Failed to set reg_idx on q_vector %d VSI %d\n",
2384 i, vsi->vsi_num);
2385 goto clear_reg_idx;
2386 }
2387
2388 if (vsi->type == ICE_VSI_VF) {
2389 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
2390
2391 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
2392 } else {
2393 q_vector->reg_idx =
2394 q_vector->v_idx + vsi->base_vector;
2395 }
2396 }
2397
2398 return 0;
2399
2400clear_reg_idx:
2401 ice_for_each_q_vector(vsi, i) {
2402 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2403
2404 if (q_vector)
2405 q_vector->reg_idx = 0;
2406 }
2407
2408 return -EINVAL;
2409}
2410
2411/**
2412 * ice_vsi_add_rem_eth_mac - add or remove an ethertype-based filter rule for the VSI
2413 * @vsi: the VSI being configured
2414 * @add_rule: boolean value to add or remove ethertype filter rule
2415 */
2416static void
2417ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
2418{
2419 struct ice_fltr_list_entry *list;
2420 struct ice_pf *pf = vsi->back;
2421 LIST_HEAD(tmp_add_list);
2422 enum ice_status status;
2423
2424 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2425 if (!list)
2426 return;
2427
2428 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2429 list->fltr_info.fltr_act = ICE_DROP_PACKET;
2430 list->fltr_info.flag = ICE_FLTR_TX;
2431 list->fltr_info.src_id = ICE_SRC_ID_VSI;
2432 list->fltr_info.vsi_handle = vsi->idx;
2433 list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
2434
2435 INIT_LIST_HEAD(&list->list_entry);
2436 list_add(&list->list_entry, &tmp_add_list);
2437
2438 if (add_rule)
2439 status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2440 else
2441 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2442
2443 if (status)
2444 dev_err(&pf->pdev->dev,
2445 "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
2446 vsi->vsi_num, status);
2447
2448 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
2449}
2450
2451/**
2452 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2453 * @vsi: the VSI being configured
2454 * @tx: bool to determine Tx or Rx rule
2455 * @create: bool to determine create or remove Rule
2456 */
2457void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2458{
2459 struct ice_fltr_list_entry *list;
2460 struct ice_pf *pf = vsi->back;
2461 LIST_HEAD(tmp_add_list);
2462 enum ice_status status;
2463
2464 list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2465 if (!list)
2466 return;
2467
2468 list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2469 list->fltr_info.vsi_handle = vsi->idx;
2470 list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
2471
2472 if (tx) {
2473 list->fltr_info.fltr_act = ICE_DROP_PACKET;
2474 list->fltr_info.flag = ICE_FLTR_TX;
2475 list->fltr_info.src_id = ICE_SRC_ID_VSI;
2476 } else {
2477 list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2478 list->fltr_info.flag = ICE_FLTR_RX;
2479 list->fltr_info.src_id = ICE_SRC_ID_LPORT;
2480 }
2481
2482 INIT_LIST_HEAD(&list->list_entry);
2483 list_add(&list->list_entry, &tmp_add_list);
2484
2485 if (create)
2486 status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2487 else
2488 status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2489
2490 if (status)
2491 dev_err(&pf->pdev->dev,
2492 "Fail %s %s LLDP rule on VSI %i error: %d\n",
2493 create ? "adding" : "removing", tx ? "TX" : "RX",
2494 vsi->vsi_num, status);
2495
2496 ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
2497}
2498
2499/**
2500 * ice_vsi_setup - Set up a VSI by a given type
2501 * @pf: board private structure
2502 * @pi: pointer to the port_info instance
2503 * @type: VSI type
2504 * @vf_id: defines VF ID to which this VSI connects. This field is only
2505 * meaningful for the ICE_VSI_VF VSI type; for all other VSI types,
2506 * pass ICE_INVAL_VFID.
2507 *
2508 * This allocates the sw VSI structure and its queue resources.
2509 *
2510 * Returns pointer to the successfully allocated and configured VSI sw struct on
2511 * success, NULL on failure.
2512 */
2513struct ice_vsi *
2514ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2515 enum ice_vsi_type type, u16 vf_id)
2516{
2517 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2518 struct device *dev = &pf->pdev->dev;
2519 enum ice_status status;
2520 struct ice_vsi *vsi;
2521 int ret, i;
2522
2523 if (type == ICE_VSI_VF)
2524 vsi = ice_vsi_alloc(pf, type, vf_id);
2525 else
2526 vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
2527
2528 if (!vsi) {
2529 dev_err(dev, "could not allocate VSI\n");
2530 return NULL;
2531 }
2532
2533 vsi->port_info = pi;
2534 vsi->vsw = pf->first_sw;
2535 if (vsi->type == ICE_VSI_PF)
2536 vsi->ethtype = ETH_P_PAUSE;
2537
2538 if (vsi->type == ICE_VSI_VF)
2539 vsi->vf_id = vf_id;
2540
2541 if (ice_vsi_get_qs(vsi)) {
2542 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2543 vsi->idx);
2544 goto unroll_get_qs;
2545 }
2546
2547 /* set RSS capabilities */
2548 ice_vsi_set_rss_params(vsi);
2549
2550 /* set TC configuration */
2551 ice_vsi_set_tc_cfg(vsi);
2552
2553 /* create the VSI */
2554 ret = ice_vsi_init(vsi);
2555 if (ret)
2556 goto unroll_get_qs;
2557
2558 switch (vsi->type) {
2559 case ICE_VSI_PF:
2560 ret = ice_vsi_alloc_q_vectors(vsi);
2561 if (ret)
2562 goto unroll_vsi_init;
2563
2564 ret = ice_vsi_setup_vector_base(vsi);
2565 if (ret)
2566 goto unroll_alloc_q_vector;
2567
2568 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2569 if (ret)
2570 goto unroll_vector_base;
2571
2572 ret = ice_vsi_alloc_rings(vsi);
2573 if (ret)
2574 goto unroll_vector_base;
2575
2576 ice_vsi_map_rings_to_vectors(vsi);
2577
2578 /* Do not exit if configuring RSS had an issue; at least
2579 * traffic is still received on the first queue. Hence there
2580 * is no need to capture the return value.
2581 */
2582 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2583 ice_vsi_cfg_rss_lut_key(vsi);
2584 break;
2585 case ICE_VSI_VF:
2586 /* The VF driver will take care of creating the netdev for this
2587 * VSI type and will map queues to vectors through virtchnl; the
2588 * PF driver only creates a VSI and the corresponding structures
2589 * for bookkeeping purposes.
2590 */
2591 ret = ice_vsi_alloc_q_vectors(vsi);
2592 if (ret)
2593 goto unroll_vsi_init;
2594
2595 ret = ice_vsi_alloc_rings(vsi);
2596 if (ret)
2597 goto unroll_alloc_q_vector;
2598
2599 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2600 if (ret)
2601 goto unroll_vector_base;
2602
2603 /* Do not exit if configuring RSS had an issue; at least
2604 * traffic is still received on the first queue. Hence there
2605 * is no need to capture the return value.
2606 */
2607 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2608 ice_vsi_cfg_rss_lut_key(vsi);
2609 break;
2610 case ICE_VSI_LB:
2611 ret = ice_vsi_alloc_rings(vsi);
2612 if (ret)
2613 goto unroll_vsi_init;
2614 break;
2615 default:
2616 /* clean up the resources and exit */
2617 goto unroll_vsi_init;
2618 }
2619
2620 /* configure VSI nodes based on number of queues and TC's */
2621 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2622 max_txqs[i] = vsi->alloc_txq;
2623
2624 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2625 max_txqs);
2626 if (status) {
2627 dev_err(&pf->pdev->dev,
2628 "VSI %d failed lan queue config, error %d\n",
2629 vsi->vsi_num, status);
2630 goto unroll_vector_base;
2631 }
2632
2633 /* Add a switch rule to drop all Tx flow control frames (lookup
2634 * type ETHERTYPE) coming from VSIs, restricting a malicious VF from
2635 * sending out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2636 * The rule is added once for the PF VSI in order to create the appropriate
2637 * recipe, since the VSI/VSI list is ignored with the drop action.
2638 * Also add rules to handle LLDP Tx and Rx packets: Tx LLDP packets
2639 * need to be dropped so that VFs cannot send LLDP packets to reconfigure
2640 * DCB settings in the HW, and if the FW DCBX engine is not running
2641 * then Rx LLDP packets need to be redirected up the stack.
2642 */
2643 if (!ice_is_safe_mode(pf)) {
2644 if (vsi->type == ICE_VSI_PF) {
2645 ice_vsi_add_rem_eth_mac(vsi, true);
2646
2647 /* Tx LLDP packets */
2648 ice_cfg_sw_lldp(vsi, true, true);
2649
2650 /* Rx LLDP packets */
2651 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2652 ice_cfg_sw_lldp(vsi, false, true);
2653 }
2654 }
2655
2656 return vsi;
2657
2658unroll_vector_base:
2659 /* reclaim SW interrupts back to the common pool */
2660 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2661 pf->num_avail_sw_msix += vsi->num_q_vectors;
2662unroll_alloc_q_vector:
2663 ice_vsi_free_q_vectors(vsi);
2664unroll_vsi_init:
2665 ice_vsi_delete(vsi);
2666unroll_get_qs:
2667 ice_vsi_put_qs(vsi);
2668 ice_vsi_clear(vsi);
2669
2670 return NULL;
2671}
2672
2673/**
2674 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2675 * @vsi: the VSI being cleaned up
2676 */
2677static void ice_vsi_release_msix(struct ice_vsi *vsi)
2678{
2679 struct ice_pf *pf = vsi->back;
2680 struct ice_hw *hw = &pf->hw;
2681 u32 txq = 0;
2682 u32 rxq = 0;
2683 int i, q;
2684
2685 for (i = 0; i < vsi->num_q_vectors; i++) {
2686 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2687 u16 reg_idx = q_vector->reg_idx;
2688
2689 wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
2690 wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
2691 for (q = 0; q < q_vector->num_ring_tx; q++) {
2692 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2693 txq++;
2694 }
2695
2696 for (q = 0; q < q_vector->num_ring_rx; q++) {
2697 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2698 rxq++;
2699 }
2700 }
2701
2702 ice_flush(hw);
2703}
2704
2705/**
2706 * ice_vsi_free_irq - Free the IRQ association with the OS
2707 * @vsi: the VSI being configured
2708 */
2709void ice_vsi_free_irq(struct ice_vsi *vsi)
2710{
2711 struct ice_pf *pf = vsi->back;
2712 int base = vsi->base_vector;
2713 int i;
2714
2715 if (!vsi->q_vectors || !vsi->irqs_ready)
2716 return;
2717
2718 ice_vsi_release_msix(vsi);
2719 if (vsi->type == ICE_VSI_VF)
2720 return;
2721
2722 vsi->irqs_ready = false;
2723 ice_for_each_q_vector(vsi, i) {
2724 u16 vector = i + base;
2725 int irq_num;
2726
2727 irq_num = pf->msix_entries[vector].vector;
2728
2729 /* free only the irqs that were actually requested */
2730 if (!vsi->q_vectors[i] ||
2731 !(vsi->q_vectors[i]->num_ring_tx ||
2732 vsi->q_vectors[i]->num_ring_rx))
2733 continue;
2734
2735 /* clear the affinity notifier in the IRQ descriptor */
2736 irq_set_affinity_notifier(irq_num, NULL);
2737
2738 /* clear the affinity_mask in the IRQ descriptor */
2739 irq_set_affinity_hint(irq_num, NULL);
2740 synchronize_irq(irq_num);
2741 devm_free_irq(&pf->pdev->dev, irq_num,
2742 vsi->q_vectors[i]);
2743 }
2744}
2745
2746/**
2747 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2748 * @vsi: the VSI having resources freed
2749 */
2750void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2751{
2752 int i;
2753
2754 if (!vsi->tx_rings)
2755 return;
2756
2757 ice_for_each_txq(vsi, i)
2758 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2759 ice_free_tx_ring(vsi->tx_rings[i]);
2760}
2761
2762/**
2763 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2764 * @vsi: the VSI having resources freed
2765 */
2766void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2767{
2768 int i;
2769
2770 if (!vsi->rx_rings)
2771 return;
2772
2773 ice_for_each_rxq(vsi, i)
2774 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2775 ice_free_rx_ring(vsi->rx_rings[i]);
2776}
2777
2778/**
2779 * ice_vsi_close - Shut down a VSI
2780 * @vsi: the VSI being shut down
2781 */
2782void ice_vsi_close(struct ice_vsi *vsi)
2783{
2784 if (!test_and_set_bit(__ICE_DOWN, vsi->state))
2785 ice_down(vsi);
2786
2787 ice_vsi_free_irq(vsi);
2788 ice_vsi_free_tx_rings(vsi);
2789 ice_vsi_free_rx_rings(vsi);
2790}
2791
2792/**
2793 * ice_free_res - free a block of resources
2794 * @res: pointer to the resource
2795 * @index: starting index previously returned by ice_get_res
2796 * @id: identifier to track owner
2797 *
2798 * Returns number of resources freed
2799 */
2800int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
2801{
2802 int count = 0;
2803 int i;
2804
2805 if (!res || index >= res->end)
2806 return -EINVAL;
2807
2808 id |= ICE_RES_VALID_BIT;
2809 for (i = index; i < res->end && res->list[i] == id; i++) {
2810 res->list[i] = 0;
2811 count++;
2812 }
2813
2814 return count;
2815}
2816
2817/**
2818 * ice_search_res - Search the tracker for a block of resources
2819 * @res: pointer to the resource
2820 * @needed: size of the block needed
2821 * @id: identifier to track owner
2822 *
2823 * Returns the base item index of the block, or -ENOMEM for error
2824 */
2825static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
2826{
2827 int start = 0, end = 0;
2828
2829 if (needed > res->end)
2830 return -ENOMEM;
2831
2832 id |= ICE_RES_VALID_BIT;
2833
2834 do {
2835 /* skip already allocated entries */
2836 if (res->list[end++] & ICE_RES_VALID_BIT) {
2837 start = end;
2838 if ((start + needed) > res->end)
2839 break;
2840 }
2841
2842 if (end == (start + needed)) {
2843 int i = start;
2844
2845 /* there was enough, so assign it to the requestor */
2846 while (i != end)
2847 res->list[i++] = id;
2848
2849 return start;
2850 }
2851 } while (end < res->end);
2852
2853 return -ENOMEM;
2854}
2855
2856/**
2857 * ice_get_res - get a block of resources
2858 * @pf: board private structure
2859 * @res: pointer to the resource
2860 * @needed: size of the block needed
2861 * @id: identifier to track owner
2862 *
2863 * Returns the base item index of the block, or negative for error
2864 */
2865int
2866ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
2867{
2868 if (!res || !pf)
2869 return -EINVAL;
2870
2871 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
2872 dev_err(&pf->pdev->dev,
2873 "param err: needed=%d, num_entries = %d id=0x%04x\n",
2874 needed, res->num_entries, id);
2875 return -EINVAL;
2876 }
2877
2878 return ice_search_res(res, needed, id);
2879}
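
/* Illustrative sketch (not part of the driver): the tracker hands out
 * contiguous blocks tagged with the owner ID. For example:
 *
 *	int base = ice_get_res(pf, pf->irq_tracker, 3, vsi->idx);
 *	if (base >= 0)
 *		... use entries base..base+2 ...
 *	ice_free_res(pf->irq_tracker, base, vsi->idx);	// returns 3
 *
 * ice_free_res() only clears consecutive entries still tagged with the
 * same ID, so a partial or foreign block is left untouched.
 */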
2880
2881/**
2882 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2883 * @vsi: the VSI being un-configured
2884 */
2885void ice_vsi_dis_irq(struct ice_vsi *vsi)
2886{
2887 int base = vsi->base_vector;
2888 struct ice_pf *pf = vsi->back;
2889 struct ice_hw *hw = &pf->hw;
2890 u32 val;
2891 int i;
2892
2893 /* disable interrupt causation from each queue */
2894 if (vsi->tx_rings) {
2895 ice_for_each_txq(vsi, i) {
2896 if (vsi->tx_rings[i]) {
2897 u16 reg;
2898
2899 reg = vsi->tx_rings[i]->reg_idx;
2900 val = rd32(hw, QINT_TQCTL(reg));
2901 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2902 wr32(hw, QINT_TQCTL(reg), val);
2903 }
2904 }
2905 }
2906
2907 if (vsi->rx_rings) {
2908 ice_for_each_rxq(vsi, i) {
2909 if (vsi->rx_rings[i]) {
2910 u16 reg;
2911
2912 reg = vsi->rx_rings[i]->reg_idx;
2913 val = rd32(hw, QINT_RQCTL(reg));
2914 val &= ~QINT_RQCTL_CAUSE_ENA_M;
2915 wr32(hw, QINT_RQCTL(reg), val);
2916 }
2917 }
2918 }
2919
2920 /* disable each interrupt */
2921 ice_for_each_q_vector(vsi, i) {
2922 if (!vsi->q_vectors[i])
2923 continue;
2924 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2925 }
2926
2927 ice_flush(hw);
2928
2929 /* don't call synchronize_irq() for VFs from the host */
2930 if (vsi->type == ICE_VSI_VF)
2931 return;
2932
2933 ice_for_each_q_vector(vsi, i)
2934 synchronize_irq(pf->msix_entries[i + base].vector);
2935}
2936
2937/**
2938 * ice_napi_del - Remove NAPI handler for the VSI
2939 * @vsi: VSI for which NAPI handler is to be removed
2940 */
2941void ice_napi_del(struct ice_vsi *vsi)
2942{
2943 int v_idx;
2944
2945 if (!vsi->netdev)
2946 return;
2947
2948 ice_for_each_q_vector(vsi, v_idx)
2949 netif_napi_del(&vsi->q_vectors[v_idx]->napi);
2950}
2951
2952/**
2953 * ice_vsi_release - Delete a VSI and free its resources
2954 * @vsi: the VSI being removed
2955 *
2956 * Returns 0 on success or < 0 on error
2957 */
2958int ice_vsi_release(struct ice_vsi *vsi)
2959{
2960 struct ice_pf *pf;
2961
2962 if (!vsi->back)
2963 return -ENODEV;
2964 pf = vsi->back;
2965
2966 /* do not unregister while driver is in the reset recovery pending
2967 * state. Since reset/rebuild happens through PF service task workqueue,
2968 * it's not a good idea to unregister a netdev associated with the
2969 * PF that is currently running the work queue items. This is done to
2970 * avoid check_flush_dependency() warning on this wq
2971 */
2972 if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
2973 unregister_netdev(vsi->netdev);
2974
2975 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2976 ice_rss_clean(vsi);
2977
2978 /* Disable VSI and free resources */
2979 if (vsi->type != ICE_VSI_LB)
2980 ice_vsi_dis_irq(vsi);
2981 ice_vsi_close(vsi);
2982
2983 /* SR-IOV determines needed MSIX resources all at once instead of per
2984 * VSI since when VFs are spawned we know how many VFs there are and how
2985 * many interrupts each VF needs. SR-IOV MSIX resources are also
2986 * cleared in the same manner.
2987 */
2988 if (vsi->type != ICE_VSI_VF) {
2989 /* reclaim SW interrupts back to the common pool */
2990 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2991 pf->num_avail_sw_msix += vsi->num_q_vectors;
2992 }
2993
2994 if (!ice_is_safe_mode(pf)) {
2995 if (vsi->type == ICE_VSI_PF) {
2996 ice_vsi_add_rem_eth_mac(vsi, false);
2997 ice_cfg_sw_lldp(vsi, true, false);
2998 /* The Rx rule will only exist to remove if the LLDP FW
2999 * engine is currently stopped
3000 */
3001 if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
3002 ice_cfg_sw_lldp(vsi, false, false);
3003 }
3004 }
3005
3006 ice_remove_vsi_fltr(&pf->hw, vsi->idx);
3007 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3008 ice_vsi_delete(vsi);
3009 ice_vsi_free_q_vectors(vsi);
3010
3011 /* make sure unregister_netdev() was called by checking __ICE_DOWN */
3012 if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
3013 free_netdev(vsi->netdev);
3014 vsi->netdev = NULL;
3015 }
3016
3017 ice_vsi_clear_rings(vsi);
3018
3019 ice_vsi_put_qs(vsi);
3020
3021 /* retain the SW VSI data structure since it is needed to unregister
3022 * and free the VSI netdev when the PF is not in the reset recovery
3023 * pending state, e.g. during rmmod.
3024 */
3025 if (!ice_is_reset_in_progress(pf->state))
3026 ice_vsi_clear(vsi);
3027
3028 return 0;
3029}
3030
3031/**
3032 * ice_vsi_rebuild - Rebuild VSI after reset
3033 * @vsi: VSI to be rebuilt
3034 *
3035 * Returns 0 on success and a negative value on failure
3036 */
3037int ice_vsi_rebuild(struct ice_vsi *vsi)
3038{
3039 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3040 struct ice_vf *vf = NULL;
3041 enum ice_status status;
3042 struct ice_pf *pf;
3043 int ret, i;
3044
3045 if (!vsi)
3046 return -EINVAL;
3047
3048 pf = vsi->back;
3049 if (vsi->type == ICE_VSI_VF)
3050 vf = &pf->vf[vsi->vf_id];
3051
3052 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3053 ice_vsi_free_q_vectors(vsi);
3054
3055 /* SR-IOV determines needed MSIX resources all at once instead of per
3056 * VSI since when VFs are spawned we know how many VFs there are and how
3057 * many interrupts each VF needs. SR-IOV MSIX resources are also
3058 * cleared in the same manner.
3059 */
3060 if (vsi->type != ICE_VSI_VF) {
3061 /* reclaim SW interrupts back to the common pool */
3062 ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
3063 pf->num_avail_sw_msix += vsi->num_q_vectors;
3064 vsi->base_vector = 0;
3065 }
3066
3067 ice_vsi_put_qs(vsi);
3068 ice_vsi_clear_rings(vsi);
3069 ice_vsi_free_arrays(vsi);
3070 ice_dev_onetime_setup(&pf->hw);
3071 if (vsi->type == ICE_VSI_VF)
3072 ice_vsi_set_num_qs(vsi, vf->vf_id);
3073 else
3074 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
3075
3076 ret = ice_vsi_alloc_arrays(vsi);
3077 if (ret < 0)
3078 goto err_vsi;
3079
3080 ice_vsi_get_qs(vsi);
3081 ice_vsi_set_tc_cfg(vsi);
3082
3083 /* Initialize VSI struct elements and create VSI in FW */
3084 ret = ice_vsi_init(vsi);
3085 if (ret < 0)
3086 goto err_vsi;
3087
3089 switch (vsi->type) {
3090 case ICE_VSI_PF:
3091 ret = ice_vsi_alloc_q_vectors(vsi);
3092 if (ret)
3093 goto err_rings;
3094
3095 ret = ice_vsi_setup_vector_base(vsi);
3096 if (ret)
3097 goto err_vectors;
3098
3099 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3100 if (ret)
3101 goto err_vectors;
3102
3103 ret = ice_vsi_alloc_rings(vsi);
3104 if (ret)
3105 goto err_vectors;
3106
3107 ice_vsi_map_rings_to_vectors(vsi);
3108 /* Do not exit if configuring RSS had an issue; at least
3109 * traffic is still received on the first queue. Hence there
3110 * is no need to capture the return value.
3111 */
3112 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3113 ice_vsi_cfg_rss_lut_key(vsi);
3114 break;
3115 case ICE_VSI_VF:
3116 ret = ice_vsi_alloc_q_vectors(vsi);
3117 if (ret)
3118 goto err_rings;
3119
3120 ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3121 if (ret)
3122 goto err_vectors;
3123
3124 ret = ice_vsi_alloc_rings(vsi);
3125 if (ret)
3126 goto err_vectors;
3127
3128 break;
3129 default:
3130 break;
3131 }
3132
3133 /* configure VSI nodes based on number of queues and TC's */
3134 for (i = 0; i < vsi->tc_cfg.numtc; i++)
3135 max_txqs[i] = vsi->alloc_txq;
3136
3137 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3138 max_txqs);
3139 if (status) {
3140 dev_err(&pf->pdev->dev,
3141 "VSI %d failed lan queue config, error %d\n",
3142 vsi->vsi_num, status);
3143 goto err_vectors;
3144 }
3145 return 0;
3146
3147err_vectors:
3148 ice_vsi_free_q_vectors(vsi);
3149err_rings:
3150 if (vsi->netdev) {
3151 vsi->current_netdev_flags = 0;
3152 unregister_netdev(vsi->netdev);
3153 free_netdev(vsi->netdev);
3154 vsi->netdev = NULL;
3155 }
3156err_vsi:
3157 ice_vsi_clear(vsi);
3158 set_bit(__ICE_RESET_FAILED, pf->state);
3159 return ret;
3160}
3161
3162/**
3163 * ice_is_reset_in_progress - check for a reset in progress
3164 * @state: PF state field
3165 */
3166bool ice_is_reset_in_progress(unsigned long *state)
3167{
3168 return test_bit(__ICE_RESET_OICR_RECV, state) ||
3169 test_bit(__ICE_PFR_REQ, state) ||
3170 test_bit(__ICE_CORER_REQ, state) ||
3171 test_bit(__ICE_GLOBR_REQ, state);
3172}
3173
3174#ifdef CONFIG_DCB
3175/**
3176 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3177 * @vsi: VSI being configured
3178 * @ctx: the context buffer returned from AQ VSI update command
3179 */
3180static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3181{
3182 vsi->info.mapping_flags = ctx->info.mapping_flags;
3183 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3184 sizeof(vsi->info.q_mapping));
3185 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3186 sizeof(vsi->info.tc_mapping));
3187}
3188
3189/**
3190 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3191 * @vsi: VSI to be configured
3192 * @ena_tc: TC bitmap
3193 *
3194 * VSI queues expected to be quiesced before calling this function
3195 */
3196int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3197{
3198 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3199 struct ice_vsi_ctx *ctx;
3200 struct ice_pf *pf = vsi->back;
3201 enum ice_status status;
3202 int i, ret = 0;
3203 u8 num_tc = 0;
3204
3205 ice_for_each_traffic_class(i) {
3206 /* build bitmap of enabled TCs */
3207 if (ena_tc & BIT(i))
3208 num_tc++;
3209 /* populate max_txqs per TC */
3210 max_txqs[i] = vsi->alloc_txq;
3211 }
3212
3213 vsi->tc_cfg.ena_tc = ena_tc;
3214 vsi->tc_cfg.numtc = num_tc;
3215
3216 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3217 if (!ctx)
3218 return -ENOMEM;
3219
3220 ctx->vf_num = 0;
3221 ctx->info = vsi->info;
3222
3223 ice_vsi_setup_q_map(vsi, ctx);
3224
3225 /* must indicate which sections of the VSI context are being modified */
3226 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3227 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3228 if (status) {
3229 dev_info(&pf->pdev->dev, "Failed VSI Update\n");
3230 ret = -EIO;
3231 goto out;
3232 }
3233
3234 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3235 max_txqs);
3236
3237 if (status) {
3238 dev_err(&pf->pdev->dev,
3239 "VSI %d failed TC config, error %d\n",
3240 vsi->vsi_num, status);
3241 ret = -EIO;
3242 goto out;
3243 }
3244 ice_vsi_update_q_map(vsi, ctx);
3245 vsi->info.valid_sections = 0;
3246
3247 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3248out:
3249 devm_kfree(&pf->pdev->dev, ctx);
3250 return ret;
3251}
3252#endif /* CONFIG_DCB */
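
/* Illustrative sketch (not part of the driver): ena_tc is a bitmap, so
 * enabling TC0 and TC1 passes ena_tc == 0x3, which the loop above counts
 * as numtc == 2 while still populating max_txqs[] for every TC index:
 *
 *	ice_vsi_cfg_tc(vsi, BIT(0) | BIT(1));
 */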
3253
3254/**
3255 * ice_nvm_version_str - format the NVM version strings
3256 * @hw: ptr to the hardware info
3257 */
3258char *ice_nvm_version_str(struct ice_hw *hw)
3259{
3260 u8 oem_ver, oem_patch, ver_hi, ver_lo;
3261 static char buf[ICE_NVM_VER_LEN];
3262 u16 oem_build;
3263
3264 ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
3265 &ver_lo);
3266
3267 snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
3268 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
3269
3270 return buf;
3271}
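
/* Illustrative sketch (not part of the driver): with hypothetical values
 * ver_hi = 0x1, ver_lo = 0x02, eetrack = 0x80003456 and an OEM version of
 * 1.4.5, the "%x.%02x 0x%x %d.%d.%d" format above yields:
 *
 *	"1.02 0x80003456 1.4.5"
 *
 * Note the static buffer: concurrent callers would share the same storage.
 */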
3272
3273/**
3274 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
3275 * @vsi: the VSI whose MAC filter is being configured
3276 * @macaddr: the MAC address filter entry to add or remove
3277 * @set: Add or delete a MAC filter
3278 *
3279 * Adds or removes MAC address filter entry for VF VSI
3280 */
3281enum ice_status
3282ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
3283{
3284 LIST_HEAD(tmp_add_list);
3285 enum ice_status status;
3286
3287 /* Update MAC filter list to be added or removed for a VSI */
3288 if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
3289 status = ICE_ERR_NO_MEMORY;
3290 goto cfg_mac_fltr_exit;
3291 }
3292
3293 if (set)
3294 status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
3295 else
3296 status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
3297
3298cfg_mac_fltr_exit:
3299 ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
3300 return status;
3301}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_base.h"
6#include "ice_flow.h"
7#include "ice_lib.h"
8#include "ice_fltr.h"
9#include "ice_dcb_lib.h"
10#include "ice_devlink.h"
11#include "ice_vsi_vlan_ops.h"
12
13/**
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
15 * @vsi_type: VSI type enum
16 */
17const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
18{
19 switch (vsi_type) {
20 case ICE_VSI_PF:
21 return "ICE_VSI_PF";
22 case ICE_VSI_VF:
23 return "ICE_VSI_VF";
24 case ICE_VSI_CTRL:
25 return "ICE_VSI_CTRL";
26 case ICE_VSI_CHNL:
27 return "ICE_VSI_CHNL";
28 case ICE_VSI_LB:
29 return "ICE_VSI_LB";
30 case ICE_VSI_SWITCHDEV_CTRL:
31 return "ICE_VSI_SWITCHDEV_CTRL";
32 default:
33 return "unknown";
34 }
35}
36
37/**
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
39 * @vsi: the VSI being configured
40 * @ena: start or stop the Rx rings
41 *
42 * First enable/disable all of the Rx rings, flush any remaining writes, and
43 * then verify that they have all been enabled/disabled successfully. This will
44 * let all of the register writes complete when enabling/disabling the Rx rings
45 * before waiting for the change in hardware to complete.
46 */
47static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
48{
49 int ret = 0;
50 u16 i;
51
52 ice_for_each_rxq(vsi, i)
53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
54
55 ice_flush(&vsi->back->hw);
56
57 ice_for_each_rxq(vsi, i) {
58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
59 if (ret)
60 break;
61 }
62
63 return ret;
64}
65
66/**
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
68 * @vsi: VSI pointer
69 *
70 * On error: returns error code (negative)
71 * On success: returns 0
72 */
73static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
74{
75 struct ice_pf *pf = vsi->back;
76 struct device *dev;
77
78 dev = ice_pf_to_dev(pf);
79 if (vsi->type == ICE_VSI_CHNL)
80 return 0;
81
82 /* allocate memory for both Tx and Rx ring pointers */
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 sizeof(*vsi->tx_rings), GFP_KERNEL);
85 if (!vsi->tx_rings)
86 return -ENOMEM;
87
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 sizeof(*vsi->rx_rings), GFP_KERNEL);
90 if (!vsi->rx_rings)
91 goto err_rings;
92
93 /* txq_map needs to have enough space to track both Tx (stack) rings
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set yet,
95 * so use num_possible_cpus() since we always want to provide one XDP
96 * ring per CPU, regardless of the queue count the user may have set
97 * via ethtool's set_channels() callback.
98 */
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 sizeof(*vsi->txq_map), GFP_KERNEL);
101
102 if (!vsi->txq_map)
103 goto err_txq_map;
104
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 sizeof(*vsi->rxq_map), GFP_KERNEL);
107 if (!vsi->rxq_map)
108 goto err_rxq_map;
109
110 /* There is no need to allocate q_vectors for a loopback VSI. */
111 if (vsi->type == ICE_VSI_LB)
112 return 0;
113
114 /* allocate memory for q_vector pointers */
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 sizeof(*vsi->q_vectors), GFP_KERNEL);
117 if (!vsi->q_vectors)
118 goto err_vectors;
119
120 vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
121 if (!vsi->af_xdp_zc_qps)
122 goto err_zc_qps;
123
124 return 0;
125
126err_zc_qps:
127 devm_kfree(dev, vsi->q_vectors);
128err_vectors:
129 devm_kfree(dev, vsi->rxq_map);
130err_rxq_map:
131 devm_kfree(dev, vsi->txq_map);
132err_txq_map:
133 devm_kfree(dev, vsi->rx_rings);
134err_rings:
135 devm_kfree(dev, vsi->tx_rings);
136 return -ENOMEM;
137}
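
/* Illustrative sketch (not part of the driver): the txq_map over-allocation
 * above means that with, say, alloc_txq == 16 stack queues on a machine
 * with 8 possible CPUs, the map holds 16 + 8 == 24 entries, leaving room
 * for one XDP Tx ring per CPU even before vsi->num_xdp_txq is known.
 */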
138
139/**
140 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
141 * @vsi: the VSI being configured
142 */
143static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
144{
145 switch (vsi->type) {
146 case ICE_VSI_PF:
147 case ICE_VSI_SWITCHDEV_CTRL:
148 case ICE_VSI_CTRL:
149 case ICE_VSI_LB:
150 /* a user could change the values of num_[tr]x_desc using
151 * ethtool -G so we should keep those values instead of
152 * overwriting them with the defaults.
153 */
154 if (!vsi->num_rx_desc)
155 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
156 if (!vsi->num_tx_desc)
157 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
158 break;
159 default:
160 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
161 vsi->type);
162 break;
163 }
164}
165
166/**
167 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
168 * @vsi: the VSI being configured
169 *
170 * This function has no return value; the counts are set directly on the VSI.
171 */
172static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
173{
174 enum ice_vsi_type vsi_type = vsi->type;
175 struct ice_pf *pf = vsi->back;
176 struct ice_vf *vf = vsi->vf;
177
178 if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
179 return;
180
181 switch (vsi_type) {
182 case ICE_VSI_PF:
183 if (vsi->req_txq) {
184 vsi->alloc_txq = vsi->req_txq;
185 vsi->num_txq = vsi->req_txq;
186 } else {
187 vsi->alloc_txq = min3(pf->num_lan_msix,
188 ice_get_avail_txq_count(pf),
189 (u16)num_online_cpus());
190 }
191
192 pf->num_lan_tx = vsi->alloc_txq;
193
194 /* only 1 Rx queue unless RSS is enabled */
195 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
196 vsi->alloc_rxq = 1;
197 } else {
198 if (vsi->req_rxq) {
199 vsi->alloc_rxq = vsi->req_rxq;
200 vsi->num_rxq = vsi->req_rxq;
201 } else {
202 vsi->alloc_rxq = min3(pf->num_lan_msix,
203 ice_get_avail_rxq_count(pf),
204 (u16)num_online_cpus());
205 }
206 }
207
208 pf->num_lan_rx = vsi->alloc_rxq;
209
210 vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
211 max_t(int, vsi->alloc_rxq,
212 vsi->alloc_txq));
213 break;
214 case ICE_VSI_SWITCHDEV_CTRL:
215 /* The number of queues for the ctrl VSI is equal to the number of
216 * port representors; each ring is associated with the corresponding
217 * VF_PR netdev, and the Tx and Rx ring counts are always equal.
218 */
219 if (vsi->req_txq && vsi->req_rxq) {
220 vsi->alloc_txq = vsi->req_txq;
221 vsi->alloc_rxq = vsi->req_rxq;
222 } else {
223 vsi->alloc_txq = 1;
224 vsi->alloc_rxq = 1;
225 }
226
227 vsi->num_q_vectors = 1;
228 break;
229 case ICE_VSI_VF:
230 if (vf->num_req_qs)
231 vf->num_vf_qs = vf->num_req_qs;
232 vsi->alloc_txq = vf->num_vf_qs;
233 vsi->alloc_rxq = vf->num_vf_qs;
234 /* vf->num_msix includes the VF miscellaneous vector plus the
235 * data queue interrupts. Since vsi->num_q_vectors is the number
236 * of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
237 * original vector count.
238 */
239 vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
240 break;
241 case ICE_VSI_CTRL:
242 vsi->alloc_txq = 1;
243 vsi->alloc_rxq = 1;
244 vsi->num_q_vectors = 1;
245 break;
246 case ICE_VSI_CHNL:
247 vsi->alloc_txq = 0;
248 vsi->alloc_rxq = 0;
249 break;
250 case ICE_VSI_LB:
251 vsi->alloc_txq = 1;
252 vsi->alloc_rxq = 1;
253 break;
254 default:
255 dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
256 break;
257 }
258
259 ice_vsi_set_num_desc(vsi);
260}
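
/* Illustrative sketch (not part of the driver): for a PF VSI with, say,
 * 8 LAN MSI-X vectors, 64 available Tx queues and 12 online CPUs:
 *
 *	alloc_txq = min3(8, 64, 12) == 8
 *	num_q_vectors = min(8, max(alloc_rxq, alloc_txq)) == 8
 *
 * so the vector count never exceeds the MSI-X budget or the queue count.
 */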
261
262/**
263 * ice_get_free_slot - get the next non-NULL location index in array
264 * @array: array to search
265 * @size: size of the array
266 * @curr: last known occupied index to be used as a search hint
267 *
268 * void * is being used to keep the functionality generic. This lets us use this
269 * function on any array of pointers.
270 */
271static int ice_get_free_slot(void *array, int size, int curr)
272{
273 int **tmp_array = (int **)array;
274 int next;
275
276 if (curr < (size - 1) && !tmp_array[curr + 1]) {
277 next = curr + 1;
278 } else {
279 int i = 0;
280
281 while ((i < size) && (tmp_array[i]))
282 i++;
283 if (i == size)
284 next = ICE_NO_VSI;
285 else
286 next = i;
287 }
288 return next;
289}
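
/* Illustrative sketch (not part of the driver): with pf->vsi sized 8 and
 * slots 0-2 occupied, ice_get_free_slot(pf->vsi, 8, 2) returns 3 via the
 * fast path (curr + 1 is NULL); if slot 3 were also taken it would fall
 * back to a linear scan and return the first NULL slot, or ICE_NO_VSI
 * when the array is full.
 */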
290
291/**
292 * ice_vsi_delete_from_hw - delete a VSI from the switch
293 * @vsi: pointer to VSI being removed
294 */
295static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
296{
297 struct ice_pf *pf = vsi->back;
298 struct ice_vsi_ctx *ctxt;
299 int status;
300
301 ice_fltr_remove_all(vsi);
302 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
303 if (!ctxt)
304 return;
305
306 if (vsi->type == ICE_VSI_VF)
307 ctxt->vf_num = vsi->vf->vf_id;
308 ctxt->vsi_num = vsi->vsi_num;
309
310 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
311
312 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
313 if (status)
314 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
315 vsi->vsi_num, status);
316
317 kfree(ctxt);
318}
319
320/**
321 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
322 * @vsi: pointer to VSI being cleared
323 */
324static void ice_vsi_free_arrays(struct ice_vsi *vsi)
325{
326 struct ice_pf *pf = vsi->back;
327 struct device *dev;
328
329 dev = ice_pf_to_dev(pf);
330
331 bitmap_free(vsi->af_xdp_zc_qps);
332 vsi->af_xdp_zc_qps = NULL;
333 /* free the ring and vector containers */
334 devm_kfree(dev, vsi->q_vectors);
335 vsi->q_vectors = NULL;
336 devm_kfree(dev, vsi->tx_rings);
337 vsi->tx_rings = NULL;
338 devm_kfree(dev, vsi->rx_rings);
339 vsi->rx_rings = NULL;
340 devm_kfree(dev, vsi->txq_map);
341 vsi->txq_map = NULL;
342 devm_kfree(dev, vsi->rxq_map);
343 vsi->rxq_map = NULL;
344}
345
346/**
347 * ice_vsi_free_stats - Free the ring statistics structures
348 * @vsi: VSI pointer
349 */
350static void ice_vsi_free_stats(struct ice_vsi *vsi)
351{
352 struct ice_vsi_stats *vsi_stat;
353 struct ice_pf *pf = vsi->back;
354 int i;
355
356 if (vsi->type == ICE_VSI_CHNL)
357 return;
358 if (!pf->vsi_stats)
359 return;
360
361 vsi_stat = pf->vsi_stats[vsi->idx];
362 if (!vsi_stat)
363 return;
364
365 ice_for_each_alloc_txq(vsi, i) {
366 if (vsi_stat->tx_ring_stats[i]) {
367 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
368 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
369 }
370 }
371
372 ice_for_each_alloc_rxq(vsi, i) {
373 if (vsi_stat->rx_ring_stats[i]) {
374 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
375 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
376 }
377 }
378
379 kfree(vsi_stat->tx_ring_stats);
380 kfree(vsi_stat->rx_ring_stats);
381 kfree(vsi_stat);
382 pf->vsi_stats[vsi->idx] = NULL;
383}
384
385/**
386 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
387 * @vsi: VSI which is having stats allocated
388 */
389static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
390{
391 struct ice_ring_stats **tx_ring_stats;
392 struct ice_ring_stats **rx_ring_stats;
393 struct ice_vsi_stats *vsi_stats;
394 struct ice_pf *pf = vsi->back;
395 u16 i;
396
397 vsi_stats = pf->vsi_stats[vsi->idx];
398 tx_ring_stats = vsi_stats->tx_ring_stats;
399 rx_ring_stats = vsi_stats->rx_ring_stats;
400
401 /* Allocate Tx ring stats */
402 ice_for_each_alloc_txq(vsi, i) {
403 struct ice_ring_stats *ring_stats;
404 struct ice_tx_ring *ring;
405
406 ring = vsi->tx_rings[i];
407 ring_stats = tx_ring_stats[i];
408
409 if (!ring_stats) {
410 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
411 if (!ring_stats)
412 goto err_out;
413
414 WRITE_ONCE(tx_ring_stats[i], ring_stats);
415 }
416
417 ring->ring_stats = ring_stats;
418 }
419
420 /* Allocate Rx ring stats */
421 ice_for_each_alloc_rxq(vsi, i) {
422 struct ice_ring_stats *ring_stats;
423 struct ice_rx_ring *ring;
424
425 ring = vsi->rx_rings[i];
426 ring_stats = rx_ring_stats[i];
427
428 if (!ring_stats) {
429 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
430 if (!ring_stats)
431 goto err_out;
432
433 WRITE_ONCE(rx_ring_stats[i], ring_stats);
434 }
435
436 ring->ring_stats = ring_stats;
437 }
438
439 return 0;
440
441err_out:
442 ice_vsi_free_stats(vsi);
443 return -ENOMEM;
444}
445
446/**
447 * ice_vsi_free - clean up and deallocate the provided VSI
448 * @vsi: pointer to VSI being cleared
449 *
450 * This deallocates the VSI's queue resources, removes it from the PF's
451 * VSI array if necessary, and deallocates the VSI
452 */
453static void ice_vsi_free(struct ice_vsi *vsi)
454{
455 struct ice_pf *pf = NULL;
456 struct device *dev;
457
458 if (!vsi || !vsi->back)
459 return;
460
461 pf = vsi->back;
462 dev = ice_pf_to_dev(pf);
463
464 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
465 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
466 return;
467 }
468
469 mutex_lock(&pf->sw_mutex);
470 /* updates the PF for this cleared VSI */
471
472 pf->vsi[vsi->idx] = NULL;
473 pf->next_vsi = vsi->idx;
474
475 ice_vsi_free_stats(vsi);
476 ice_vsi_free_arrays(vsi);
477 mutex_unlock(&pf->sw_mutex);
478 devm_kfree(dev, vsi);
479}
480
481void ice_vsi_delete(struct ice_vsi *vsi)
482{
483 ice_vsi_delete_from_hw(vsi);
484 ice_vsi_free(vsi);
485}
486
487/**
488 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
489 * @irq: interrupt number
490 * @data: pointer to a q_vector
491 */
492static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
493{
494 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
495
496 if (!q_vector->tx.tx_ring)
497 return IRQ_HANDLED;
498
499#define FDIR_RX_DESC_CLEAN_BUDGET 64
500 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
501 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
502
503 return IRQ_HANDLED;
504}
505
506/**
507 * ice_msix_clean_rings - MSIX mode Interrupt Handler
508 * @irq: interrupt number
509 * @data: pointer to a q_vector
510 */
511static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
512{
513 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
514
515 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
516 return IRQ_HANDLED;
517
518 q_vector->total_events++;
519
520 napi_schedule(&q_vector->napi);
521
522 return IRQ_HANDLED;
523}
524
525static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
526{
527 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
528 struct ice_pf *pf = q_vector->vsi->back;
529 struct ice_repr *repr;
530 unsigned long id;
531
532 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
533 return IRQ_HANDLED;
534
535 xa_for_each(&pf->eswitch.reprs, id, repr)
536 napi_schedule(&repr->q_vector->napi);
537
538 return IRQ_HANDLED;
539}
540
541/**
542 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
543 * @vsi: VSI pointer
544 */
545static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
546{
547 struct ice_vsi_stats *vsi_stat;
548 struct ice_pf *pf = vsi->back;
549
550 if (vsi->type == ICE_VSI_CHNL)
551 return 0;
552 if (!pf->vsi_stats)
553 return -ENOENT;
554
555 if (pf->vsi_stats[vsi->idx])
556 /* realloc will happen in rebuild path */
557 return 0;
558
559 vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
560 if (!vsi_stat)
561 return -ENOMEM;
562
563 vsi_stat->tx_ring_stats =
564 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
565 GFP_KERNEL);
566 if (!vsi_stat->tx_ring_stats)
567 goto err_alloc_tx;
568
569 vsi_stat->rx_ring_stats =
570 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
571 GFP_KERNEL);
572 if (!vsi_stat->rx_ring_stats)
573 goto err_alloc_rx;
574
575 pf->vsi_stats[vsi->idx] = vsi_stat;
576
577 return 0;
578
579err_alloc_rx:
580 kfree(vsi_stat->rx_ring_stats);
581err_alloc_tx:
582 kfree(vsi_stat->tx_ring_stats);
583 kfree(vsi_stat);
584 pf->vsi_stats[vsi->idx] = NULL;
585 return -ENOMEM;
586}
587
588/**
589 * ice_vsi_alloc_def - set default values for already allocated VSI
590 * @vsi: ptr to VSI
591 * @ch: ptr to channel
592 */
593static int
594ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
595{
596 if (vsi->type != ICE_VSI_CHNL) {
597 ice_vsi_set_num_qs(vsi);
598 if (ice_vsi_alloc_arrays(vsi))
599 return -ENOMEM;
600 }
601
602 switch (vsi->type) {
603 case ICE_VSI_SWITCHDEV_CTRL:
604 /* Setup eswitch MSIX irq handler for VSI */
605 vsi->irq_handler = ice_eswitch_msix_clean_rings;
606 break;
607 case ICE_VSI_PF:
608 /* Setup default MSIX irq handler for VSI */
609 vsi->irq_handler = ice_msix_clean_rings;
610 break;
611 case ICE_VSI_CTRL:
612 /* Setup ctrl VSI MSIX irq handler */
613 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
614 break;
615 case ICE_VSI_CHNL:
616 if (!ch)
617 return -EINVAL;
618
619 vsi->num_rxq = ch->num_rxq;
620 vsi->num_txq = ch->num_txq;
621 vsi->next_base_q = ch->base_q;
622 break;
623 case ICE_VSI_VF:
624 case ICE_VSI_LB:
625 break;
626 default:
627 ice_vsi_free_arrays(vsi);
628 return -EINVAL;
629 }
630
631 return 0;
632}
633
634/**
635 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
636 * @pf: board private structure
637 *
638 * Reserves a VSI index from the PF and allocates an empty VSI structure
639 * without a type. The VSI structure must later be initialized by calling
640 * ice_vsi_cfg().
641 *
642 * returns a pointer to a VSI on success, NULL on failure.
643 */
644static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
645{
646 struct device *dev = ice_pf_to_dev(pf);
647 struct ice_vsi *vsi = NULL;
648
649 /* Need to protect the allocation of the VSIs at the PF level */
650 mutex_lock(&pf->sw_mutex);
651
652 /* If we have already allocated our maximum number of VSIs,
653 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
654 * is available to be populated
655 */
656 if (pf->next_vsi == ICE_NO_VSI) {
657 dev_dbg(dev, "out of VSI slots!\n");
658 goto unlock_pf;
659 }
660
661 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
662 if (!vsi)
663 goto unlock_pf;
664
665 vsi->back = pf;
666 set_bit(ICE_VSI_DOWN, vsi->state);
667
668 /* fill slot and make note of the index */
669 vsi->idx = pf->next_vsi;
670 pf->vsi[pf->next_vsi] = vsi;
671
672 /* prepare pf->next_vsi for next use */
673 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
674 pf->next_vsi);
675
676unlock_pf:
677 mutex_unlock(&pf->sw_mutex);
678 return vsi;
679}

/**
 * ice_alloc_fd_res - Allocate FD resource for a VSI
 * @vsi: pointer to the ice_vsi
 *
 * This allocates the FD resources
 *
 * Returns 0 on success, -EPERM on no-op or -EIO on failure
 */
static int ice_alloc_fd_res(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	u32 g_val, b_val;

	/* Flow Director filters are only allocated/assigned to the PF VSI or
	 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
	 * add/delete filters so resources are not allocated to it
	 */
	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EPERM;

	if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
	      vsi->type == ICE_VSI_CHNL))
		return -EPERM;

	/* FD filters from guaranteed pool per VSI */
	g_val = pf->hw.func_caps.fd_fltr_guar;
	if (!g_val)
		return -EPERM;

	/* FD filters from best effort pool */
	b_val = pf->hw.func_caps.fd_fltr_best_effort;
	if (!b_val)
		return -EPERM;

	/* PF main VSI gets only 64 FD resources from guaranteed pool
	 * when ADQ is configured.
	 */
#define ICE_PF_VSI_GFLTR	64

	/* determine FD filter resources per VSI from the shared (best effort)
	 * and dedicated pools
	 */
	if (vsi->type == ICE_VSI_PF) {
		vsi->num_gfltr = g_val;
		/* if MQPRIO is configured, main VSI doesn't get all FD
		 * resources from guaranteed pool. PF VSI gets 64 FD resources
		 */
		if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
			if (g_val < ICE_PF_VSI_GFLTR)
				return -EPERM;
			/* allow bare minimum entries for PF VSI */
			vsi->num_gfltr = ICE_PF_VSI_GFLTR;
		}

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else if (vsi->type == ICE_VSI_VF) {
		vsi->num_gfltr = 0;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	} else {
		struct ice_vsi *main_vsi;
		int numtc;

		main_vsi = ice_get_main_vsi(pf);
		if (!main_vsi)
			return -EPERM;

		if (!main_vsi->all_numtc)
			return -EINVAL;

		/* figure out ADQ numtc */
		numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;

		/* only one TC but still asking for channel resources,
		 * invalid config
		 */
		if (numtc < ICE_CHNL_START_TC)
			return -EPERM;

		g_val -= ICE_PF_VSI_GFLTR;
		/* channel VSIs get an equal share from the guaranteed pool */
		vsi->num_gfltr = g_val / numtc;

		/* each VSI gets same "best_effort" quota */
		vsi->num_bfltr = b_val;
	}

	return 0;
}
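
/* Illustrative arithmetic (hypothetical capability values): with
 * fd_fltr_guar = 512 and two ADQ traffic classes beyond TC0 (numtc = 2),
 * the PF VSI keeps ICE_PF_VSI_GFLTR (64) guaranteed filters and each
 * channel VSI receives (512 - 64) / 2 = 224 from the guaranteed pool,
 * while every VSI shares the same best-effort quota.
 */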

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	if (vsi->type == ICE_VSI_CHNL)
		return 0;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (ret)
		return ret;
	vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;

	ret = __ice_vsi_get_qs(&rx_qs_cfg);
	if (ret)
		return ret;
	vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;

	return 0;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
static void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	ice_for_each_alloc_txq(vsi, i) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	ice_for_each_alloc_rxq(vsi, i) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode - check if the driver is operating in safe mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_is_rdma_ena - check if RDMA is enabled on this PF
 * @pf: pointer to the PF struct
 *
 * returns true if RDMA is currently supported, false otherwise
 */
bool ice_is_rdma_ena(struct ice_pf *pf)
{
	return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	devm_kfree(dev, vsi->rss_hkey_user);
	devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;
	u16 max_rss_size;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	max_rss_size = BIT(cap->rss_table_entry_width);
	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = (u16)cap->rss_table_size;
		if (vsi->type == ICE_VSI_CHNL)
			vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
		else
			vsi->rss_size = min_t(u16, num_online_cpus(),
					      max_rss_size);
		vsi->rss_lut_type = ICE_LUT_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_VF:
		/* VF VSI will get a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes.
		 */
		vsi->rss_table_size = ICE_LUT_VSI_SIZE;
		vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
		vsi->rss_lut_type = ICE_LUT_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @hw: HW structure used to determine the VLAN mode of the device
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* allow all untagged/tagged packets by default on Tx */
	ctxt->info.inner_vlan_flags = FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
						 ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
	/* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
	 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
	 *
	 * DVM - leave inner VLAN in packet by default
	 */
	if (ice_is_dvm_ena(hw)) {
		ctxt->info.inner_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
		ctxt->info.outer_vlan_flags =
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
		ctxt->info.outer_vlan_flags |=
			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
				   ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
	}
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support; outer_tag_flags remains zero */
}
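
/* Worked example (assuming ICE_UP_TABLE_TRANSLATE(up, i) places the 3-bit
 * value 'up' into the field for table index 'i', i.e. up << (i * 3)): the
 * 1:1 mapping built above stores each index in its own field, producing a
 * UP translation table value of 0xFAC688.
 */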

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
	u16 num_txq_per_tc, num_rxq_per_tc;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u8 netdev_tc = 0;
	int i;

	if (!vsi->tc_cfg.numtc) {
		/* at least TC0 should be enabled by default */
		vsi->tc_cfg.numtc = 1;
		vsi->tc_cfg.ena_tc = 1;
	}

	num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
	if (!num_rxq_per_tc)
		num_rxq_per_tc = 1;
	num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!num_txq_per_tc)
		num_txq_per_tc = 1;
	/* find the (rounded up) power-of-2 of the per-TC Rx queue count */
	pow = (u16)order_base_2(num_rxq_per_tc);

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are the queue offset for TC0, and the next 4 bits
	 * are the number of queues allocated to TC0. The number of queues is
	 * a power-of-2.
	 *
	 * If a TC is not enabled, set its queue offset to 0 and allocate one
	 * queue; this way, traffic for the given TC will be sent to the
	 * default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
		vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
		qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);
		offset += num_rxq_per_tc;
		tx_count += num_txq_per_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was calculated from the enabled TCs for
	 * this VSI; otherwise fall back to num_rxq_per_tc, which is always
	 * non-zero because it is derived from the VSI's allocated Rx queues
	 * (at least 1), so rx_count ends up correct either way.
	 */
	if (offset)
		rx_count = offset;
	else
		rx_count = num_rxq_per_tc;

	if (rx_count > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			rx_count, vsi->alloc_rxq);
		return -EINVAL;
	}

	if (tx_count > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			tx_count, vsi->alloc_txq);
		return -EINVAL;
	}

	vsi->num_txq = tx_count;
	vsi->num_rxq = rx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);

	return 0;
}
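
/* Worked example (hypothetical queue counts): a VSI with alloc_rxq = 16 and
 * two enabled TCs yields num_rxq_per_tc = 8 and pow = order_base_2(8) = 3,
 * so TC0 is encoded with offset 0 and TC1 with offset 8, each advertising
 * 2^3 = 8 queues in its tc_mapping entry.
 */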

/**
 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 dflt_q_group, dflt_q_prio;
	u16 dflt_q, report_q, val;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
	    vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
		return;

	val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
	ctxt->info.valid_sections |= cpu_to_le16(val);
	dflt_q = 0;
	dflt_q_group = 0;
	report_q = 0;
	dflt_q_prio = 0;

	/* enable flow director filtering/programming */
	val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
	ctxt->info.fd_options = cpu_to_le16(val);
	/* max of allocated flow director filters */
	ctxt->info.max_fd_fltr_dedicated =
		cpu_to_le16(vsi->num_gfltr);
	/* max of shared flow director filters any VSI may program */
	ctxt->info.max_fd_fltr_shared =
		cpu_to_le16(vsi->num_bfltr);
	/* default queue index within the VSI of the default FD */
	val = FIELD_PREP(ICE_AQ_VSI_FD_DEF_Q_M, dflt_q);
	/* target queue or queue group to the FD filter */
	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_GRP_M, dflt_q_group);
	ctxt->info.fd_def_q = cpu_to_le16(val);
	/* queue index on which FD filter completion is reported */
	val = FIELD_PREP(ICE_AQ_VSI_FD_REPORT_Q_M, report_q);
	/* priority of the default qindex action */
	val |= FIELD_PREP(ICE_AQ_VSI_FD_DEF_PRIORITY_M, dflt_q_prio);
	ctxt->info.fd_report_opt = cpu_to_le16(val);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_CHNL:
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		break;
	default:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
	vsi->rss_hfunc = hash_type;

	ctxt->info.q_opt_rss =
		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
		FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
}

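/**
 * ice_chnl_vsi_setup_q_map - Setup a channel VSI queue map
 * @vsi: the channel VSI being configured
 * @ctxt: VSI context structure
 */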
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	struct ice_pf *pf = vsi->back;
	u16 qcount, qmap;
	u8 offset = 0;
	int pow;

	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);

	pow = order_base_2(qcount);
	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
	ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
}

/**
 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
 * @vsi: VSI to check whether or not VLAN pruning is enabled.
 *
 * returns true if Rx VLAN pruning is enabled and false otherwise.
 */
static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
	return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @vsi_flags: VSI configuration flags
 *
 * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
 * reconfigure an existing context.
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_CHNL:
		ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Handle VLAN pruning for channel VSI if main VSI has VLAN
	 * pruning enabled
	 */
	if (vsi->type == ICE_VSI_CHNL) {
		struct ice_vsi *main_vsi;

		main_vsi = ice_get_main_vsi(pf);
		if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
			ctxt->info.sw_flags2 |=
				ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
		else
			ctxt->info.sw_flags2 &=
				~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	ice_set_dflt_vsi_ctx(hw, ctxt);
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		ice_set_fd_vsi_ctx(ctxt, vsi);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
	    vsi->type != ICE_VSI_CTRL) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, make sure to set the
		 * valid_sections field to indicate which section of the
		 * VSI context is being updated
		 */
		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	if (vsi->type == ICE_VSI_CHNL) {
		ice_chnl_vsi_setup_q_map(vsi, ctxt);
	} else {
		ret = ice_vsi_setup_q_map(vsi, ctxt);
		if (ret)
			goto out;

		if (!(vsi_flags & ICE_VSI_FLAG_INIT))
			/* the VSI is being updated, so indicate which
			 * sections of the VSI context are being modified
			 */
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (vsi_flags & ICE_VSI_FLAG_INIT) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	/* Avoid stale references by clearing map from vector to ring */
	if (vsi->q_vectors) {
		ice_for_each_q_vector(vsi, i) {
			struct ice_q_vector *q_vector = vsi->q_vectors[i];

			if (q_vector) {
				q_vector->tx.tx_ring = NULL;
				q_vector->rx.rx_ring = NULL;
			}
		}
	}

	if (vsi->tx_rings) {
		ice_for_each_alloc_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				WRITE_ONCE(vsi->tx_rings[i], NULL);
			}
		}
	}
	if (vsi->rx_rings) {
		ice_for_each_alloc_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				WRITE_ONCE(vsi->rx_rings[i], NULL);
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	ice_for_each_alloc_txq(vsi, i) {
		struct ice_tx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->vsi = vsi;
		ring->tx_tstamps = &pf->ptp.port.tx;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		ring->txq_teid = ICE_INVAL_TEID;
		if (dvm_ena)
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
		else
			ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
		WRITE_ONCE(vsi->tx_rings[i], ring);
	}

	/* Allocate Rx rings */
	ice_for_each_alloc_rxq(vsi, i) {
		struct ice_rx_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		ring->cached_phctime = pf->ptp.cached_phc_time;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * In the event of disable request for RSS, this function will zero out RSS
 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
 * LUT.
 */
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	kfree(lut);
}
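
/* Illustration (assuming ice_fill_rss_lut() spreads queues round-robin,
 * i.e. lut[i] = i % rss_size): with rss_table_size = 8 and rss_size = 3 the
 * default LUT becomes {0, 1, 2, 0, 1, 2, 0, 1}, while the disable path
 * leaves the zeroed LUT so all hashes land on queue 0.
 */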

/**
 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
 * @vsi: VSI to be configured
 * @disable: set to true to have FCS / CRC in the frame data
 */
void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
{
	int i;

	ice_for_each_rxq(vsi, i)
		if (disable)
			vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
		else
			vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u8 *lut, *key;
	int err;

	dev = ice_pf_to_dev(pf);
	if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
	    (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
	} else {
		vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);

		/* If orig_rss_size is valid and less than the determined
		 * main VSI rss_size, update the main VSI rss_size to be
		 * orig_rss_size so that when tc-qdisc is deleted, the main
		 * VSI RSS table gets programmed back to what it was before
		 * setup-tc for the ADQ config.
		 */
		if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
		    vsi->orig_rss_size <= vsi->num_rxq) {
			vsi->rss_size = vsi->orig_rss_size;
			/* now orig_rss_size is used, reset it to zero */
			vsi->orig_rss_size = 0;
		}
	}

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
	if (err) {
		dev_err(dev, "set_rss_lut failed, error %d\n", err);
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	err = ice_set_rss_key(vsi, key);
	if (err)
		dev_err(dev, "set_rss_key failed, error %d\n", err);

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
	/* configure RSS for IPv4 with input set IP src/dst */
	{ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for IPv6 with input set IPv6 src/dst */
	{ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_TCP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_UDP_IPV4, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp4 with input set IP src/dst - only support
	 * RSS on SCTPv4 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
	 ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	{ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_UDP_IPV6, ICE_RSS_ANY_HEADERS, false},
	/* configure RSS for sctp6 with input set IPv6 src/dst - only support
	 * RSS on SCTPv6 on outer headers (non-tunneled)
	 */
	{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
	 ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
	/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
	{ICE_FLOW_SEG_HDR_ESP,
	 ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
};

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int status;
	u32 i;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
		const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];

		status = ice_add_rss_cfg(hw, vsi, cfg);
		if (status)
			dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
				cfg->addl_hdrs, cfg->hash_flds,
				cfg->hdr_type, cfg->symm);
	}
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
		vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_3072;
	}
}
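
/* Example of the selection above (standard, non-legacy Rx path on a 4K page
 * system, assuming 2K buffers are large enough with padding): a netdev with
 * mtu = 1500 (ETH_DATA_LEN) takes the middle branch and uses 1536-byte
 * frames/buffers less NET_IP_ALIGN, while any larger MTU falls through to
 * 3072-byte Rx buffers with the firmware's maximum frame size.
 */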

/**
 * ice_pf_state_is_nominal - checks the PF for nominal state
 * @pf: pointer to PF to check
 *
 * Check the PF's state for a collection of bits that would indicate
 * the PF is in a state that would inhibit normal operation for
 * driver functionality.
 *
 * Returns true if PF is in a nominal state, false otherwise
 */
bool ice_pf_state_is_nominal(struct ice_pf *pf)
{
	DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };

	if (!pf)
		return false;

	bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
		return false;

	return true;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_pf *pf = vsi->back;
	u16 vsi_num = vsi->vsi_num;	/* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	if (ice_is_reset_in_progress(pf->state))
		vsi->stat_offsets_loaded = false;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
 * @hw: HW pointer
 * @pf_q: index of the Rx queue in the PF's queue space
 * @rxdid: flexible descriptor RXDID
 * @prio: priority for the RXDID for this queue
 * @ena_ts: true to enable timestamp and false to disable timestamp
 */
void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
			bool ena_ts)
{
	int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));

	/* clear any previous values */
	regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
		    QRXFLXP_CNTXT_RXDID_PRIO_M |
		    QRXFLXP_CNTXT_TS_M);

	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_IDX_M, rxdid);
	regval |= FIELD_PREP(QRXFLXP_CNTXT_RXDID_PRIO_M, prio);

	if (ena_ts)
		/* Enable TimeSync on this queue */
		regval |= QRXFLXP_CNTXT_TS_M;

	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
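
/* Read-modify-write sketch: FIELD_PREP() shifts each value into the bit
 * position described by its mask, so clearing the three masks first and then
 * OR-ing in the prepared fields updates only the RXDID index, priority, and
 * timestamp bits while preserving the rest of QRXFLXP_CNTXT.
 */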

int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
{
	if (q_idx >= vsi->num_rxq)
		return -EINVAL;

	return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}

int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);

	if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
		return -EINVAL;

	qg_buf->num_txqs = 1;

	return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	ice_for_each_rxq(vsi, i) {
		int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 * @count: number of Tx ring array elements
 *
 * Configure the Tx VSI for operation.
 *
 * Returns 0 on success and a negative value on error
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	int err = 0;
	u16 q_idx;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < count; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			break;
	}

	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Configure the Tx VSI for operation.
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
	if (ret)
		return ret;

	ice_for_each_rxq(vsi, i)
		ice_tx_xsk_pool(vsi, i);

	return 0;
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
 */
static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}
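
/* Worked example: with gran = 4 us, intrl = 10 us truncates to 10 / 4 = 2,
 * so the returned register value is 2 | GLINT_RATE_INTRL_ENA_M. Any intrl
 * below the granularity (e.g. 2 us) yields 0, which leaves rate limiting
 * disabled because the enable bit is never set.
 */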

/**
 * ice_write_intrl - write throttle rate limit to interrupt specific register
 * @q_vector: pointer to interrupt specific structure
 * @intrl: throttle rate limit in microseconds to write
 */
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
{
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}

static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
{
	switch (rc->type) {
	case ICE_RX_CONTAINER:
		if (rc->rx_ring)
			return rc->rx_ring->q_vector;
		break;
	case ICE_TX_CONTAINER:
		if (rc->tx_ring)
			return rc->tx_ring->q_vector;
		break;
	default:
		break;
	}

	return NULL;
}

/**
 * __ice_write_itr - write throttle rate to register
 * @q_vector: pointer to interrupt data structure
 * @rc: pointer to ring container
 * @itr: throttle rate in microseconds to write
 */
static void __ice_write_itr(struct ice_q_vector *q_vector,
			    struct ice_ring_container *rc, u16 itr)
{
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
	     ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
}
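
/* Illustration (assuming the hardware ITR granularity is 2 us, i.e.
 * ICE_ITR_GRAN_S = 1): ITR_REG_ALIGN() rounds the requested value to that
 * granularity and the shift converts microseconds to register units, so a
 * 50 us throttle rate would be written as 25.
 */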

/**
 * ice_write_itr - write throttle rate to queue specific register
 * @rc: pointer to ring container
 * @itr: throttle rate in microseconds to write
 */
void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
	struct ice_q_vector *q_vector;

	q_vector = ice_pull_qvec_from_rc(rc);
	if (!q_vector)
		return;

	__ice_write_itr(q_vector, rc, itr);
}

/**
 * ice_set_q_vector_intrl - set up interrupt rate limiting
 * @q_vector: the vector to be configured
 *
 * Interrupt rate limiting is local to the vector, not per-queue so we must
 * detect if either ring container has dynamic moderation enabled to decide
 * what to set the interrupt rate limit to via INTRL settings. In the case that
 * dynamic moderation is disabled on both, write the value with the cached
 * setting to make sure INTRL register matches the user visible value.
 */
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
{
	if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
		/* when dynamic moderation is enabled, cap each vector at
		 * 4 us, i.e. no more than 250,000 interrupts per second.
		 * That still allows low latency while staying below the
		 * 500,000 ints/sec ceiling of the lowest latency setting,
		 * which saves some CPU.
		 */
		ice_write_intrl(q_vector, 4);
	} else {
		ice_write_intrl(q_vector, q_vector->intrl);
	}
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 txq = 0, rxq = 0;
	int i, q;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		ice_cfg_itr(hw, q_vector);

		/* Both the Transmit Queue Interrupt Cause Control register
		 * and the Receive Queue Interrupt Cause Control register
		 * expect the MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across PF or across device.
		 * For SR-IOV VF VSIs queue vector index always starts
		 * with 1 since first vector index(0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
			rxq++;
		}
	}
}

/**
 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be enabled
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_all_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
 * @vsi: the VSI whose rings are to be disabled
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_all_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 * @count: number of Tx ring array elements
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < count; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					      rings[q_idx], &txq_meta);

		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
}

/**
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
 * @vsi: the VSI being configured
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
}

/**
 * ice_vsi_is_rx_queue_active - check if any Rx queue of a VSI is active
 * @vsi: the VSI being configured
 *
 * Return true if at least one queue is active.
 */
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int i;

	ice_for_each_rxq(vsi, i) {
		u32 rx_reg;
		int pf_q;

		pf_q = vsi->rxq_map[i];
		rx_reg = rd32(hw, QRX_CTRL(pf_q));
		if (rx_reg & QRX_CTRL_QENA_STAT_M)
			return true;
	}

	return false;
}

static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
		vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
		vsi->tc_cfg.numtc = 1;
		return;
	}

	/* set VSI TC information based on DCB config */
	ice_vsi_set_dcb_tc_cfg(vsi);
}

/**
 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
 * @vsi: the VSI being configured
 * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine create or remove Rule
 */
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
	int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
			enum ice_sw_fwd_act_type act);
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;

	dev = ice_pf_to_dev(pf);
	eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;

	if (tx) {
		status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
				  ICE_DROP_PACKET);
	} else {
		if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
			status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
							  create);
		} else {
			status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
					  ICE_FWD_TO_VSI);
		}
	}

	if (status)
		dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
			create ? "adding" : "removing", tx ? "TX" : "RX",
			vsi->vsi_num, status);
}

/**
 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
 * @vsi: pointer to the VSI
 *
 * This function will allocate new scheduler aggregator now if needed and will
 * move specified VSI into it.
 */
static void ice_set_agg_vsi(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_agg_node *agg_node_iter = NULL;
	u32 agg_id = ICE_INVALID_AGG_NODE_ID;
	struct ice_agg_node *agg_node = NULL;
	int node_offset, max_agg_nodes = 0;
	struct ice_port_info *port_info;
	struct ice_pf *pf = vsi->back;
	u32 agg_node_id_start = 0;
	int status;

	/* create (as needed) scheduler aggregator node and move VSI into
	 * corresponding aggregator node
	 * - PF aggregator node will contain VSIs of type _PF and _CTRL
	 * - VF aggregator nodes will contain VF VSIs
	 */
	port_info = pf->hw.port_info;
	if (!port_info)
		return;

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_CHNL:
	case ICE_VSI_LB:
	case ICE_VSI_PF:
	case ICE_VSI_SWITCHDEV_CTRL:
		max_agg_nodes = ICE_MAX_PF_AGG_NODES;
		agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
		agg_node_iter = &pf->pf_agg_node[0];
		break;
	case ICE_VSI_VF:
		/* a user can create 'n' VFs on a given PF, but an aggregator
		 * node can hold at most 64 children. The code below handles
		 * aggregator(s) for VF VSIs: it either selects an already
		 * created agg_node that still has room (num_vsis < 64), or
		 * picks the next available node, which will then be created
		 */
		max_agg_nodes = ICE_MAX_VF_AGG_NODES;
		agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
		agg_node_iter = &pf->vf_agg_node[0];
		break;
	default:
		/* other VSI type, handle later if needed */
		dev_dbg(dev, "unexpected VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	}

	/* find the appropriate aggregator node */
	for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
		/* see if we can find space in previously created
		 * node if num_vsis < 64, otherwise skip
		 */
		if (agg_node_iter->num_vsis &&
		    agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
			agg_node_iter++;
			continue;
		}

		if (agg_node_iter->valid &&
		    agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
			agg_id = agg_node_iter->agg_id;
			agg_node = agg_node_iter;
			break;
		}

		/* find unclaimed agg_id */
		if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
			agg_id = node_offset + agg_node_id_start;
			agg_node = agg_node_iter;
			break;
		}
		/* move to next agg_node */
		agg_node_iter++;
	}

	if (!agg_node)
		return;

	/* if selected aggregator node was not created, create it */
	if (!agg_node->valid) {
		status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
				     (u8)vsi->tc_cfg.ena_tc);
		if (status) {
			dev_err(dev, "unable to create aggregator node with agg_id %u\n",
				agg_id);
			return;
		}
		/* aggregator node is created, store the needed info */
		agg_node->valid = true;
		agg_node->agg_id = agg_id;
	}

	/* move VSI to corresponding aggregator node */
	status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
				     (u8)vsi->tc_cfg.ena_tc);
	if (status) {
		dev_err(dev, "unable to move VSI idx %u into aggregator node %u\n",
			vsi->idx, agg_id);
		return;
	}

	/* keep active children count for aggregator node */
	agg_node->num_vsis++;

	/* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
	 * to aggregator node
	 */
	vsi->agg_node = agg_node;
	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
		vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
		vsi->agg_node->num_vsis);
}
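
/* Illustrative walk of the selection loop (hypothetical counts): with 70 VF
 * VSIs and ICE_MAX_VSIS_IN_AGG_NODE = 64, the first aggregator node fills up
 * with 64 VSIs, the loop skips it once full, and the remaining 6 VSIs claim
 * the next node id (ICE_VF_AGG_NODE_ID_START + 1) and create a second node.
 */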

static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct device *dev = ice_pf_to_dev(pf);
	int ret, i;

	/* configure VSI nodes based on number of queues and TC's */
	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
			continue;

		if (vsi->type == ICE_VSI_CHNL) {
			if (!vsi->alloc_txq && vsi->num_txq)
				max_txqs[i] = vsi->num_txq;
			else
				max_txqs[i] = pf->num_lan_tx;
		} else {
			max_txqs[i] = vsi->alloc_txq;
		}

		if (vsi->type == ICE_VSI_PF)
			max_txqs[i] += vsi->num_xdp_txq;
	}

	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
			      max_txqs);
	if (ret) {
		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, ret);
		return ret;
	}

	return 0;
}

/**
 * ice_vsi_cfg_def - configure default VSI based on the type
 * @vsi: pointer to VSI
 * @params: the parameters to configure this VSI with
 */
static int
ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_pf *pf = vsi->back;
	int ret;

	vsi->vsw = pf->first_sw;

	ret = ice_vsi_alloc_def(vsi, params->ch);
	if (ret)
		return ret;

	/* allocate memory for Tx/Rx ring stat pointers */
	ret = ice_vsi_alloc_stat_arrays(vsi);
	if (ret)
		goto unroll_vsi_alloc;

	ice_alloc_fd_res(vsi);

	ret = ice_vsi_get_qs(vsi);
	if (ret) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_vsi_alloc_stat;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi, params->flags);
	if (ret)
		goto unroll_get_qs;

	ice_vsi_init_vlan_ops(vsi);

	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_SWITCHDEV_CTRL:
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_ring_stats(vsi);
		if (ret)
			goto unroll_vector_base;

		ice_vsi_map_rings_to_vectors(vsi);

		/* Associate q_vector rings to napi */
		ice_vsi_set_napi_queues(vsi);

		vsi->stat_offsets_loaded = false;

		if (ice_is_xdp_ena_vsi(vsi)) {
			ret = ice_vsi_determine_xdp_res(vsi);
			if (ret)
				goto unroll_vector_base;
			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
			if (ret)
				goto unroll_vector_base;
		}

		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
		if (vsi->type != ICE_VSI_CTRL)
			/* Do not exit if configuring RSS had an issue, at
			 * least receive traffic on first queue. Hence no
			 * need to capture return value
			 */
			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
				ice_vsi_cfg_rss_lut_key(vsi);
				ice_vsi_set_rss_flow_fld(vsi);
			}
		ice_init_arfs(vsi);
		break;
	case ICE_VSI_CHNL:
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating netdev for this type and
		 * map queues to vectors through Virtchnl, PF driver only
		 * creates a VSI and corresponding structures for bookkeeping
		 * purpose
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_alloc_ring_stats(vsi);
		if (ret)
			goto unroll_vector_base;

		vsi->stat_offsets_loaded = false;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_vf_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_ring_stats(vsi);
		if (ret)
			goto unroll_vector_base;

		break;
	default:
		/* clean up the resources and exit */
		ret = -EINVAL;
		goto unroll_vsi_init;
	}

	return 0;

unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete_from_hw(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
unroll_vsi_alloc_stat:
	ice_vsi_free_stats(vsi);
unroll_vsi_alloc:
	ice_vsi_free_arrays(vsi);
	return ret;
}

/**
 * ice_vsi_cfg - configure a previously allocated VSI
 * @vsi: pointer to VSI
 * @params: parameters used to configure this VSI
 */
int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
{
	struct ice_pf *pf = vsi->back;
	int ret;

	if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
		return -EINVAL;

	vsi->type = params->type;
	vsi->port_info = params->pi;

	/* For VSIs which don't have a connected VF, this will be NULL */
	vsi->vf = params->vf;

	ret = ice_vsi_cfg_def(vsi, params);
	if (ret)
		return ret;

	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
	if (ret)
		ice_vsi_decfg(vsi);

	if (vsi->type == ICE_VSI_CTRL) {
		if (vsi->vf) {
			WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
			vsi->vf->ctrl_vsi_idx = vsi->idx;
		} else {
			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
			pf->ctrl_vsi_idx = vsi->idx;
		}
	}

	return ret;
}

/**
 * ice_vsi_decfg - remove all VSI configuration
 * @vsi: pointer to VSI
 */
void ice_vsi_decfg(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int err;

	/* The Rx rule only exists to be removed if the LLDP FW
	 * engine is currently stopped
	 */
	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		ice_cfg_sw_lldp(vsi, false, false);

	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
			vsi->vsi_num, err);

	if (ice_is_xdp_ena_vsi(vsi))
		/* return value check can be skipped here, it always returns
		 * 0 if reset is in progress
		 */
		ice_destroy_xdp_rings(vsi);

	ice_vsi_clear_rings(vsi);
	ice_vsi_free_q_vectors(vsi);
	ice_vsi_put_qs(vsi);
	ice_vsi_free_arrays(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and
	 * how many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */

	if (vsi->type == ICE_VSI_VF &&
	    vsi->agg_node && vsi->agg_node->valid)
		vsi->agg_node->num_vsis--;
}

/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @params: parameters to use when creating the VSI
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi;
	int ret;

	/* ice_vsi_setup can only initialize a new VSI, and we must have
	 * a port_info structure for it.
	 */
	if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
	    WARN_ON(!params->pi))
		return NULL;

	vsi = ice_vsi_alloc(pf);
	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	ret = ice_vsi_cfg(vsi, params);
	if (ret)
		goto err_vsi_cfg;

	/* Add a switch rule to drop all Tx Flow Control Frames, with lookup
	 * type ETHERTYPE, from VSIs, and restrict a malicious VF from sending
	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
	 * The rule is added once for the PF VSI in order to create the
	 * appropriate recipe, since VSI/VSI list is ignored with drop
	 * action...
	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
	 * be dropped so that VFs cannot send LLDP packets to reconfigure DCB
	 * settings in the HW.
	 */
	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
				 ICE_DROP_PACKET);
		ice_cfg_sw_lldp(vsi, true, true);
	}

	if (!vsi->agg_node)
		ice_set_agg_vsi(vsi);

	return vsi;

err_vsi_cfg:
	ice_vsi_free(vsi);

	return NULL;
}
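
/* Example (hypothetical caller, not part of this file): creating the main PF
 * VSI during probe could look roughly like the sketch below. The fields come
 * from struct ice_vsi_cfg_params as used above; the surrounding error
 * handling is only illustrative.
 *
 *	struct ice_vsi_cfg_params params = {};
 *	struct ice_vsi *vsi;
 *
 *	params.type = ICE_VSI_PF;
 *	params.pi = pf->hw.port_info;
 *	params.flags = ICE_VSI_FLAG_INIT;
 *
 *	vsi = ice_vsi_setup(pf, &params);
 *	if (!vsi)
 *		return -ENOMEM;
 */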

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		ice_write_intrl(q_vector, 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_write_itr(&q_vector->tx, 0);
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
			}
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_write_itr(&q_vector->rx, 0);
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_free_cpu_rx_rmap(vsi);

	ice_for_each_q_vector(vsi, i) {
		int irq_num;

		/* free only the irqs that were actually requested; check the
		 * vector before dereferencing it for the IRQ number
		 */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		irq_num = vsi->q_vectors[i]->irq.virq;

		/* clear the affinity notifier in the IRQ descriptor */
		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
			irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open_internal(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
		err = ice_vsi_open_ctrl(vsi);
	}

	return err;
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_vsi_close(vsi);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	} else if (vsi->type == ICE_VSI_CTRL ||
		   vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ice_vsi_close(vsi);
	}
}
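
/* Example (illustrative only): the pause/resume pair above is typically used
 * to quiesce a VSI around an operation such as a reset, with rtnl not yet
 * held by the caller:
 *
 *	ice_dis_vsi(vsi, false);
 *	... reconfigure device state ...
 *	err = ice_ena_vsi(vsi, false);
 *	if (err)
 *		... handle failure to reopen ...
 */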

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VFs from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(vsi->q_vectors[i]->irq.virq);
}

/**
 * __ice_queue_set_napi - Set the napi instance for the queue
 * @dev: device to which NAPI and queue belong
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context
 * @locked: is the rtnl_lock already held
 *
 * Set the napi instance for the queue. Caller indicates the lock status.
 */
static void
__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
		     enum netdev_queue_type type, struct napi_struct *napi,
		     bool locked)
{
	if (!locked)
		rtnl_lock();
	netif_queue_set_napi(dev, queue_index, type, napi);
	if (!locked)
		rtnl_unlock();
}

/**
 * ice_queue_set_napi - Set the napi instance for the queue
 * @vsi: VSI being configured
 * @queue_index: Index of queue
 * @type: queue type as RX or TX
 * @napi: NAPI context
 *
 * Set the napi instance for the queue. The rtnl lock state is derived from
 * the execution path.
 */
void
ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
		   enum netdev_queue_type type, struct napi_struct *napi)
{
	struct ice_pf *pf = vsi->back;

	if (!vsi->netdev)
		return;

	if (current_work() == &pf->serv_task ||
	    test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
	    test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_SUSPENDED, pf->state))
		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
				     false);
	else
		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
				     true);
}

/**
 * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
 * @q_vector: q_vector pointer
 * @locked: is the rtnl_lock already held
 *
 * Associate the q_vector napi with all the queue[s] on the vector.
 * Caller indicates the lock status.
 */
void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
{
	struct ice_rx_ring *rx_ring;
	struct ice_tx_ring *tx_ring;

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		__ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
				     NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
				     locked);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		__ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
				     NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
				     locked);
	/* Also set the interrupt number for the NAPI */
	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}

/**
 * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
 * @q_vector: q_vector pointer
 *
 * Associate the q_vector napi with all the queue[s] on the vector
 */
void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
	struct ice_rx_ring *rx_ring;
	struct ice_tx_ring *tx_ring;

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
				   NETDEV_QUEUE_TYPE_RX, &q_vector->napi);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
				   NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
	/* Also set the interrupt number for the NAPI */
	netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}

/**
 * ice_vsi_set_napi_queues - associate queue[s] with napi for all vectors
 * @vsi: VSI pointer
 *
 * Associate queue[s] with napi for all vectors
 */
void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, i)
		ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	ice_vsi_close(vsi);
	ice_vsi_decfg(vsi);

	/* retain the SW VSI data structure since it is needed to unregister
	 * and free the VSI netdev when the PF is not in a reset recovery
	 * pending state, e.g. during rmmod
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_delete(vsi);

	return 0;
}

/**
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
 * @vsi: VSI connected with q_vectors
 * @coalesce: array of struct with stored coalesce
 *
 * Returns array size.
 */
static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce)
{
	int i;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		coalesce[i].itr_tx = q_vector->tx.itr_settings;
		coalesce[i].itr_rx = q_vector->rx.itr_settings;
		coalesce[i].intrl = q_vector->intrl;

		if (i < vsi->num_txq)
			coalesce[i].tx_valid = true;
		if (i < vsi->num_rxq)
			coalesce[i].rx_valid = true;
	}

	return vsi->num_q_vectors;
}

/**
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
 * @vsi: VSI connected with q_vectors
 * @coalesce: pointer to array of struct with stored coalesce
 * @size: size of coalesce array
 *
 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
 * the ITR params in the array. If size is 0 or coalesce wasn't stored, set
 * coalesce to the default values.
 */
static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce, int size)
{
	struct ice_ring_container *rc;
	int i;

	if ((size && !coalesce) || !vsi)
		return;

	/* There are a couple of cases that have to be handled here:
	 * 1. The case where the number of queue vectors stays the same, but
	 *    the number of Tx or Rx rings changes (the first for loop)
	 * 2. The case where the number of queue vectors increased (the
	 *    second for loop)
	 */
	for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
		/* There are 2 cases to handle here and they are the same for
		 * both Tx and Rx:
		 * if the entry was valid previously (coalesce[i].[tr]x_valid)
		 * and the loop variable is less than the number of rings
		 * allocated, then write the previous values
		 *
		 * if the entry was not valid previously, but the loop
		 * variable is less than the number of rings allocated (this
		 * means the number of rings increased from previously), then
		 * write out the values in the first element
		 *
		 * Also, always write the ITR, even if in ITR_IS_DYNAMIC mode,
		 * as there is no harm because the dynamic algorithm will just
		 * overwrite it.
		 */
		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
			rc = &vsi->q_vectors[i]->rx;
			rc->itr_settings = coalesce[i].itr_rx;
			ice_write_itr(rc, rc->itr_setting);
		} else if (i < vsi->alloc_rxq) {
			rc = &vsi->q_vectors[i]->rx;
			rc->itr_settings = coalesce[0].itr_rx;
			ice_write_itr(rc, rc->itr_setting);
		}

		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
			rc = &vsi->q_vectors[i]->tx;
			rc->itr_settings = coalesce[i].itr_tx;
			ice_write_itr(rc, rc->itr_setting);
		} else if (i < vsi->alloc_txq) {
			rc = &vsi->q_vectors[i]->tx;
			rc->itr_settings = coalesce[0].itr_tx;
			ice_write_itr(rc, rc->itr_setting);
		}

		vsi->q_vectors[i]->intrl = coalesce[i].intrl;
		ice_set_q_vector_intrl(vsi->q_vectors[i]);
	}

	/* the number of queue vectors increased so write whatever is in
	 * the first element
	 */
	for (; i < vsi->num_q_vectors; i++) {
		/* transmit */
		rc = &vsi->q_vectors[i]->tx;
		rc->itr_settings = coalesce[0].itr_tx;
		ice_write_itr(rc, rc->itr_setting);

		/* receive */
		rc = &vsi->q_vectors[i]->rx;
		rc->itr_settings = coalesce[0].itr_rx;
		ice_write_itr(rc, rc->itr_setting);

		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
		ice_set_q_vector_intrl(vsi->q_vectors[i]);
	}
}
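
/* Sketch of the intended save/restore pairing (mirrors ice_vsi_rebuild()
 * below; the allocation shown here is illustrative):
 *
 *	coalesce = kcalloc(vsi->num_q_vectors, sizeof(*coalesce), GFP_KERNEL);
 *	size = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 *	... tear down and reconfigure the VSI ...
 *	ice_vsi_rebuild_set_coalesce(vsi, coalesce, size);
 *	kfree(coalesce);
 */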

/**
 * ice_vsi_realloc_stat_arrays - free unused stat structures or alloc new ones
 * @vsi: VSI pointer
 */
static int
ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
{
	u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
	u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
	struct ice_ring_stats **tx_ring_stats;
	struct ice_ring_stats **rx_ring_stats;
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf = vsi->back;
	u16 prev_txq = vsi->alloc_txq;
	u16 prev_rxq = vsi->alloc_rxq;
	int i;

	vsi_stat = pf->vsi_stats[vsi->idx];

	if (req_txq < prev_txq) {
		for (i = req_txq; i < prev_txq; i++) {
			if (vsi_stat->tx_ring_stats[i]) {
				kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
				WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
			}
		}
	}

	tx_ring_stats = vsi_stat->tx_ring_stats;
	vsi_stat->tx_ring_stats =
		krealloc_array(vsi_stat->tx_ring_stats, req_txq,
			       sizeof(*vsi_stat->tx_ring_stats),
			       GFP_KERNEL | __GFP_ZERO);
	if (!vsi_stat->tx_ring_stats) {
		vsi_stat->tx_ring_stats = tx_ring_stats;
		return -ENOMEM;
	}

	if (req_rxq < prev_rxq) {
		for (i = req_rxq; i < prev_rxq; i++) {
			if (vsi_stat->rx_ring_stats[i]) {
				kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
				WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
			}
		}
	}

	rx_ring_stats = vsi_stat->rx_ring_stats;
	vsi_stat->rx_ring_stats =
		krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
			       sizeof(*vsi_stat->rx_ring_stats),
			       GFP_KERNEL | __GFP_ZERO);
	if (!vsi_stat->rx_ring_stats) {
		vsi_stat->rx_ring_stats = rx_ring_stats;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @vsi_flags: flags used for VSI rebuild flow
 *
 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_coalesce_stored *coalesce;
	int prev_num_q_vectors = 0;
	struct ice_pf *pf;
	int ret;

	if (!vsi)
		return -EINVAL;

	params = ice_vsi_to_params(vsi);
	params.flags = vsi_flags;

	pf = vsi->back;
	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
		return -EINVAL;

	coalesce = kcalloc(vsi->num_q_vectors,
			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
	if (!coalesce)
		return -ENOMEM;

	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

	ret = ice_vsi_realloc_stat_arrays(vsi);
	if (ret)
		goto err_vsi_cfg;

	ice_vsi_decfg(vsi);
	ret = ice_vsi_cfg_def(vsi, &params);
	if (ret)
		goto err_vsi_cfg;

	ret = ice_vsi_cfg_tc_lan(pf, vsi);
	if (ret) {
		if (vsi_flags & ICE_VSI_FLAG_INIT) {
			ret = -EIO;
			goto err_vsi_cfg_tc_lan;
		}

		kfree(coalesce);
		return ice_schedule_reset(pf, ICE_RESET_PFR);
	}

	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);

	return 0;

err_vsi_cfg_tc_lan:
	ice_vsi_decfg(vsi);
err_vsi_cfg:
	kfree(coalesce);
	return ret;
}
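
/* Example (illustrative): a PF-reset rebuild path would typically keep the
 * existing VSI definition rather than re-initializing it:
 *
 *	err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 *	if (err)
 *		dev_err(ice_pf_to_dev(pf), "VSI rebuild failed: %d\n", err);
 */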

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(ICE_RESET_OICR_RECV, state) ||
	       test_bit(ICE_PFR_REQ, state) ||
	       test_bit(ICE_CORER_REQ, state) ||
	       test_bit(ICE_GLOBR_REQ, state);
}

/**
 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
 * @pf: pointer to the PF structure
 * @timeout: length of time to wait, in jiffies
 *
 * Wait (sleep) for a short time until the driver finishes cleaning up from
 * a device reset. The caller must be able to sleep. Use this to delay
 * operations that could fail while the driver is cleaning up after a device
 * reset.
 *
 * Returns 0 on success, -EBUSY if the reset is not finished within the
 * timeout, and -ERESTARTSYS if the thread was interrupted.
 */
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
{
	long ret;

	ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
					       !ice_is_reset_in_progress(pf->state),
					       timeout);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}
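
/* Example (illustrative): a caller that must not race with a reset could
 * block for up to ten seconds before proceeding:
 *
 *	err = ice_wait_for_reset(pf, 10 * HZ);
 *	if (err)
 *		return err;
 */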

/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @ena_tc: TC map to be enabled
 */
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct ice_pf *pf = vsi->back;
	int numtc = vsi->tc_cfg.numtc;
	struct ice_dcbx_cfg *dcbcfg;
	u8 netdev_tc;
	int i;

	if (!netdev)
		return;

	/* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
	if (vsi->type == ICE_VSI_CHNL)
		return;

	if (!ena_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
		numtc = vsi->all_numtc;

	if (netdev_set_num_tc(netdev, numtc))
		return;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;

	ice_for_each_traffic_class(i)
		if (vsi->tc_cfg.ena_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_cfg.tc_info[i].netdev_tc,
					    vsi->tc_cfg.tc_info[i].qcount_tx,
					    vsi->tc_cfg.tc_info[i].qoffset);
	/* setup TC queue map for CHNL TCs */
	ice_for_each_chnl_tc(i) {
		if (!(vsi->all_enatc & BIT(i)))
			break;
		if (!vsi->mqprio_qopt.qopt.count[i])
			break;
		netdev_set_tc_queue(netdev, i,
				    vsi->mqprio_qopt.qopt.count[i],
				    vsi->mqprio_qopt.qopt.offset[i]);
	}

	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		return;

	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
		u8 ets_tc = dcbcfg->etscfg.prio_table[i];

		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @ena_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO
 * options.
 */
static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
			   u8 ena_tc)
{
	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
	u16 new_txq, new_rxq;
	u8 netdev_tc = 0;
	int i;

	vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;

	pow = order_base_2(tc0_qcount);
	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, tc0_offset);
	qmap |= FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, pow);

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		offset = vsi->mqprio_qopt.qopt.offset[i];
		qcount_rx = vsi->mqprio_qopt.qopt.count[i];
		qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
	}

	if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
		ice_for_each_chnl_tc(i) {
			if (!(vsi->all_enatc & BIT(i)))
				continue;
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount_rx = vsi->mqprio_qopt.qopt.count[i];
			qcount_tx = vsi->mqprio_qopt.qopt.count[i];
		}
	}

	new_txq = offset + qcount_tx;
	if (new_txq > vsi->alloc_txq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
			new_txq, vsi->alloc_txq);
		return -EINVAL;
	}

	new_rxq = offset + qcount_rx;
	if (new_rxq > vsi->alloc_rxq) {
		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
			new_rxq, vsi->alloc_rxq);
		return -EINVAL;
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_txq = new_txq;
	vsi->num_rxq = new_rxq;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
		vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
		vsi->next_base_q = tc0_qcount;
	}
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
	dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
	dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
		vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);

	return 0;
}
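
/* Worked example for the TC 0 qmap computed above (illustrative numbers):
 * with tc0_offset = 0 and tc0_qcount = 4, order_base_2(4) = 2, so qmap
 * encodes a queue offset of 0 and a queue-count exponent of 2, i.e.
 * 2^2 = 4 contiguous queues starting at relative queue 0:
 *
 *	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, 0) |
 *	       FIELD_PREP(ICE_AQ_VSI_TC_Q_NUM_M, 2);
 */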

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_tc_cfg old_tc_cfg;
	struct ice_vsi_ctx *ctx;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);
	if (vsi->tc_cfg.ena_tc == ena_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
		/* Update max_txqs if it is a CHNL VSI, because alloc_t[r]xq
		 * are zero for CHNL VSI, hence use num_txq instead as max_txqs
		 */
		if (vsi->type == ICE_VSI_CHNL &&
		    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
			max_txqs[i] = vsi->num_txq;
	}

	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
	else
		ret = ice_vsi_setup_q_map(vsi, ctx);

	if (ret) {
		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
		goto out;
	}

	/* must indicate which section of the VSI context is being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (ret) {
		dev_info(dev, "Failed VSI Update\n");
		goto out;
	}

	if (vsi->type == ICE_VSI_PF &&
	    test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
	else
		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
				      vsi->tc_cfg.ena_tc, max_txqs);

	if (ret) {
		dev_err(dev, "VSI %d failed TC config, error %d\n",
			vsi->vsi_num, ret);
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}

/**
 * ice_update_ring_stats - Update ring statistics
 * @stats: stats to be updated
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that caller has acquired a u64_stats_sync lock.
 */
static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
	stats->bytes += bytes;
	stats->pkts += pkts;
}

/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->ring_stats->syncp);
	ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&tx_ring->ring_stats->syncp);
}

/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->ring_stats->syncp);
	ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
	u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
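
/* Example (illustrative): a reader of these counters pairs the writer-side
 * u64_stats_update_begin()/end() above with a fetch/retry loop so 64-bit
 * values are read consistently on 32-bit kernels:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->ring_stats->syncp);
 *		pkts = ring->ring_stats->stats.pkts;
 *		bytes = ring->ring_stats->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring->ring_stats->syncp, start));
 */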

/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @pi: port info of the switch with default VSI
 *
 * Return true if there is a single VSI in the default forwarding VSI list
 */
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
	bool exists = false;

	ice_check_if_dflt_vsi(pi, 0, &exists);
	return exists;
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If the VSI passed in is the default forwarding VSI then return true, else
 * return false
 */
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
	return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI and it's enabled, just
 * return success.
 *
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */
int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	if (ice_lag_is_switchdev_running(vsi->back)) {
		dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return status;
	}

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @vsi: VSI to remove from filter list
 *
 * If the switch has no default VSI or it's not enabled then return error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
	struct device *dev;
	int status;

	if (!vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(vsi->port_info))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_link_speed_mbps - get link speed in Mbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed, or 0 if the speed is unknown.
 */
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
	unsigned int link_speed;

	link_speed = vsi->port_info->phy.link_info.link_speed;

	return (int)ice_get_link_speed(fls(link_speed) - 1);
}

/**
 * ice_get_link_speed_kbps - get link speed in Kbps
 * @vsi: the VSI whose link speed is being queried
 *
 * Return current VSI link speed, or 0 if the speed is unknown.
 */
int ice_get_link_speed_kbps(struct ice_vsi *vsi)
{
	int speed_mbps;

	speed_mbps = ice_get_link_speed_mbps(vsi);

	return speed_mbps * 1000;
}
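
/* Worked example (illustrative): link_speed is a one-hot bitmap of
 * ICE_AQ_LINK_SPEED_* values. If only bit 5 is set (link_speed == 0x20),
 * fls(link_speed) - 1 evaluates to 5, and ice_get_link_speed(5) returns the
 * Mbps value associated with that bit; the Kbps variant simply multiplies
 * the result by 1000.
 */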

/**
 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
 * @vsi: VSI to be configured
 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
 *
 * If the min_tx_rate is specified as 0 that means to clear the minimum BW
 * limit profile, otherwise a non-zero value will force a minimum BW limit
 * for the VSI on TC 0.
 */
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (min_tx_rate > (u64)speed) {
		dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure min BW for VSI limit */
	if (min_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MIN_BW, min_tx_rate);
		if (status) {
			dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
				min_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
			min_tx_rate, ice_vsi_type_str(vsi->type));
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MIN_BW);
		if (status) {
			dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}

/**
 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
 * @vsi: VSI to be configured
 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
 *
 * If the max_tx_rate is specified as 0 that means to clear the maximum BW
 * limit profile, otherwise a non-zero value will force a maximum BW limit
 * for the VSI on TC 0.
 */
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int status;
	int speed;

	dev = ice_pf_to_dev(pf);
	if (!vsi->port_info) {
		dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
			vsi->idx, vsi->type);
		return -EINVAL;
	}

	speed = ice_get_link_speed_kbps(vsi);
	if (max_tx_rate > (u64)speed) {
		dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
			speed);
		return -EINVAL;
	}

	/* Configure max BW for VSI limit */
	if (max_tx_rate) {
		status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
						   ICE_MAX_BW, max_tx_rate);
		if (status) {
			dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
				max_tx_rate, ice_vsi_type_str(vsi->type),
				vsi->idx);
			return status;
		}

		dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
			max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
	} else {
		status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
							vsi->idx, 0,
							ICE_MAX_BW);
		if (status) {
			dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
				ice_vsi_type_str(vsi->type), vsi->idx);
			return status;
		}

		dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
			ice_vsi_type_str(vsi->type), vsi->idx);
	}

	return 0;
}
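
/* Example (illustrative): capping a VSI's Tx bandwidth at 1 Gbps, then
 * removing the cap (rates are in Kbps, as documented above):
 *
 *	err = ice_set_max_bw_limit(vsi, 1000000);
 *	...
 *	err = ice_set_max_bw_limit(vsi, 0);
 */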

/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	int status;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * this is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE
	 */
	if (status == -EIO) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
				(ena ? "ON" : "OFF"), status,
				ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
			(ena ? "ON" : "OFF"), status,
			ice_aq_str(hw->adminq.sq_last_status));
		return status;
	}

	return 0;
}

/**
 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are
 * based on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8)
 * doesn't matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
 *
 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged
 * traffic when VLAN pruning is enabled. Also, this handles VLAN 0 priority
 * tagged traffic in SVM, since the VLAN TPID isn't part of filtering.
 *
 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID
 * is part of filtering.
 */
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->add_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

/**
 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
 * @vsi: VSI used to add VLAN filters
 *
 * Delete the VLAN 0 filters in the same manner that they were added in
 * ice_vsi_add_vlan_zero.
 */
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
	struct ice_vlan vlan;
	int err;

	vlan = ICE_VLAN(0, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* in SVM both VLAN 0 filters are identical */
	if (!ice_is_dvm_ena(&vsi->back->hw))
		return 0;

	vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
	err = vlan_ops->del_vlan(vsi, &vlan);
	if (err && err != -EEXIST)
		return err;

	/* when deleting the last VLAN filter, make sure to disable the VLAN
	 * promisc mode so the filter isn't left by accident
	 */
	return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
				     ICE_MCAST_VLAN_PROMISC_BITS, 0);
}

/**
 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
 * @vsi: VSI used to get the VLAN mode
 *
 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
 */
static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
{
#define ICE_DVM_NUM_ZERO_VLAN_FLTRS	2
#define ICE_SVM_NUM_ZERO_VLAN_FLTRS	1
	/* no VLAN 0 filter is created when a port VLAN is active */
	if (vsi->type == ICE_VSI_VF) {
		if (WARN_ON(!vsi->vf))
			return 0;

		if (ice_vf_is_port_vlan_ena(vsi->vf))
			return 0;
	}

	if (ice_is_dvm_ena(&vsi->back->hw))
		return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
	else
		return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
}

/**
 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
 * @vsi: VSI used to determine if any non-zero VLANs have been added
 */
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
 * @vsi: VSI used to get the number of non-zero VLANs added
 */
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
{
	return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
}

/**
 * ice_is_feature_supported - check if a feature is supported
 * @pf: pointer to the struct ice_pf instance
 * @f: feature enum to be checked
 *
 * returns true if feature is supported, false otherwise
 */
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
{
	if (f < 0 || f >= ICE_F_MAX)
		return false;

	return test_bit(f, pf->features);
}
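
/* Example (illustrative): callers gate optional functionality on the feature
 * bitmap, e.g.
 *
 *	if (ice_is_feature_supported(pf, ICE_F_DSCP))
 *		... set up DSCP-based QoS ...
 */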
4054
4055/**
4056 * ice_set_feature_support
4057 * @pf: pointer to the struct ice_pf instance
4058 * @f: feature enum to set
4059 */
4060void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
4061{
4062 if (f < 0 || f >= ICE_F_MAX)
4063 return;
4064
4065 set_bit(f, pf->features);
4066}
4067
4068/**
4069 * ice_clear_feature_support
4070 * @pf: pointer to the struct ice_pf instance
4071 * @f: feature enum to clear
4072 */
4073void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
4074{
4075 if (f < 0 || f >= ICE_F_MAX)
4076 return;
4077
4078 clear_bit(f, pf->features);
4079}
4080
4081/**
4082 * ice_init_feature_support
4083 * @pf: pointer to the struct ice_pf instance
4084 *
4085 * called during init to setup supported feature
4086 */
4087void ice_init_feature_support(struct ice_pf *pf)
4088{
4089 switch (pf->hw.device_id) {
4090 case ICE_DEV_ID_E810C_BACKPLANE:
4091 case ICE_DEV_ID_E810C_QSFP:
4092 case ICE_DEV_ID_E810C_SFP:
4093 case ICE_DEV_ID_E810_XXV_BACKPLANE:
4094 case ICE_DEV_ID_E810_XXV_QSFP:
4095 case ICE_DEV_ID_E810_XXV_SFP:
4096 ice_set_feature_support(pf, ICE_F_DSCP);
4097 if (ice_is_phy_rclk_in_netlist(&pf->hw))
4098 ice_set_feature_support(pf, ICE_F_PHY_RCLK);
4099 /* If we don't own the timer - don't enable other caps */
4100 if (!ice_pf_src_tmr_owned(pf))
4101 break;
4102 if (ice_is_cgu_in_netlist(&pf->hw))
4103 ice_set_feature_support(pf, ICE_F_CGU);
4104 if (ice_is_clock_mux_in_netlist(&pf->hw))
4105 ice_set_feature_support(pf, ICE_F_SMA_CTRL);
4106 if (ice_gnss_is_gps_present(&pf->hw))
4107 ice_set_feature_support(pf, ICE_F_GNSS);
4108 break;
4109 default:
4110 break;
4111 }
4112}

/**
 * ice_vsi_update_security - update security block in VSI
 * @vsi: pointer to VSI structure
 * @fill: function pointer to fill ctx
 */
int
ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
{
	struct ice_vsi_ctx ctx = { 0 };

	ctx.info = vsi->info;
	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	fill(&ctx);

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}
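
/* Example (illustrative): the @fill callback pattern lets callers compose a
 * security-section update out of the ctx helpers below, e.g. enabling MAC
 * anti-spoof on a VSI:
 *
 *	err = ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 */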

/**
 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			       (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
			       ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
}

/**
 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
 * @ctx: pointer to VSI ctx structure
 */
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
	ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}

/**
 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
 * @vsi: pointer to VSI structure
 * @set: set or unset the bit
 */
int
ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
{
	struct ice_vsi_ctx ctx = {
		.info = vsi->info,
	};

	ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
	if (set)
		ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
	else
		ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;

	if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
		return -ENODEV;

	vsi->info = ctx.info;
	return 0;
}