Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2019-2021, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_lib.h"
6#include "ice_eswitch.h"
7#include "ice_eswitch_br.h"
8#include "ice_fltr.h"
9#include "ice_repr.h"
10#include "devlink/devlink.h"
11#include "ice_tc_lib.h"
12
/**
 * ice_eswitch_setup_env - configure eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct net_device *netdev = uplink_vsi->netdev;
	bool if_running = netif_running(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;

	/* Quiesce the uplink before reprogramming its filters; skip ice_down()
	 * if the VSI is already marked down.
	 */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
		if (ice_down(uplink_vsi))
			return -ENODEV;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* Drop all synced unicast/multicast addresses from the uplink */
	netif_addr_lock_bh(netdev);
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
	netif_addr_unlock_bh(netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_vlan_zero;

	/* Make the uplink the default destination for unmatched Rx traffic */
	if (ice_set_dflt_vsi(uplink_vsi))
		goto err_def_rx;

	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
			     ICE_FLTR_TX))
		goto err_def_tx;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_vlan_filtering;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	if (if_running && ice_up(uplink_vsi))
		goto err_up;

	return 0;

	/* Unwind in exact reverse order of setup above */
err_up:
	ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_TX);
err_def_tx:
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_RX);
err_def_rx:
	ice_vsi_del_vlan_zero(uplink_vsi);
err_vlan_zero:
	/* Restore the uplink MAC/broadcast filters removed above */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	if (if_running)
		ice_up(uplink_vsi);

	return -ENODEV;
}
86
/**
 * ice_eswitch_release_repr - clear PR VSI configuration
 * @pf: pointer to PF struct
 * @repr: pointer to PR
 */
static void
ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *vsi = repr->src_vsi;

	/* Skip representors that aren't configured */
	if (!repr->dst)
		return;

	/* Re-enable anti-spoof and restore the representee MAC/broadcast
	 * filter that the slow-path configuration removed.
	 */
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	metadata_dst_free(repr->dst);
	repr->dst = NULL;
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
				       ICE_FWD_TO_VSI);
}
107
108/**
109 * ice_eswitch_setup_repr - configure PR to run in switchdev mode
110 * @pf: pointer to PF struct
111 * @repr: pointer to PR struct
112 */
113static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
114{
115 struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
116 struct ice_vsi *vsi = repr->src_vsi;
117 struct metadata_dst *dst;
118
119 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
120 GFP_KERNEL);
121 if (!repr->dst)
122 return -ENOMEM;
123
124 netif_keep_dst(uplink_vsi->netdev);
125
126 dst = repr->dst;
127 dst->u.port_info.port_id = vsi->vsi_num;
128 dst->u.port_info.lower_dev = uplink_vsi->netdev;
129
130 return 0;
131}
132
/**
 * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
 * @vsi: VSI structure of representee
 * @mac: representee MAC
 *
 * Return: 0 on success, non-zero on error.
 */
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
{
	int err;

	/* Representee traffic must flow through the slow-path, so remove
	 * its existing HW filters first.
	 */
	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);

	err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (err)
		goto err_update_security;

	err = ice_vsi_add_vlan_zero(vsi);
	if (err)
		goto err_vlan_zero;

	return 0;

err_vlan_zero:
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
err_update_security:
	/* Restore the MAC/broadcast filters removed above */
	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);

	return err;
}
163
/**
 * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
 * @vsi: VSI structure of representee
 * @mac: representee MAC
 */
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
{
	/* Mirror of ice_eswitch_cfg_vsi(): re-enable anti-spoof and restore
	 * the representee MAC/broadcast filters.
	 */
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
174
175/**
176 * ice_eswitch_update_repr - reconfigure port representor
177 * @repr_id: representor ID
178 * @vsi: VSI for which port representor is configured
179 */
180void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
181{
182 struct ice_pf *pf = vsi->back;
183 struct ice_repr *repr;
184 int err;
185
186 if (!ice_is_switchdev_running(pf))
187 return;
188
189 repr = xa_load(&pf->eswitch.reprs, *repr_id);
190 if (!repr)
191 return;
192
193 repr->src_vsi = vsi;
194 repr->dst->u.port_info.port_id = vsi->vsi_num;
195
196 if (repr->br_port)
197 repr->br_port->vsi = vsi;
198
199 err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
200 if (err)
201 dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
202 repr->id);
203
204 /* The VSI number is different, reload the PR with new id */
205 if (repr->id != vsi->vsi_num) {
206 xa_erase(&pf->eswitch.reprs, repr->id);
207 repr->id = vsi->vsi_num;
208 if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
209 dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
210 repr->id);
211 *repr_id = repr->id;
212 }
213}
214
/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	unsigned int len = skb->len;
	int ret;

	/* Attach the PR metadata dst so the Tx path can steer the frame to
	 * the representee VSI, then transmit through the uplink netdev.
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	/* len was sampled before xmit: the skb may already be consumed */
	ice_repr_inc_tx_stats(repr, len, ret);

	return ret;
}
239
240/**
241 * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
242 * @skb: pointer to send buffer
243 * @off: pointer to offload struct
244 */
245void
246ice_eswitch_set_target_vsi(struct sk_buff *skb,
247 struct ice_tx_offload_params *off)
248{
249 struct metadata_dst *dst = skb_metadata_dst(skb);
250 u64 cd_cmd, dst_vsi;
251
252 if (!dst) {
253 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
254 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
255 } else {
256 cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
257 dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
258 dst->u.port_info.port_id);
259 off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
260 }
261}
262
/**
 * ice_eswitch_release_env - clear eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

	/* Tear down in reverse order of ice_eswitch_setup_env() */
	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_TX);
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_RX);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
288
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	/* Switchdev cannot be enabled while the uplink is enslaved to a
	 * bridge.
	 */
	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->eswitch.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		return -ENODEV;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	pf->eswitch.is_running = true;

	return 0;

err_br_offloads:
	ice_eswitch_release_env(pf);
	return -ENODEV;
}
323
/**
 * ice_eswitch_disable_switchdev - disable eswitch resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	/* Reverse of ice_eswitch_enable_switchdev() */
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);

	pf->eswitch.is_running = false;
}
335
336/**
337 * ice_eswitch_mode_set - set new eswitch mode
338 * @devlink: pointer to devlink structure
339 * @mode: eswitch mode to switch to
340 * @extack: pointer to extack structure
341 */
342int
343ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
344 struct netlink_ext_ack *extack)
345{
346 struct ice_pf *pf = devlink_priv(devlink);
347
348 if (pf->eswitch_mode == mode)
349 return 0;
350
351 if (ice_has_vfs(pf)) {
352 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
353 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
354 return -EOPNOTSUPP;
355 }
356
357 switch (mode) {
358 case DEVLINK_ESWITCH_MODE_LEGACY:
359 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
360 pf->hw.pf_id);
361 xa_destroy(&pf->eswitch.reprs);
362 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
363 break;
364 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
365 {
366 if (ice_is_adq_active(pf)) {
367 dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
368 NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
369 return -EOPNOTSUPP;
370 }
371
372 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
373 pf->hw.pf_id);
374 xa_init(&pf->eswitch.reprs);
375 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
376 break;
377 }
378 default:
379 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
380 return -EINVAL;
381 }
382
383 pf->eswitch_mode = mode;
384 return 0;
385}
386
387/**
388 * ice_eswitch_mode_get - get current eswitch mode
389 * @devlink: pointer to devlink structure
390 * @mode: output parameter for current eswitch mode
391 */
392int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
393{
394 struct ice_pf *pf = devlink_priv(devlink);
395
396 *mode = pf->eswitch_mode;
397 return 0;
398}
399
400/**
401 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
402 * @pf: pointer to PF structure
403 *
404 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
405 * false otherwise.
406 */
407bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
408{
409 return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
410}
411
412/**
413 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
414 * @pf: pointer to PF structure
415 */
416static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
417{
418 struct ice_repr *repr;
419 unsigned long id;
420
421 if (test_bit(ICE_DOWN, pf->state))
422 return;
423
424 xa_for_each(&pf->eswitch.reprs, id, repr)
425 ice_repr_start_tx_queues(repr);
426}
427
428/**
429 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
430 * @pf: pointer to PF structure
431 */
432void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
433{
434 struct ice_repr *repr;
435 unsigned long id;
436
437 if (test_bit(ICE_DOWN, pf->state))
438 return;
439
440 xa_for_each(&pf->eswitch.reprs, id, repr)
441 ice_repr_stop_tx_queues(repr);
442}
443
/* Pause slow-path Tx on every PR while the eswitch is reconfigured */
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
	ice_eswitch_stop_all_tx_queues(pf);
}
448
/* Resume slow-path Tx on every PR after eswitch reconfiguration */
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
	ice_eswitch_start_all_tx_queues(pf);
}
453
/**
 * ice_eswitch_attach - add a port representor to the eswitch
 * @pf: pointer to PF structure
 * @repr: pointer to the newly created PR
 * @id: output, key under which the PR was stored in the reprs xarray
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
	int err;

	/* Nothing to do in legacy mode */
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return 0;

	/* The first representor brings the switchdev environment up */
	if (xa_empty(&pf->eswitch.reprs)) {
		err = ice_eswitch_enable_switchdev(pf);
		if (err)
			return err;
	}

	ice_eswitch_stop_reprs(pf);

	err = repr->ops.add(repr);
	if (err)
		goto err_create_repr;

	err = ice_eswitch_setup_repr(pf, repr);
	if (err)
		goto err_setup_repr;

	err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	*id = repr->id;

	ice_eswitch_start_reprs(pf);

	return 0;

err_xa_alloc:
	ice_eswitch_release_repr(pf, repr);
err_setup_repr:
	repr->ops.rem(repr);
err_create_repr:
	/* If this would have been the only PR, tear switchdev back down */
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	ice_eswitch_start_reprs(pf);

	return err;
}
499
/**
 * ice_eswitch_attach_vf - attach VF to a eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be attached
 *
 * During attaching port representor for VF is created.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = ice_repr_create_vf(vf);
	struct devlink *devlink = priv_to_devlink(pf);
	int err;

	if (IS_ERR(repr))
		return PTR_ERR(repr);

	/* Attach runs under the devlink instance lock */
	devl_lock(devlink);
	err = ice_eswitch_attach(pf, repr, &vf->repr_id);
	if (err)
		ice_repr_destroy(repr);
	devl_unlock(devlink);

	return err;
}
526
/**
 * ice_eswitch_attach_sf - attach SF to a eswitch
 * @pf: pointer to PF structure
 * @sf: pointer to SF structure to be attached
 *
 * During attaching port representor for SF is created.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = ice_repr_create_sf(sf);
	int err;

	if (IS_ERR(repr))
		return PTR_ERR(repr);

	err = ice_eswitch_attach(pf, repr, &sf->repr_id);
	if (err)
		ice_repr_destroy(repr);

	return err;
}
550
/**
 * ice_eswitch_detach - remove a port representor from the eswitch
 * @pf: pointer to PF structure
 * @repr: pointer to the PR being removed
 */
static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
{
	ice_eswitch_stop_reprs(pf);
	repr->ops.rem(repr);

	xa_erase(&pf->eswitch.reprs, repr->id);

	/* Last representor gone - tear down the switchdev environment */
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);

	ice_eswitch_release_repr(pf, repr);
	ice_repr_destroy(repr);

	if (xa_empty(&pf->eswitch.reprs)) {
		struct devlink *devlink = priv_to_devlink(pf);

		/* since all port representors are destroyed, there is
		 * no point in keeping the nodes
		 */
		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
		devl_rate_nodes_destroy(devlink);
	} else {
		ice_eswitch_start_reprs(pf);
	}
}
576
/**
 * ice_eswitch_detach_vf - detach VF from a eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be detached
 */
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
	struct devlink *devlink = priv_to_devlink(pf);

	/* Nothing to do when no PR is attached for this VF */
	if (!repr)
		return;

	/* Detach runs under the devlink instance lock */
	devl_lock(devlink);
	ice_eswitch_detach(pf, repr);
	devl_unlock(devlink);
}
594
/**
 * ice_eswitch_detach_sf - detach SF from a eswitch
 * @pf: pointer to PF structure
 * @sf: pointer to SF structure to be detached
 */
void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);

	/* Nothing to do when no PR is attached for this SF */
	if (!repr)
		return;

	ice_eswitch_detach(pf, repr);
}
609
610/**
611 * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
612 * @rx_ring: ring used to receive the packet
613 * @rx_desc: descriptor used to get src_vsi value
614 *
615 * Get src_vsi value from descriptor and load correct representor. If it isn't
616 * found return rx_ring->netdev.
617 */
618struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
619 union ice_32b_rx_flex_desc *rx_desc)
620{
621 struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
622 struct ice_32b_rx_flex_desc_nic_2 *desc;
623 struct ice_repr *repr;
624
625 desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
626 repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
627 if (!repr)
628 return rx_ring->netdev;
629
630 return repr->netdev;
631}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2019-2021, Intel Corporation. */
3
4#include "ice.h"
5#include "ice_lib.h"
6#include "ice_eswitch.h"
7#include "ice_eswitch_br.h"
8#include "ice_fltr.h"
9#include "ice_repr.h"
10#include "ice_devlink.h"
11#include "ice_tc_lib.h"
12
13/**
14 * ice_eswitch_del_sp_rules - delete adv rules added on PRs
15 * @pf: pointer to the PF struct
16 *
17 * Delete all advanced rules that were used to forward packets with the
18 * device's VSI index to the corresponding eswitch ctrl VSI queue.
19 */
20static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
21{
22 struct ice_repr *repr;
23 unsigned long id;
24
25 xa_for_each(&pf->eswitch.reprs, id, repr) {
26 if (repr->sp_rule.rid)
27 ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
28 }
29}
30
/**
 * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
 * @pf: pointer to PF struct
 * @repr: pointer to the repr struct
 *
 * This function adds advanced rule that forwards packets with
 * device's VSI index to the corresponding eswitch ctrl VSI queue.
 */
static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	/* Single lookup element: match on the source VSI metadata */
	ice_rule_add_src_vsi_metadata(list);

	rule_info.sw_act.flag = ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	/* Forward to the ctrl VSI Rx queue dedicated to this representor */
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[repr->q_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
	rule_info.src_vsi = repr->src_vsi->idx;

	/* Rule handle is stored in repr->sp_rule for later removal */
	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       &repr->sp_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
			repr->id);

	kfree(list);
	return err;
}
73
74static int
75ice_eswitch_add_sp_rules(struct ice_pf *pf)
76{
77 struct ice_repr *repr;
78 unsigned long id;
79 int err;
80
81 xa_for_each(&pf->eswitch.reprs, id, repr) {
82 err = ice_eswitch_add_sp_rule(pf, repr);
83 if (err) {
84 ice_eswitch_del_sp_rules(pf);
85 return err;
86 }
87 }
88
89 return 0;
90}
91
/**
 * ice_eswitch_setup_env - configure eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct net_device *netdev = uplink_vsi->netdev;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* Drop all synced unicast/multicast addresses from the uplink */
	netif_addr_lock_bh(netdev);
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
	netif_addr_unlock_bh(netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	/* Only install the default-VSI rule if none exists yet; remember
	 * whether we added it so the error path clears only what this
	 * function set up.
	 */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_dis_rx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	return 0;

	/* Unwind in exact reverse order of setup above */
err_override_local_lb:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_dis_rx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	/* Restore the uplink MAC/broadcast filters removed above */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
153
/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
 * @eswitch: pointer to eswitch struct
 *
 * In eswitch number of allocated Tx/Rx rings is equal.
 *
 * This function fills q_vectors structures associated with representor and
 * move each ring pairs to port representor netdevs. Each port representor
 * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
 * number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
{
	struct ice_vsi *vsi = eswitch->control_vsi;
	unsigned long repr_id = 0;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;

		/* Walk reprs in ascending index order, one ring pair each */
		repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
			       XA_PRESENT);
		if (!repr)
			break;

		/* Advance past this entry for the next xa_find() */
		repr_id += 1;
		repr->q_id = q_id;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;
	}
}
208
/**
 * ice_eswitch_release_repr - clear PR VSI configuration
 * @pf: pointer to PF struct
 * @repr: pointer to PR
 */
static void
ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *vsi = repr->src_vsi;

	/* Skip representors that aren't configured */
	if (!repr->dst)
		return;

	/* Re-enable anti-spoof and restore the representee MAC/broadcast
	 * filter that the slow-path configuration removed.
	 */
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	metadata_dst_free(repr->dst);
	repr->dst = NULL;
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
				       ICE_FWD_TO_VSI);

	/* NAPI context was added in ice_eswitch_setup_repr() */
	netif_napi_del(&repr->q_vector->napi);
}
231
/**
 * ice_eswitch_setup_repr - configure PR to run in switchdev mode
 * @pf: pointer to PF struct
 * @repr: pointer to PR struct
 */
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_vsi *vsi = repr->src_vsi;
	struct metadata_dst *dst;

	/* Representee traffic must flow through the slow-path, so remove
	 * its existing HW filters first.
	 */
	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
				       GFP_KERNEL);
	if (!repr->dst)
		goto err_add_mac_fltr;

	if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
		goto err_dst_free;

	if (ice_vsi_add_vlan_zero(vsi))
		goto err_update_security;

	netif_napi_add(repr->netdev, &repr->q_vector->napi,
		       ice_napi_poll);

	netif_keep_dst(repr->netdev);

	dst = repr->dst;
	dst->u.port_info.port_id = vsi->vsi_num;
	dst->u.port_info.lower_dev = repr->netdev;
	ice_repr_set_traffic_vsi(repr, ctrl_vsi);

	return 0;

err_update_security:
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
err_dst_free:
	metadata_dst_free(repr->dst);
	repr->dst = NULL;
err_add_mac_fltr:
	/* Restore the MAC/broadcast filters removed above */
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);

	return -ENODEV;
}
277
/**
 * ice_eswitch_update_repr - reconfigure port representor
 * @repr_id: representor ID
 * @vsi: VSI for which port representor is configured
 */
void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	repr = xa_load(&pf->eswitch.reprs, repr_id);
	if (!repr)
		return;

	/* The representee VSI may have been rebuilt; refresh cached state */
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	if (repr->br_port)
		repr->br_port->vsi = vsi;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		/* Fall back to legacy forwarding for the representee */
		ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
					       ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
			repr->id);
	}
}
310
/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	/* Refuse transmit while switchdev is not fully operational */
	if (!vsi || !ice_is_switchdev_running(vsi->back))
		return NETDEV_TX_BUSY;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	/* Attach the PR metadata dst so the Tx path can steer the frame */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	/* Each PR owns exactly one ctrl VSI queue */
	skb->queue_mapping = repr->q_id;

	return ice_start_xmit(skb, netdev);
}
343
344/**
345 * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
346 * @skb: pointer to send buffer
347 * @off: pointer to offload struct
348 */
349void
350ice_eswitch_set_target_vsi(struct sk_buff *skb,
351 struct ice_tx_offload_params *off)
352{
353 struct metadata_dst *dst = skb_metadata_dst(skb);
354 u64 cd_cmd, dst_vsi;
355
356 if (!dst) {
357 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
358 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
359 } else {
360 cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
361 dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
362 dst->u.port_info.port_id);
363 off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
364 }
365}
366
/**
 * ice_eswitch_release_env - clear eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

	/* Tear down in reverse order of ice_eswitch_setup_env() */
	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_clear_dflt_vsi(uplink_vsi);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
391
392/**
393 * ice_eswitch_vsi_setup - configure eswitch control VSI
394 * @pf: pointer to PF structure
395 * @pi: pointer to port_info structure
396 */
397static struct ice_vsi *
398ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
399{
400 struct ice_vsi_cfg_params params = {};
401
402 params.type = ICE_VSI_SWITCHDEV_CTRL;
403 params.pi = pi;
404 params.flags = ICE_VSI_FLAG_INIT;
405
406 return ice_vsi_setup(pf, ¶ms);
407}
408
409/**
410 * ice_eswitch_napi_enable - enable NAPI for all port representors
411 * @reprs: xarray of reprs
412 */
413static void ice_eswitch_napi_enable(struct xarray *reprs)
414{
415 struct ice_repr *repr;
416 unsigned long id;
417
418 xa_for_each(reprs, id, repr)
419 napi_enable(&repr->q_vector->napi);
420}
421
422/**
423 * ice_eswitch_napi_disable - disable NAPI for all port representors
424 * @reprs: xarray of reprs
425 */
426static void ice_eswitch_napi_disable(struct xarray *reprs)
427{
428 struct ice_repr *repr;
429 unsigned long id;
430
431 xa_for_each(reprs, id, repr)
432 napi_disable(&repr->q_vector->napi);
433}
434
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi, *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	/* Switchdev cannot be enabled while the uplink is enslaved to a
	 * bridge.
	 */
	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->eswitch.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->eswitch.control_vsi;
	/* cp VSI is created with 1 queue as default */
	pf->eswitch.qs.value = 1;
	pf->eswitch.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	pf->eswitch.is_running = true;

	return 0;

err_br_offloads:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
478
/**
 * ice_eswitch_disable_switchdev - disable eswitch resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;

	/* Reverse of ice_eswitch_enable_switchdev() */
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);
	ice_vsi_release(ctrl_vsi);

	pf->eswitch.is_running = false;
	pf->eswitch.qs.is_reaching = false;
}
494
495/**
496 * ice_eswitch_mode_set - set new eswitch mode
497 * @devlink: pointer to devlink structure
498 * @mode: eswitch mode to switch to
499 * @extack: pointer to extack structure
500 */
501int
502ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
503 struct netlink_ext_ack *extack)
504{
505 struct ice_pf *pf = devlink_priv(devlink);
506
507 if (pf->eswitch_mode == mode)
508 return 0;
509
510 if (ice_has_vfs(pf)) {
511 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
512 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
513 return -EOPNOTSUPP;
514 }
515
516 switch (mode) {
517 case DEVLINK_ESWITCH_MODE_LEGACY:
518 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
519 pf->hw.pf_id);
520 xa_destroy(&pf->eswitch.reprs);
521 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
522 break;
523 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
524 {
525 if (ice_is_adq_active(pf)) {
526 dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
527 NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
528 return -EOPNOTSUPP;
529 }
530
531 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
532 pf->hw.pf_id);
533 xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
534 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
535 break;
536 }
537 default:
538 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
539 return -EINVAL;
540 }
541
542 pf->eswitch_mode = mode;
543 return 0;
544}
545
546/**
547 * ice_eswitch_mode_get - get current eswitch mode
548 * @devlink: pointer to devlink structure
549 * @mode: output parameter for current eswitch mode
550 */
551int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
552{
553 struct ice_pf *pf = devlink_priv(devlink);
554
555 *mode = pf->eswitch_mode;
556 return 0;
557}
558
559/**
560 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
561 * @pf: pointer to PF structure
562 *
563 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
564 * false otherwise.
565 */
566bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
567{
568 return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
569}
570
571/**
572 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
573 * @pf: pointer to PF structure
574 */
575static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
576{
577 struct ice_repr *repr;
578 unsigned long id;
579
580 if (test_bit(ICE_DOWN, pf->state))
581 return;
582
583 xa_for_each(&pf->eswitch.reprs, id, repr)
584 ice_repr_start_tx_queues(repr);
585}
586
587/**
588 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
589 * @pf: pointer to PF structure
590 */
591void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
592{
593 struct ice_repr *repr;
594 unsigned long id;
595
596 if (test_bit(ICE_DOWN, pf->state))
597 return;
598
599 xa_for_each(&pf->eswitch.reprs, id, repr)
600 ice_repr_stop_tx_queues(repr);
601}
602
/* Quiesce all port representors: remove slow-path rules, stop representor
 * Tx queues, then disable representor NAPI. Reversed by
 * ice_eswitch_start_reprs().
 */
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
	ice_eswitch_del_sp_rules(pf);
	ice_eswitch_stop_all_tx_queues(pf);
	ice_eswitch_napi_disable(&pf->eswitch.reprs);
}
609
/* Bring all port representors back up, in the reverse order of
 * ice_eswitch_stop_reprs(): enable NAPI, start Tx queues, then re-add
 * slow-path rules.
 */
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
	ice_eswitch_napi_enable(&pf->eswitch.reprs);
	ice_eswitch_start_all_tx_queues(pf);
	ice_eswitch_add_sp_rules(pf);
}
616
/**
 * ice_eswitch_cp_change_queues - resize the control plane VSI queue count
 * @eswitch: pointer to eswitch struct
 * @change: delta to apply to the current queue count (may be negative or 0)
 *
 * If a target queue count was reserved via ice_eswitch_reserve_cp_queues(),
 * jump straight to that target once enough attach/detach calls have
 * accumulated; otherwise grow/shrink by @change. A nonzero target triggers a
 * close/rebuild/open cycle of the control plane VSI; rings are remapped to
 * vectors afterwards in either case.
 */
static void
ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
{
	struct ice_vsi *cp = eswitch->control_vsi;
	int queues = 0;

	if (eswitch->qs.is_reaching) {
		/* a batched target is pending; only rebuild once the
		 * accumulated count reaches it, otherwise wait (queues == 0)
		 */
		if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
			queues = eswitch->qs.to_reach;
			eswitch->qs.is_reaching = false;
		} else {
			queues = 0;
		}
	} else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
		   change < 0) {
		/* grow only when no spare allocated queues remain;
		 * shrink always
		 */
		queues = cp->alloc_txq + change;
	}

	if (queues) {
		cp->req_txq = queues;
		cp->req_rxq = queues;
		ice_vsi_close(cp);
		ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
		ice_vsi_open(cp);
	} else if (!change) {
		/* change == 0 means that VSI wasn't open, open it here */
		ice_vsi_open(cp);
	}

	eswitch->qs.value += change;
	ice_eswitch_remap_rings_to_vectors(eswitch);
}
649
/**
 * ice_eswitch_attach - attach VF to the eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be attached
 *
 * No-op in legacy mode. The first attach brings up switchdev itself; in
 * that case the control plane VSI already provides one queue, so no extra
 * queue is requested (change == 0). Creates and registers a port
 * representor for @vf and resizes the control plane VSI accordingly.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr;
	int change = 1;
	int err;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return 0;

	if (xa_empty(&pf->eswitch.reprs)) {
		err = ice_eswitch_enable_switchdev(pf);
		if (err)
			return err;
		/* Control plane VSI is created with 1 queue as default */
		pf->eswitch.qs.to_reach -= 1;
		change = 0;
	}

	ice_eswitch_stop_reprs(pf);

	repr = ice_repr_add_vf(vf);
	if (IS_ERR(repr)) {
		err = PTR_ERR(repr);
		goto err_create_repr;
	}

	err = ice_eswitch_setup_repr(pf, repr);
	if (err)
		goto err_setup_repr;

	/* repr IDs start at 1; 0 is reserved as "no representor" */
	err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
		       XA_LIMIT(1, INT_MAX), GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	vf->repr_id = repr->id;

	ice_eswitch_cp_change_queues(&pf->eswitch, change);
	ice_eswitch_start_reprs(pf);

	return 0;

err_xa_alloc:
	ice_eswitch_release_repr(pf, repr);
err_setup_repr:
	ice_repr_rem_vf(repr);
err_create_repr:
	/* if this was the first repr, tear switchdev back down */
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	ice_eswitch_start_reprs(pf);

	return err;
}
704
/**
 * ice_eswitch_detach - detach VF from the eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure whose representor is removed
 *
 * No-op if @vf has no representor. Removing the last representor disables
 * switchdev entirely and destroys the devlink rate nodes; otherwise the
 * control plane VSI is shrunk by one queue and representors are restarted.
 */
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
	struct devlink *devlink = priv_to_devlink(pf);

	if (!repr)
		return;

	ice_eswitch_stop_reprs(pf);
	xa_erase(&pf->eswitch.reprs, repr->id);

	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	else
		ice_eswitch_cp_change_queues(&pf->eswitch, -1);

	ice_eswitch_release_repr(pf, repr);
	ice_repr_rem_vf(repr);

	if (xa_empty(&pf->eswitch.reprs)) {
		/* since all port representors are destroyed, there is
		 * no point in keeping the nodes
		 */
		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
		devl_lock(devlink);
		devl_rate_nodes_destroy(devlink);
		devl_unlock(devlink);
	} else {
		ice_eswitch_start_reprs(pf);
	}
}
736
/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 *
 * Rebuilds the control plane VSI and then detaches every port representor
 * (NOTE(review): detach erases entries while iterating; xa_for_each
 * tolerates concurrent removal).
 *
 * Return: 0 on success (or when switchdev is not running), negative error
 * code on VSI rebuild failure.
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;
	int err;

	if (!ice_is_switchdev_running(pf))
		return 0;

	err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
	if (err)
		return err;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_eswitch_detach(pf, repr->vf);

	return 0;
}
759
760/**
761 * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
762 * @pf: pointer to PF structure
763 * @change: how many more (or less) queues is needed
764 *
765 * Remember to call ice_eswitch_attach/detach() the "change" times.
766 */
767void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
768{
769 if (pf->eswitch.qs.value + change < 0)
770 return;
771
772 pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
773 pf->eswitch.qs.is_reaching = true;
774}