/*
 * NOTE(review): the following lines were web-page chrome captured with this
 * file ("Linux Audio", "Check our new training course", "Loading...") and are
 * preserved here as a comment. This first section is the file as of kernel
 * v6.13.7.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2019-2021, Intel Corporation. */
  3
  4#include "ice.h"
  5#include "ice_lib.h"
  6#include "ice_eswitch.h"
  7#include "ice_eswitch_br.h"
  8#include "ice_fltr.h"
  9#include "ice_repr.h"
 10#include "devlink/devlink.h"
 11#include "ice_tc_lib.h"
 12
/**
 * ice_eswitch_setup_env - configure eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 *
 * Return: 0 on success, -ENODEV on any failure (every partial step is
 * unwound in reverse order before returning).
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct net_device *netdev = uplink_vsi->netdev;
	bool if_running = netif_running(netdev);
	struct ice_vsi_vlan_ops *vlan_ops;

	/* Quiesce the uplink before reprogramming it; the DOWN bit guards
	 * against bringing down a VSI that is already down.
	 */
	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
		if (ice_down(uplink_vsi))
			return -ENODEV;

	/* Drop all existing filters on the uplink VSI. */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* Clear synced unicast/multicast addresses under the addr lock. */
	netif_addr_lock_bh(netdev);
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
	netif_addr_unlock_bh(netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_vlan_zero;

	/* Make the uplink the default VSI for Rx, then for Tx. */
	if (ice_set_dflt_vsi(uplink_vsi))
		goto err_def_rx;

	if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
			     ICE_FLTR_TX))
		goto err_def_tx;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
	if (vlan_ops->dis_rx_filtering(uplink_vsi))
		goto err_vlan_filtering;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_local_lb(uplink_vsi, true))
		goto err_override_local_lb;

	if (if_running && ice_up(uplink_vsi))
		goto err_up;

	return 0;

	/* Unwind in exact reverse order of the setup above. */
err_up:
	ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_TX);
err_def_tx:
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_RX);
err_def_rx:
	ice_vsi_del_vlan_zero(uplink_vsi);
err_vlan_zero:
	/* Restore the legacy-mode MAC/broadcast filter removed above. */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	if (if_running)
		ice_up(uplink_vsi);

	return -ENODEV;
}
 86
/**
 * ice_eswitch_release_repr - clear PR VSI configuration
 * @pf: pointer to PF struct
 * @repr: pointer to PR
 *
 * Undo ice_eswitch_setup_repr()/ice_eswitch_cfg_vsi(): re-enable
 * antispoof on the source VSI, free the metadata dst, and restore the
 * representee's MAC/broadcast filter.
 */
static void
ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *vsi = repr->src_vsi;

	/* Skip representors that aren't configured */
	if (!repr->dst)
		return;

	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	metadata_dst_free(repr->dst);
	repr->dst = NULL;	/* mark repr as unconfigured */
	ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
				       ICE_FWD_TO_VSI);
}
107
/**
 * ice_eswitch_setup_repr - configure PR to run in switchdev mode
 * @pf: pointer to PF struct
 * @repr: pointer to PR struct
 *
 * Allocates the HW_PORT_MUX metadata dst that steers the representor's
 * Tx traffic to the representee VSI via the uplink netdev.
 *
 * Return: 0 on success, -ENOMEM if the metadata dst cannot be allocated.
 */
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi *vsi = repr->src_vsi;
	struct metadata_dst *dst;

	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
				       GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;

	/* The uplink must keep skb dsts so the metadata survives to xmit. */
	netif_keep_dst(uplink_vsi->netdev);

	dst = repr->dst;
	dst->u.port_info.port_id = vsi->vsi_num;
	dst->u.port_info.lower_dev = uplink_vsi->netdev;

	return 0;
}
132
/**
 * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
 * @vsi: VSI structure of representee
 * @mac: representee MAC
 *
 * Removes the representee's filters, clears antispoof, and adds VLAN 0.
 * On failure the antispoof setting and the MAC/broadcast filter are
 * restored.
 *
 * Return: 0 on success, non-zero on error.
 */
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
{
	int err;

	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);

	err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (err)
		goto err_update_security;

	err = ice_vsi_add_vlan_zero(vsi);
	if (err)
		goto err_vlan_zero;

	return 0;

err_vlan_zero:
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
err_update_security:
	/* Put back the filter removed by ice_remove_vsi_fltr() above. */
	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);

	return err;
}
 
 
 
 
 
 
 
 
163
/**
 * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
 * @vsi: VSI structure of representee
 * @mac: representee MAC
 *
 * Reverse of ice_eswitch_cfg_vsi(): re-enable antispoof and restore the
 * MAC/broadcast forwarding filter.
 */
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
{
	ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
	ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
174
/**
 * ice_eswitch_update_repr - reconfigure port representor
 * @repr_id: representor ID (in/out: updated when the VSI number changed)
 * @vsi: VSI for which port representor is configured
 *
 * Called when the representee VSI has been rebuilt: points the repr (and
 * its bridge port, if any) at the new VSI, reapplies slow-path config,
 * and re-keys the repr in the xarray if its VSI number changed.
 */
void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	int err;

	if (!ice_is_switchdev_running(pf))
		return;

	repr = xa_load(&pf->eswitch.reprs, *repr_id);
	if (!repr)
		return;

	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	if (repr->br_port)
		repr->br_port->vsi = vsi;

	err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
			repr->id);

	/* The VSI number is different, reload the PR with new id */
	if (repr->id != vsi->vsi_num) {
		xa_erase(&pf->eswitch.reprs, repr->id);
		repr->id = vsi->vsi_num;
		if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
			dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
				repr->id);
		*repr_id = repr->id;
	}
}
214
/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Attaches the representor's metadata dst to the skb and forwards it out
 * through the uplink netdev (dst->u.port_info.lower_dev).
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_repr *repr = ice_netdev_to_repr(netdev);
	unsigned int len = skb->len;	/* cache: skb may be freed by xmit */
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	ice_repr_inc_tx_stats(repr, len, ret);

	return ret;
}
239
/**
 * ice_eswitch_set_target_vsi - set eswitch context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 *
 * Without a metadata dst the packet is switched to the uplink; with one,
 * it is directed to the VSI stored in the dst's port_id.
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = FIELD_PREP(ICE_TXD_CTX_QW1_VSI_M,
				     dst->u.port_info.port_id);
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}
262
/**
 * ice_eswitch_release_env - clear eswitch HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 * Mirrors the error-unwind path of ice_eswitch_setup_env().
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;

	vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);

	ice_vsi_update_local_lb(uplink_vsi, false);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	vlan_ops->ena_rx_filtering(uplink_vsi);
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_TX);
	ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
			 ICE_FLTR_RX);
	/* Restore the legacy-mode MAC/broadcast filter. */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
288
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Return: 0 on success; -EINVAL if the uplink is a bridge port, -ENODEV
 * otherwise on failure.
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi;

	uplink_vsi = ice_get_main_vsi(pf);
	if (!uplink_vsi)
		return -ENODEV;

	/* The uplink must not be enslaved to a bridge in switchdev mode. */
	if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
		dev_err(ice_pf_to_dev(pf),
			"Uplink port cannot be a bridge port\n");
		return -EINVAL;
	}

	pf->eswitch.uplink_vsi = uplink_vsi;

	if (ice_eswitch_setup_env(pf))
		return -ENODEV;

	if (ice_eswitch_br_offloads_init(pf))
		goto err_br_offloads;

	pf->eswitch.is_running = true;

	return 0;

err_br_offloads:
	ice_eswitch_release_env(pf);
	return -ENODEV;
}
323
/**
 * ice_eswitch_disable_switchdev - disable eswitch resources
 * @pf: pointer to PF structure
 *
 * Reverse of ice_eswitch_enable_switchdev(): tear down bridge offloads,
 * then restore legacy HW filter configuration.
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	ice_eswitch_br_offloads_deinit(pf);
	ice_eswitch_release_env(pf);

	pf->eswitch.is_running = false;
}
335
/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 *
 * Only records the new mode and (de)initializes the representor xarray;
 * representors themselves are created/removed on VF/SF attach/detach.
 *
 * Return: 0 on success; -EOPNOTSUPP when VFs exist or ADQ is active;
 * -EINVAL for an unknown mode.
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	/* Mode can only change while no VFs are instantiated. */
	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		xa_destroy(&pf->eswitch.reprs);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		if (ice_is_adq_active(pf)) {
			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
			return -EOPNOTSUPP;
		}

		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		xa_init(&pf->eswitch.reprs);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}
386
387/**
388 * ice_eswitch_mode_get - get current eswitch mode
389 * @devlink: pointer to devlink structure
390 * @mode: output parameter for current eswitch mode
391 */
392int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
393{
394	struct ice_pf *pf = devlink_priv(devlink);
395
396	*mode = pf->eswitch_mode;
397	return 0;
398}
399
400/**
401 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
402 * @pf: pointer to PF structure
403 *
404 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
405 * false otherwise.
406 */
407bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
408{
409	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
410}
411
/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 *
 * No-op while the PF is going down (ICE_DOWN set).
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_repr_start_tx_queues(repr);
}
427
/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 *
 * No-op while the PF is going down (ICE_DOWN set).
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	unsigned long id;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	xa_for_each(&pf->eswitch.reprs, id, repr)
		ice_repr_stop_tx_queues(repr);
}
443
/* Quiesce all port representors while the eswitch is reconfigured. */
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
	ice_eswitch_stop_all_tx_queues(pf);
}
448
/* Resume all port representors after eswitch reconfiguration. */
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
	ice_eswitch_start_all_tx_queues(pf);
}
453
/**
 * ice_eswitch_attach - attach a representor to the eswitch
 * @pf: pointer to PF structure
 * @repr: representor to attach
 * @id: output parameter receiving the representor's xarray ID
 *
 * Enables switchdev when the first representor is attached; on failure
 * the partial setup is unwound and switchdev is disabled again if this
 * was the only representor.
 *
 * Return: 0 on success (or immediately in legacy mode), negative error
 * code on failure.
 */
static int
ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
	int err;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return 0;

	/* First representor brings the whole switchdev environment up. */
	if (xa_empty(&pf->eswitch.reprs)) {
		err = ice_eswitch_enable_switchdev(pf);
		if (err)
			return err;
	}

	ice_eswitch_stop_reprs(pf);

	err = repr->ops.add(repr);
	if (err)
		goto err_create_repr;

	err = ice_eswitch_setup_repr(pf, repr);
	if (err)
		goto err_setup_repr;

	err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	*id = repr->id;

	ice_eswitch_start_reprs(pf);

	return 0;

err_xa_alloc:
	ice_eswitch_release_repr(pf, repr);
err_setup_repr:
	repr->ops.rem(repr);
err_create_repr:
	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);
	ice_eswitch_start_reprs(pf);

	return err;
}
499
/**
 * ice_eswitch_attach_vf - attach VF to a eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be attached
 *
 * During attaching port representor for VF is created.
 * The attach itself runs under the devlink instance lock; the created
 * representor is destroyed again if the attach fails.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = ice_repr_create_vf(vf);
	struct devlink *devlink = priv_to_devlink(pf);
	int err;

	if (IS_ERR(repr))
		return PTR_ERR(repr);

	devl_lock(devlink);
	err = ice_eswitch_attach(pf, repr, &vf->repr_id);
	if (err)
		ice_repr_destroy(repr);
	devl_unlock(devlink);

	return err;
}
526
/**
 * ice_eswitch_attach_sf - attach SF to a eswitch
 * @pf: pointer to PF structure
 * @sf: pointer to SF structure to be attached
 *
 * During attaching port representor for SF is created.
 * Unlike the VF variant, no devlink lock is taken here.
 *
 * Return: zero on success or an error code on failure.
 */
int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = ice_repr_create_sf(sf);
	int err;

	if (IS_ERR(repr))
		return PTR_ERR(repr);

	err = ice_eswitch_attach(pf, repr, &sf->repr_id);
	if (err)
		ice_repr_destroy(repr);

	return err;
}
550
/**
 * ice_eswitch_detach - detach a port representor from the eswitch
 * @pf: pointer to PF structure
 * @repr: representor to remove
 *
 * Removes @repr from the xarray and releases its resources. When the
 * last representor is gone, switchdev is disabled and the devlink rate
 * nodes are destroyed; otherwise the remaining representors restart.
 */
static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
{
	ice_eswitch_stop_reprs(pf);
	repr->ops.rem(repr);

	xa_erase(&pf->eswitch.reprs, repr->id);

	if (xa_empty(&pf->eswitch.reprs))
		ice_eswitch_disable_switchdev(pf);

	ice_eswitch_release_repr(pf, repr);
	ice_repr_destroy(repr);

	if (xa_empty(&pf->eswitch.reprs)) {
		struct devlink *devlink = priv_to_devlink(pf);

		/* since all port representors are destroyed, there is
		 * no point in keeping the nodes
		 */
		ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
		devl_rate_nodes_destroy(devlink);
	} else {
		ice_eswitch_start_reprs(pf);
	}
}
576
/**
 * ice_eswitch_detach_vf - detach VF from a eswitch
 * @pf: pointer to PF structure
 * @vf: pointer to VF structure to be detached
 *
 * No-op if the VF has no representor. Runs under the devlink lock.
 */
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
	struct devlink *devlink = priv_to_devlink(pf);

	if (!repr)
		return;

	devl_lock(devlink);
	ice_eswitch_detach(pf, repr);
	devl_unlock(devlink);
}
594
/**
 * ice_eswitch_detach_sf - detach SF from a eswitch
 * @pf: pointer to PF structure
 * @sf: pointer to SF structure to be detached
 *
 * No-op if the SF has no representor. No devlink lock is taken here.
 */
void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
	struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);

	if (!repr)
		return;

	ice_eswitch_detach(pf, repr);
}
609
/**
 * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
 * @rx_ring: ring used to receive the packet
 * @rx_desc: descriptor used to get src_vsi value
 *
 * Get src_vsi value from descriptor and load correct representor. If it isn't
 * found return rx_ring->netdev.
 */
struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
					  union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
	struct ice_32b_rx_flex_desc_nic_2 *desc;
	struct ice_repr *repr;

	/* Reinterpret the flex descriptor to access the src_vsi field. */
	desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
	repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
	if (!repr)
		return rx_ring->netdev;

	return repr->netdev;
}
/*
 * NOTE(review): everything below is a second capture of the same file as of
 * kernel v6.2 (older switchdev implementation using a control VSI).
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2019-2021, Intel Corporation. */
  3
  4#include "ice.h"
  5#include "ice_lib.h"
  6#include "ice_eswitch.h"
 
  7#include "ice_fltr.h"
  8#include "ice_repr.h"
  9#include "ice_devlink.h"
 10#include "ice_tc_lib.h"
 11
/**
 * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 * @mac: VF's MAC address
 *
 * This function adds advanced rule that forwards packets with
 * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
 *
 * Return: 0 on success (rule_added is set), negative error code otherwise.
 */
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	/* Match on outer source MAC, full 48-bit mask. */
	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	rule_info.sw_act.flag |= ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.rx = false;
	/* Forward to the ctrl VSI queue dedicated to this VF. */
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       vf->repr->mac_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
			vf->vf_id);
	else
		vf->repr->rule_added = true;

	kfree(list);
	return err;
}
 60
/**
 * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
 * @vf: pointer to vF struct
 *
 * This function replays VF's MAC rule after reset.
 * No-op when switchdev is not running or the HW LAN address is invalid.
 */
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
	int err;

	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
						  vf->hw_lan_addr.addr);
		if (err) {
			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
				vf->hw_lan_addr.addr, vf->vf_id, err);
			return;
		}
		vf->num_mac++;

		/* Keep the device LAN address in sync with the HW one. */
		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}
}
 87
/**
 * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's MAC
 * address (src MAC) to the corresponding switchdev ctrl VSI queue.
 * No-op if switchdev is not running or the rule was never added.
 */
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (!vf->repr->rule_added)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
	vf->repr->rule_added = false;
}
106
/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds HW filters configuration specific for switchdev
 * mode.
 *
 * Return: 0 on success, -ENODEV on failure (partial setup is unwound).
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	/* Drop all existing filters on the uplink VSI. */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* Clear synced unicast/multicast addresses under the addr lock. */
	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

	/* Only install the default-VSI rule if none exists yet; remember
	 * whether we added it so the unwind only removes our own rule.
	 */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
		if (ice_set_dflt_vsi(uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
	/* Restore the legacy-mode MAC/broadcast filter removed above. */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
161
/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev number of allocated Tx/Rx rings is equal.
 *
 * This function fills q_vectors structures associated with representor and
 * move each ring pairs to port representor netdevs. Each port representor
 * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
 * number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		/* Queue index maps 1:1 to VF ID in this scheme. */
		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		/* Drop the reference taken by ice_get_vf_by_id(). */
		ice_put_vf(vf);
	}
}
216
/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 *
 * Reverse of ice_eswitch_setup_reprs() for every configured VF repr:
 * restore antispoof, free the metadata dst, restore the MAC filter, and
 * remove the repr's NAPI instance.
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}
246
/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 *
 * For each VF: remove its filters, allocate a HW_PORT_MUX metadata dst,
 * clear antispoof, add VLAN 0, and register NAPI. A second pass fills
 * the dst port info and points repr traffic at the control VSI.
 * Each per-VF failure path restores exactly the state set so far for
 * that VF before jumping to the common unwind.
 *
 * Return: 0 on success, -ENODEV on failure (all reprs are released).
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
			       ice_napi_poll);

		netif_keep_dst(vf->repr->netdev);
	}

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}
319
/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 *
 * Called after the VF VSI has been rebuilt: points the repr at the new
 * VSI and clears antispoof again; on failure the MAC filter is restored.
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}
346
/**
 * ice_eswitch_port_start_xmit - callback for packets transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 * (NETDEV_TX_BUSY while a reset is in progress or VFs are disabled).
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	/* Steer onto the ctrl VSI queue dedicated to this VF. */
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}
376
/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 *
 * Without a metadata dst the packet is switched to the uplink; with one,
 * it is directed to the VSI stored in the dst's port_id.
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}
399
/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes HW filters configuration specific for switchdev
 * mode and restores default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi);
	/* Restore the legacy-mode MAC/broadcast filter. */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}
419
420/**
421 * ice_eswitch_vsi_setup - configure switchdev control VSI
422 * @pf: pointer to PF structure
423 * @pi: pointer to port_info structure
424 */
425static struct ice_vsi *
426ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
427{
428	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
429}
430
/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 *
 * Caller must hold pf->vfs.table_lock (asserted below).
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}
445
/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 *
 * Caller must hold pf->vfs.table_lock (asserted below).
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}
460
/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 *
 * Caller must hold pf->vfs.table_lock (asserted below).
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}
475
/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 *
 * Create the switchdev control VSI, install the switchdev HW environment
 * on the uplink VSI, add and set up port representors for all VFs, remap
 * control VSI rings to representor vectors, then open the control VSI and
 * enable representor NAPI.
 *
 * Return: 0 on success, -ENODEV on any failure (individual step errors are
 * not propagated).
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	/* NOTE(review): on ice_vsi_open() failure we jump to err_setup_reprs,
	 * which removes the representors but does not explicitly undo
	 * ice_eswitch_setup_reprs() — confirm ice_repr_rem_from_all_vfs()
	 * covers that teardown.
	 */
	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
519
/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 *
 * Quiesce representor NAPI, restore the uplink VSI's legacy-mode filter
 * configuration, remove advanced rules targeting the control VSI, release
 * the representors and the control VSI, and finally remove the representor
 * structures from all VFs. The ordering mirrors the setup path.
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}
535
536/**
537 * ice_eswitch_mode_set - set new eswitch mode
538 * @devlink: pointer to devlink structure
539 * @mode: eswitch mode to switch to
540 * @extack: pointer to extack structure
541 */
542int
543ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
544		     struct netlink_ext_ack *extack)
545{
546	struct ice_pf *pf = devlink_priv(devlink);
547
548	if (pf->eswitch_mode == mode)
549		return 0;
550
551	if (ice_has_vfs(pf)) {
552		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
553		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
554		return -EOPNOTSUPP;
555	}
556
557	switch (mode) {
558	case DEVLINK_ESWITCH_MODE_LEGACY:
559		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
560			 pf->hw.pf_id);
 
561		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
562		break;
563	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
564	{
 
 
 
 
 
 
565		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
566			 pf->hw.pf_id);
 
567		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
568		break;
569	}
570	default:
571		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
572		return -EINVAL;
573	}
574
575	pf->eswitch_mode = mode;
576	return 0;
577}
578
579/**
580 * ice_eswitch_mode_get - get current eswitch mode
581 * @devlink: pointer to devlink structure
582 * @mode: output parameter for current eswitch mode
583 */
584int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
585{
586	struct ice_pf *pf = devlink_priv(devlink);
587
588	*mode = pf->eswitch_mode;
589	return 0;
590}
591
592/**
593 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
594 * @pf: pointer to PF structure
595 *
596 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
597 * false otherwise.
598 */
599bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
600{
601	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
602}
603
604/**
605 * ice_eswitch_release - cleanup eswitch
606 * @pf: pointer to PF structure
607 */
608void ice_eswitch_release(struct ice_pf *pf)
609{
610	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
 
 
 
611		return;
612
613	ice_eswitch_disable_switchdev(pf);
614	pf->switchdev.is_running = false;
615}
616
617/**
618 * ice_eswitch_configure - configure eswitch
619 * @pf: pointer to PF structure
620 */
621int ice_eswitch_configure(struct ice_pf *pf)
622{
623	int status;
 
 
 
 
624
625	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
626		return 0;
627
628	status = ice_eswitch_enable_switchdev(pf);
629	if (status)
630		return status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
631
632	pf->switchdev.is_running = true;
633	return 0;
 
 
 
 
 
 
 
 
 
 
 
634}
635
636/**
637 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
638 * @pf: pointer to PF structure
 
 
 
 
 
639 */
640static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
641{
642	struct ice_vf *vf;
643	unsigned int bkt;
 
644
645	lockdep_assert_held(&pf->vfs.table_lock);
 
646
647	if (test_bit(ICE_DOWN, pf->state))
648		return;
 
 
 
649
650	ice_for_each_vf(pf, bkt, vf) {
651		if (vf->repr)
652			ice_repr_start_tx_queues(vf->repr);
653	}
654}
655
656/**
657 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
658 * @pf: pointer to PF structure
 
 
 
 
 
659 */
660void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
661{
662	struct ice_vf *vf;
663	unsigned int bkt;
 
 
664
665	lockdep_assert_held(&pf->vfs.table_lock);
 
666
667	if (test_bit(ICE_DOWN, pf->state))
668		return;
 
 
 
669
670	ice_for_each_vf(pf, bkt, vf) {
671		if (vf->repr)
672			ice_repr_stop_tx_queues(vf->repr);
 
 
 
 
673	}
674}
675
/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 *
 * Re-establish switchdev state (typically after a reset): re-install the
 * uplink HW environment, re-set-up representors, remap control VSI rings,
 * replay TC filters, reopen the control VSI, then re-enable NAPI and the
 * representors' Tx queues.
 *
 * Return: 0 on success, error code of the first failing step otherwise.
 * NOTE(review): on failure the eswitch is left partially rebuilt; the
 * caller is presumably responsible for further teardown — confirm.
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	/* Quiesce and drop representor NAPI before re-wiring rings */
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	/* Re-add TC filters that were lost across the reset */
	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}