v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2019-2021, Intel Corporation. */
  3
  4#include "ice.h"
  5#include "ice_eswitch.h"
  6#include "devlink/devlink.h"
  7#include "devlink/devlink_port.h"
  8#include "ice_sriov.h"
  9#include "ice_tc_lib.h"
 10#include "ice_dcb_lib.h"
 11
 12/**
 13 * ice_repr_inc_tx_stats - increment Tx statistic by one packet
 14 * @repr: repr to increment stats on
 15 * @len: length of the packet
 16 * @xmit_status: value returned by xmit function
 17 */
 18void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
 19			   int xmit_status)
 20{
 21	struct ice_repr_pcpu_stats *stats;
 22
 23	if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
 24		     xmit_status != NET_XMIT_CN)) {
 25		this_cpu_inc(repr->stats->tx_drops);
 26		return;
 27	}
 28
 29	stats = this_cpu_ptr(repr->stats);
 30	u64_stats_update_begin(&stats->syncp);
 31	stats->tx_packets++;
 32	stats->tx_bytes += len;
 33	u64_stats_update_end(&stats->syncp);
 34}
 35
 36/**
 37 * ice_repr_inc_rx_stats - increment Rx statistic by one packet
 38 * @netdev: repr netdev to increment stats on
 39 * @len: length of the packet
 40 */
 41void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
 42{
 43	struct ice_repr *repr = ice_netdev_to_repr(netdev);
 44	struct ice_repr_pcpu_stats *stats;
 45
 46	stats = this_cpu_ptr(repr->stats);
 47	u64_stats_update_begin(&stats->syncp);
 48	stats->rx_packets++;
 49	stats->rx_bytes += len;
 50	u64_stats_update_end(&stats->syncp);
 51}
 52
 53/**
 54 * ice_repr_get_stats64 - get VF stats for VFPR use
 55 * @netdev: pointer to port representor netdev
 56 * @stats: pointer to struct where stats can be stored
 57 */
 58static void
 59ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 60{
 61	struct ice_netdev_priv *np = netdev_priv(netdev);
 62	struct ice_repr *repr = np->repr;
 63	struct ice_eth_stats *eth_stats;
 64	struct ice_vsi *vsi;
 65
 66	if (repr->ops.ready(repr))
 67		return;
 68	vsi = repr->src_vsi;
 69
 70	ice_update_vsi_stats(vsi);
 71	eth_stats = &vsi->eth_stats;
 72
 73	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
 74			    eth_stats->tx_multicast;
 75	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
 76			    eth_stats->rx_multicast;
 77	stats->tx_bytes = eth_stats->tx_bytes;
 78	stats->rx_bytes = eth_stats->rx_bytes;
 79	stats->multicast = eth_stats->rx_multicast;
 80	stats->tx_errors = eth_stats->tx_errors;
 81	stats->tx_dropped = eth_stats->tx_discards;
 82	stats->rx_dropped = eth_stats->rx_discards;
 83}
 84
 85/**
 86 * ice_netdev_to_repr - Get port representor for given netdevice
 87 * @netdev: pointer to port representor netdev
 88 */
 89struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
 90{
 91	struct ice_netdev_priv *np = netdev_priv(netdev);
 92
 93	return np->repr;
 94}
 95
 96/**
 97 * ice_repr_vf_open - Enable port representor's network interface
 98 * @netdev: network interface device structure
 99 *
100 * The open entry point is called when a port representor's network
101 * interface is made active by the system (IFF_UP). Corresponding
102 * VF is notified about link status change.
103 *
104 * Returns 0 on success
105 */
106static int ice_repr_vf_open(struct net_device *netdev)
107{
108	struct ice_repr *repr = ice_netdev_to_repr(netdev);
109	struct ice_vf *vf;
110
111	vf = repr->vf;
112	vf->link_forced = true;
113	vf->link_up = true;
114	ice_vc_notify_vf_link_state(vf);
115
116	netif_carrier_on(netdev);
117	netif_tx_start_all_queues(netdev);
118
119	return 0;
120}
121
122static int ice_repr_sf_open(struct net_device *netdev)
123{
124	netif_carrier_on(netdev);
125	netif_tx_start_all_queues(netdev);
126
127	return 0;
128}
129
130/**
131 * ice_repr_vf_stop - Disable port representor's network interface
132 * @netdev: network interface device structure
133 *
134 * The stop entry point is called when a port representor's network
135 * interface is de-activated by the system. Corresponding
136 * VF is notified about link status change.
137 *
138 * Returns 0 on success
139 */
140static int ice_repr_vf_stop(struct net_device *netdev)
141{
142	struct ice_repr *repr = ice_netdev_to_repr(netdev);
143	struct ice_vf *vf;
144
145	vf = repr->vf;
146	vf->link_forced = true;
147	vf->link_up = false;
148	ice_vc_notify_vf_link_state(vf);
149
150	netif_carrier_off(netdev);
151	netif_tx_stop_all_queues(netdev);
152
153	return 0;
154}
155
156static int ice_repr_sf_stop(struct net_device *netdev)
157{
158	netif_carrier_off(netdev);
159	netif_tx_stop_all_queues(netdev);
160
161	return 0;
162}
163
164/**
165 * ice_repr_sp_stats64 - get slow path stats for port representor
166 * @dev: network interface device structure
167 * @stats: netlink stats structure
168 */
169static int
170ice_repr_sp_stats64(const struct net_device *dev,
171		    struct rtnl_link_stats64 *stats)
172{
173	struct ice_repr *repr = ice_netdev_to_repr(dev);
174	int i;
175
176	for_each_possible_cpu(i) {
177		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
178		struct ice_repr_pcpu_stats *repr_stats;
179		unsigned int start;
180
181		repr_stats = per_cpu_ptr(repr->stats, i);
182		do {
183			start = u64_stats_fetch_begin(&repr_stats->syncp);
184			tbytes = repr_stats->tx_bytes;
185			tpkts = repr_stats->tx_packets;
186			tdrops = repr_stats->tx_drops;
187			rbytes = repr_stats->rx_bytes;
188			rpkts = repr_stats->rx_packets;
189		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));
190
191		stats->tx_bytes += tbytes;
192		stats->tx_packets += tpkts;
193		stats->tx_dropped += tdrops;
194		stats->rx_bytes += rbytes;
195		stats->rx_packets += rpkts;
196	}
197	return 0;
198}
199
200static bool
201ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
202{
203	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
204}
205
206static int
207ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
208			       void *sp)
209{
210	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
211		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
212
213	return -EINVAL;
214}
215
216static int
217ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
218			     struct flow_cls_offload *flower)
219{
220	switch (flower->command) {
221	case FLOW_CLS_REPLACE:
222		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
223	case FLOW_CLS_DESTROY:
224		return ice_del_cls_flower(repr->src_vsi, flower);
225	default:
226		return -EINVAL;
227	}
228}
229
230static int
231ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
232			   void *cb_priv)
233{
234	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
235	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
236
237	switch (type) {
238	case TC_SETUP_CLSFLOWER:
239		return ice_repr_setup_tc_cls_flower(np->repr, flower);
240	default:
241		return -EOPNOTSUPP;
242	}
243}
244
245static LIST_HEAD(ice_repr_block_cb_list);
246
247static int
248ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
249		  void *type_data)
250{
251	struct ice_netdev_priv *np = netdev_priv(netdev);
252
253	switch (type) {
254	case TC_SETUP_BLOCK:
255		return flow_block_cb_setup_simple((struct flow_block_offload *)
256						  type_data,
257						  &ice_repr_block_cb_list,
258						  ice_repr_setup_tc_block_cb,
259						  np, np, true);
260	default:
261		return -EOPNOTSUPP;
262	}
263}
264
265static const struct net_device_ops ice_repr_vf_netdev_ops = {
266	.ndo_get_stats64 = ice_repr_get_stats64,
267	.ndo_open = ice_repr_vf_open,
268	.ndo_stop = ice_repr_vf_stop,
269	.ndo_start_xmit = ice_eswitch_port_start_xmit,
270	.ndo_setup_tc = ice_repr_setup_tc,
271	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
272	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
273};
274
275static const struct net_device_ops ice_repr_sf_netdev_ops = {
276	.ndo_get_stats64 = ice_repr_get_stats64,
277	.ndo_open = ice_repr_sf_open,
278	.ndo_stop = ice_repr_sf_stop,
279	.ndo_start_xmit = ice_eswitch_port_start_xmit,
280	.ndo_setup_tc = ice_repr_setup_tc,
281	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
282	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
283};
284
285/**
286 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
287 * @netdev: pointer to netdev
288 */
289bool ice_is_port_repr_netdev(const struct net_device *netdev)
290{
291	return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
292			  netdev->netdev_ops == &ice_repr_sf_netdev_ops);
293}
294
295/**
296 * ice_repr_reg_netdev - register port representor netdev
297 * @netdev: pointer to port representor netdev
298 * @ops: new ops for netdev
299 */
300static int
301ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
302{
303	eth_hw_addr_random(netdev);
304	netdev->netdev_ops = ops;
305	ice_set_ethtool_repr_ops(netdev);
306
307	netdev->hw_features |= NETIF_F_HW_TC;
308
309	netif_carrier_off(netdev);
310	netif_tx_stop_all_queues(netdev);
311
312	return register_netdev(netdev);
313}
314
315static int ice_repr_ready_vf(struct ice_repr *repr)
316{
317	return !ice_check_vf_ready_for_cfg(repr->vf);
318}
319
320static int ice_repr_ready_sf(struct ice_repr *repr)
321{
322	return !repr->sf->active;
323}
324
325/**
326 * ice_repr_destroy - remove representor from VF
327 * @repr: pointer to representor structure
328 */
329void ice_repr_destroy(struct ice_repr *repr)
330{
331	free_percpu(repr->stats);
332	free_netdev(repr->netdev);
333	kfree(repr);
334}
335
336static void ice_repr_rem_vf(struct ice_repr *repr)
337{
338	ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
339	unregister_netdev(repr->netdev);
340	ice_devlink_destroy_vf_port(repr->vf);
341	ice_virtchnl_set_dflt_ops(repr->vf);
342}
343
344static void ice_repr_rem_sf(struct ice_repr *repr)
345{
346	unregister_netdev(repr->netdev);
347	ice_devlink_destroy_sf_port(repr->sf);
348}
349
350static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
351{
352	/* only export if ADQ and DCB disabled and eswitch enabled */
353	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
354	    !ice_is_switchdev_running(pf))
355		return;
356
357	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
358}
359
360/**
361 * ice_repr_create - add representor for generic VSI
362 * @src_vsi: pointer to VSI structure of device to represent
363 */
364static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
365{
366	struct ice_netdev_priv *np;
367	struct ice_repr *repr;
368	int err;
369
370	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
371	if (!repr)
372		return ERR_PTR(-ENOMEM);
373
374	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
375	if (!repr->netdev) {
376		err =  -ENOMEM;
377		goto err_alloc;
378	}
379
380	repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
381	if (!repr->stats) {
382		err = -ENOMEM;
383		goto err_stats;
384	}
385
386	repr->src_vsi = src_vsi;
387	repr->id = src_vsi->vsi_num;
388	np = netdev_priv(repr->netdev);
389	np->repr = repr;
390
391	repr->netdev->min_mtu = ETH_MIN_MTU;
392	repr->netdev->max_mtu = ICE_MAX_MTU;
393
394	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
395
396	return repr;
397
398err_stats:
399	free_netdev(repr->netdev);
400err_alloc:
401	kfree(repr);
402	return ERR_PTR(err);
403}
404
405static int ice_repr_add_vf(struct ice_repr *repr)
406{
407	struct ice_vf *vf = repr->vf;
408	struct devlink *devlink;
409	int err;
410
411	err = ice_devlink_create_vf_port(vf);
412	if (err)
413		return err;
414
415	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
416	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
417	if (err)
418		goto err_netdev;
419
420	err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
421	if (err)
422		goto err_cfg_vsi;
423
424	ice_virtchnl_set_repr_ops(vf);
425
426	devlink = priv_to_devlink(vf->pf);
427	ice_repr_set_tx_topology(vf->pf, devlink);
428
429	return 0;
430
431err_cfg_vsi:
432	unregister_netdev(repr->netdev);
433err_netdev:
434	ice_devlink_destroy_vf_port(vf);
435	return err;
436}
437
438/**
439 * ice_repr_create_vf - add representor for VF VSI
440 * @vf: VF to create port representor on
441 *
442 * Set correct representor type for VF and function pointers.
443 *
444 * Return: created port representor on success, error otherwise
445 */
446struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
447{
448	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
449	struct ice_repr *repr;
450
451	if (!vsi)
452		return ERR_PTR(-EINVAL);
453
454	repr = ice_repr_create(vsi);
455	if (IS_ERR(repr))
456		return repr;
457
458	repr->type = ICE_REPR_TYPE_VF;
459	repr->vf = vf;
460	repr->ops.add = ice_repr_add_vf;
461	repr->ops.rem = ice_repr_rem_vf;
462	repr->ops.ready = ice_repr_ready_vf;
463
464	ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
465
466	return repr;
467}
468
469static int ice_repr_add_sf(struct ice_repr *repr)
470{
471	struct ice_dynamic_port *sf = repr->sf;
472	int err;
473
474	err = ice_devlink_create_sf_port(sf);
475	if (err)
476		return err;
477
478	SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
479	err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
480	if (err)
481		goto err_netdev;
482
483	ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
484
485	return 0;
486
487err_netdev:
488	ice_devlink_destroy_sf_port(sf);
489	return err;
490}
491
492/**
493 * ice_repr_create_sf - add representor for SF VSI
494 * @sf: SF to create port representor on
495 *
496 * Set correct representor type for SF and function pointers.
497 *
498 * Return: created port representor on success, error otherwise
499 */
500struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
501{
502	struct ice_repr *repr = ice_repr_create(sf->vsi);
503
504	if (IS_ERR(repr))
505		return repr;
506
507	repr->type = ICE_REPR_TYPE_SF;
508	repr->sf = sf;
509	repr->ops.add = ice_repr_add_sf;
510	repr->ops.rem = ice_repr_rem_sf;
511	repr->ops.ready = ice_repr_ready_sf;
512
513	ether_addr_copy(repr->parent_mac, sf->hw_addr);
514
515	return repr;
516}
517
518struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
519{
520	return xa_load(&pf->eswitch.reprs, id);
521}
522
523/**
524 * ice_repr_start_tx_queues - start Tx queues of port representor
525 * @repr: pointer to repr structure
526 */
527void ice_repr_start_tx_queues(struct ice_repr *repr)
528{
529	netif_carrier_on(repr->netdev);
530	netif_tx_start_all_queues(repr->netdev);
531}
532
533/**
534 * ice_repr_stop_tx_queues - stop Tx queues of port representor
535 * @repr: pointer to repr structure
536 */
537void ice_repr_stop_tx_queues(struct ice_repr *repr)
538{
539	netif_carrier_off(repr->netdev);
540	netif_tx_stop_all_queues(repr->netdev);
541}
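
The Tx/Rx counters above (ice_repr_inc_tx_stats(), ice_repr_inc_rx_stats()) and the aggregation loop in ice_repr_sp_stats64() follow the kernel's standard per-CPU u64_stats idiom. The standalone sketch below only illustrates that idiom; it is not driver code, and every ex_* name is invented for the example. Writers bump counters for the local CPU between u64_stats_update_begin()/u64_stats_update_end(); readers walk every possible CPU and retry a snapshot whenever a writer raced with it.

/* Illustrative only -- a minimal reduction of the per-CPU stats pattern
 * used by the representor code above; all ex_* names are made up.
 */
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct ex_pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* required by netdev_alloc_pcpu_stats() */
};

/* hot path: update the local CPU's counters, no locking */
static void ex_stats_inc(struct ex_pcpu_stats __percpu *pcpu, unsigned int len)
{
	struct ex_pcpu_stats *stats = this_cpu_ptr(pcpu);

	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->syncp);
}

/* slow path: sum all CPUs, retrying any snapshot a writer raced with */
static void ex_stats_sum(struct ex_pcpu_stats __percpu *pcpu,
			 u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		struct ex_pcpu_stats *stats = per_cpu_ptr(pcpu, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			p = stats->packets;
			b = stats->bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

The per-CPU area itself would come from netdev_alloc_pcpu_stats(struct ex_pcpu_stats), which is exactly how repr->stats is allocated in ice_repr_create() above.
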
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (C) 2019-2021, Intel Corporation. */
  3
  4#include "ice.h"
  5#include "ice_eswitch.h"
  6#include "ice_devlink.h"
  7#include "ice_sriov.h"
  8#include "ice_tc_lib.h"
  9#include "ice_dcb_lib.h"
 10
 11/**
 12 * ice_repr_get_sw_port_id - get port ID associated with representor
 13 * @repr: pointer to port representor
 14 */
 15static int ice_repr_get_sw_port_id(struct ice_repr *repr)
 16{
 17	return repr->src_vsi->back->hw.port_info->lport;
 18}
 19
 20/**
 21 * ice_repr_get_phys_port_name - get phys port name
 22 * @netdev: pointer to port representor netdev
 23 * @buf: write here port name
 24 * @len: max length of buf
 25 */
 26static int
 27ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
 28{
 29	struct ice_netdev_priv *np = netdev_priv(netdev);
 30	struct ice_repr *repr = np->repr;
 31	int res;
 32
 33	/* Devlink port is registered and devlink core is taking care of name formatting. */
 34	if (repr->vf->devlink_port.devlink)
 35		return -EOPNOTSUPP;
 36
 37	res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
 38		       repr->id);
 39	if (res <= 0)
 40		return -EOPNOTSUPP;
 41	return 0;
 42}
 43
 44/**
 45 * ice_repr_get_stats64 - get VF stats for VFPR use
 46 * @netdev: pointer to port representor netdev
 47 * @stats: pointer to struct where stats can be stored
 48 */
 49static void
 50ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 51{
 52	struct ice_netdev_priv *np = netdev_priv(netdev);
 53	struct ice_eth_stats *eth_stats;
 54	struct ice_vsi *vsi;
 55
 56	if (ice_is_vf_disabled(np->repr->vf))
 57		return;
 58	vsi = np->repr->src_vsi;
 59
 60	ice_update_vsi_stats(vsi);
 61	eth_stats = &vsi->eth_stats;
 62
 63	stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
 64			    eth_stats->tx_multicast;
 65	stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
 66			    eth_stats->rx_multicast;
 67	stats->tx_bytes = eth_stats->tx_bytes;
 68	stats->rx_bytes = eth_stats->rx_bytes;
 69	stats->multicast = eth_stats->rx_multicast;
 70	stats->tx_errors = eth_stats->tx_errors;
 71	stats->tx_dropped = eth_stats->tx_discards;
 72	stats->rx_dropped = eth_stats->rx_discards;
 73}
 74
 75/**
 76 * ice_netdev_to_repr - Get port representor for given netdevice
 77 * @netdev: pointer to port representor netdev
 78 */
 79struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
 80{
 81	struct ice_netdev_priv *np = netdev_priv(netdev);
 82
 83	return np->repr;
 84}
 85
 86/**
 87 * ice_repr_open - Enable port representor's network interface
 88 * @netdev: network interface device structure
 89 *
 90 * The open entry point is called when a port representor's network
 91 * interface is made active by the system (IFF_UP). Corresponding
 92 * VF is notified about link status change.
 93 *
 94 * Returns 0 on success
 95 */
 96static int ice_repr_open(struct net_device *netdev)
 97{
 98	struct ice_repr *repr = ice_netdev_to_repr(netdev);
 99	struct ice_vf *vf;
100
101	vf = repr->vf;
102	vf->link_forced = true;
103	vf->link_up = true;
104	ice_vc_notify_vf_link_state(vf);
105
106	netif_carrier_on(netdev);
107	netif_tx_start_all_queues(netdev);
108
109	return 0;
110}
111
112/**
113 * ice_repr_stop - Disable port representor's network interface
114 * @netdev: network interface device structure
115 *
116 * The stop entry point is called when a port representor's network
117 * interface is de-activated by the system. Corresponding
118 * VF is notified about link status change.
119 *
120 * Returns 0 on success
121 */
122static int ice_repr_stop(struct net_device *netdev)
123{
124	struct ice_repr *repr = ice_netdev_to_repr(netdev);
125	struct ice_vf *vf;
126
127	vf = repr->vf;
128	vf->link_forced = true;
129	vf->link_up = false;
130	ice_vc_notify_vf_link_state(vf);
131
132	netif_carrier_off(netdev);
133	netif_tx_stop_all_queues(netdev);
134
135	return 0;
136}
137
138/**
139 * ice_repr_sp_stats64 - get slow path stats for port representor
140 * @dev: network interface device structure
141 * @stats: netlink stats structure
142 *
143 * RX/TX stats are being swapped here to be consistent with VF stats. In slow
144 * path, port representor receives data when the corresponding VF is sending it
145 * (and vice versa), TX and RX bytes/packets are effectively swapped on port
146 * representor.
147 */
148static int
149ice_repr_sp_stats64(const struct net_device *dev,
150		    struct rtnl_link_stats64 *stats)
151{
152	struct ice_netdev_priv *np = netdev_priv(dev);
153	int vf_id = np->repr->vf->vf_id;
154	struct ice_tx_ring *tx_ring;
155	struct ice_rx_ring *rx_ring;
156	u64 pkts, bytes;
157
158	tx_ring = np->vsi->tx_rings[vf_id];
159	ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
160				     tx_ring->ring_stats->stats,
161				     &pkts, &bytes);
162	stats->rx_packets = pkts;
163	stats->rx_bytes = bytes;
164
165	rx_ring = np->vsi->rx_rings[vf_id];
166	ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
167				     rx_ring->ring_stats->stats,
168				     &pkts, &bytes);
169	stats->tx_packets = pkts;
170	stats->tx_bytes = bytes;
171	stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
172			    rx_ring->ring_stats->rx_stats.alloc_buf_failed;
173
174	return 0;
175}
176
177static bool
178ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
179{
180	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
181}
182
183static int
184ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
185			       void *sp)
186{
187	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
188		return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
189
190	return -EINVAL;
191}
192
193static int
194ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
195			     struct flow_cls_offload *flower)
196{
197	switch (flower->command) {
198	case FLOW_CLS_REPLACE:
199		return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
200	case FLOW_CLS_DESTROY:
201		return ice_del_cls_flower(repr->src_vsi, flower);
202	default:
203		return -EINVAL;
204	}
205}
206
207static int
208ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
209			   void *cb_priv)
210{
211	struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
212	struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
213
214	switch (type) {
215	case TC_SETUP_CLSFLOWER:
216		return ice_repr_setup_tc_cls_flower(np->repr, flower);
217	default:
218		return -EOPNOTSUPP;
219	}
220}
221
222static LIST_HEAD(ice_repr_block_cb_list);
223
224static int
225ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
226		  void *type_data)
227{
228	struct ice_netdev_priv *np = netdev_priv(netdev);
229
230	switch (type) {
231	case TC_SETUP_BLOCK:
232		return flow_block_cb_setup_simple((struct flow_block_offload *)
233						  type_data,
234						  &ice_repr_block_cb_list,
235						  ice_repr_setup_tc_block_cb,
236						  np, np, true);
237	default:
238		return -EOPNOTSUPP;
239	}
240}
241
242static const struct net_device_ops ice_repr_netdev_ops = {
243	.ndo_get_phys_port_name = ice_repr_get_phys_port_name,
244	.ndo_get_stats64 = ice_repr_get_stats64,
245	.ndo_open = ice_repr_open,
246	.ndo_stop = ice_repr_stop,
247	.ndo_start_xmit = ice_eswitch_port_start_xmit,
248	.ndo_setup_tc = ice_repr_setup_tc,
249	.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
250	.ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
251};
252
253/**
254 * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
255 * @netdev: pointer to netdev
256 */
257bool ice_is_port_repr_netdev(const struct net_device *netdev)
258{
259	return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
260}
261
262/**
263 * ice_repr_reg_netdev - register port representor netdev
264 * @netdev: pointer to port representor netdev
265 */
266static int
267ice_repr_reg_netdev(struct net_device *netdev)
268{
269	eth_hw_addr_random(netdev);
270	netdev->netdev_ops = &ice_repr_netdev_ops;
271	ice_set_ethtool_repr_ops(netdev);
272
273	netdev->hw_features |= NETIF_F_HW_TC;
274
275	netif_carrier_off(netdev);
276	netif_tx_stop_all_queues(netdev);
277
278	return register_netdev(netdev);
279}
280
281static void ice_repr_remove_node(struct devlink_port *devlink_port)
282{
283	devl_lock(devlink_port->devlink);
284	devl_rate_leaf_destroy(devlink_port);
285	devl_unlock(devlink_port->devlink);
286}
287
288/**
289 * ice_repr_rem - remove representor from VF
290 * @repr: pointer to representor structure
291 */
292static void ice_repr_rem(struct ice_repr *repr)
293{
294	kfree(repr->q_vector);
295	free_netdev(repr->netdev);
296	kfree(repr);
297}
298
299/**
300 * ice_repr_rem_vf - remove representor from VF
301 * @repr: pointer to representor structure
302 */
303void ice_repr_rem_vf(struct ice_repr *repr)
304{
305	ice_repr_remove_node(&repr->vf->devlink_port);
306	unregister_netdev(repr->netdev);
307	ice_devlink_destroy_vf_port(repr->vf);
308	ice_virtchnl_set_dflt_ops(repr->vf);
309	ice_repr_rem(repr);
310}
311
312static void ice_repr_set_tx_topology(struct ice_pf *pf)
313{
314	struct devlink *devlink;
315
316	/* only export if ADQ and DCB disabled and eswitch enabled */
317	if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
318	    !ice_is_switchdev_running(pf))
319		return;
320
321	devlink = priv_to_devlink(pf);
322	ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
323}
324
325/**
326 * ice_repr_add - add representor for generic VSI
327 * @pf: pointer to PF structure
328 * @src_vsi: pointer to VSI structure of device to represent
329 * @parent_mac: device MAC address
330 */
331static struct ice_repr *
332ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
333{
334	struct ice_q_vector *q_vector;
335	struct ice_netdev_priv *np;
336	struct ice_repr *repr;
337	int err;
338
339	repr = kzalloc(sizeof(*repr), GFP_KERNEL);
340	if (!repr)
341		return ERR_PTR(-ENOMEM);
342
343	repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
344	if (!repr->netdev) {
345		err =  -ENOMEM;
346		goto err_alloc;
347	}
348
349	repr->src_vsi = src_vsi;
350	np = netdev_priv(repr->netdev);
351	np->repr = repr;
352
353	q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
354	if (!q_vector) {
355		err = -ENOMEM;
356		goto err_alloc_q_vector;
357	}
358	repr->q_vector = q_vector;
359	repr->q_id = repr->id;
360
361	ether_addr_copy(repr->parent_mac, parent_mac);
362
363	return repr;
364
365err_alloc_q_vector:
366	free_netdev(repr->netdev);
367err_alloc:
368	kfree(repr);
369	return ERR_PTR(err);
370}
371
372struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
373{
374	struct ice_repr *repr;
375	struct ice_vsi *vsi;
376	int err;
377
378	vsi = ice_get_vf_vsi(vf);
379	if (!vsi)
380		return ERR_PTR(-ENOENT);
381
382	err = ice_devlink_create_vf_port(vf);
383	if (err)
384		return ERR_PTR(err);
385
386	repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
387	if (IS_ERR(repr)) {
388		err = PTR_ERR(repr);
389		goto err_repr_add;
390	}
391
392	repr->vf = vf;
393
394	repr->netdev->min_mtu = ETH_MIN_MTU;
395	repr->netdev->max_mtu = ICE_MAX_MTU;
396
397	SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
398	SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
399	err = ice_repr_reg_netdev(repr->netdev);
400	if (err)
401		goto err_netdev;
402
403	ice_virtchnl_set_repr_ops(vf);
404	ice_repr_set_tx_topology(vf->pf);
405
406	return repr;
407
408err_netdev:
409	ice_repr_rem(repr);
410err_repr_add:
411	ice_devlink_destroy_vf_port(vf);
412	return ERR_PTR(err);
413}
414
415struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
416{
417	if (!vsi->vf)
418		return NULL;
419
420	return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
421}
422
423/**
424 * ice_repr_start_tx_queues - start Tx queues of port representor
425 * @repr: pointer to repr structure
426 */
427void ice_repr_start_tx_queues(struct ice_repr *repr)
428{
429	netif_carrier_on(repr->netdev);
430	netif_tx_start_all_queues(repr->netdev);
431}
432
433/**
434 * ice_repr_stop_tx_queues - stop Tx queues of port representor
435 * @repr: pointer to repr structure
436 */
437void ice_repr_stop_tx_queues(struct ice_repr *repr)
438{
439	netif_carrier_off(repr->netdev);
440	netif_tx_stop_all_queues(repr->netdev);
441}
442
443/**
444 * ice_repr_set_traffic_vsi - set traffic VSI for port representor
445 * @repr: repr on which VSI will be set
446 * @vsi: pointer to VSI that will be used by port representor to pass traffic
447 */
448void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
449{
450	struct ice_netdev_priv *np = netdev_priv(repr->netdev);
451
452	np->vsi = vsi;
453}
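
A pattern common to both versions above is the order in which a representor becomes visible: the devlink port is created first, attached to the netdev with SET_NETDEV_DEVLINK_PORT(), then the netdev is registered, and the steps are unwound in reverse on failure (see ice_repr_add_vf()/ice_repr_add_sf() in v6.13.7 and ice_repr_add_vf() in v6.8). The sketch below is only a generic illustration of that ordering with invented ex_* names; it uses the core devlink_port_register()/register_netdev() calls rather than the driver's ice_devlink_* wrappers, and omits the port-attribute setup a real driver would do.

/* Illustrative only -- generic "devlink port first, netdev second" ordering
 * mirrored by the representor add paths above; ex_* names are made up.
 */
#include <linux/netdevice.h>
#include <net/devlink.h>

static int ex_repr_register(struct devlink *devlink,
			    struct devlink_port *dl_port,
			    struct net_device *netdev,
			    unsigned int port_index)
{
	int err;

	/* 1. make the port known to devlink */
	err = devlink_port_register(devlink, dl_port, port_index);
	if (err)
		return err;

	/* 2. tie the netdev to the devlink port, so the devlink core can
	 * derive names for it (the v6.8 ice_repr_get_phys_port_name() above
	 * defers to devlink for exactly this reason)
	 */
	SET_NETDEV_DEVLINK_PORT(netdev, dl_port);

	/* 3. expose the netdev; unwind the port on failure */
	err = register_netdev(netdev);
	if (err)
		devlink_port_unregister(dl_port);

	return err;
}
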