Linux v6.13.7: drivers/net/ethernet/intel/idpf/idpf_ethtool.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#include "idpf.h"
   5
   6/**
   7 * idpf_get_rxnfc - command to get RX flow classification rules
   8 * @netdev: network interface device structure
   9 * @cmd: ethtool rxnfc command
  10 * @rule_locs: pointer to store rule locations
  11 *
  12 * Returns 0 if the command is supported, -EOPNOTSUPP otherwise.
  13 */
  14static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
  15			  u32 __always_unused *rule_locs)
  16{
  17	struct idpf_vport *vport;
  18
  19	idpf_vport_ctrl_lock(netdev);
  20	vport = idpf_netdev_to_vport(netdev);
  21
  22	switch (cmd->cmd) {
  23	case ETHTOOL_GRXRINGS:
  24		cmd->data = vport->num_rxq;
  25		idpf_vport_ctrl_unlock(netdev);
  26
  27		return 0;
  28	default:
  29		break;
  30	}
  31
  32	idpf_vport_ctrl_unlock(netdev);
  33
  34	return -EOPNOTSUPP;
  35}
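/* Illustrative userspace sketch (not driver code): how the ETHTOOL_GRXRINGS
 * case above is typically reached. The ethtool ioctl copies a struct
 * ethtool_rxnfc in and out, and .get_rxnfc fills cmd->data with the RX queue
 * count. The function name, interface name and AF_INET socket are
 * assumptions made for the example.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	static long get_rx_ring_count(const char *ifname)
 *	{
 *		struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
 *		struct ifreq ifr = {};
 *		long ret = -1;
 *		int fd;
 *
 *		fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return ret;
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&nfc;
 *		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
 *			ret = nfc.data;
 *		close(fd);
 *		return ret;
 *	}
 *
 * On success the returned value is what idpf_get_rxnfc() stored from
 * vport->num_rxq.
 */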
  36
  37/**
  38 * idpf_get_rxfh_key_size - get the RSS hash key size
  39 * @netdev: network interface device structure
  40 *
  41 * Returns the key size on success, error value on failure.
  42 */
  43static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
  44{
  45	struct idpf_netdev_priv *np = netdev_priv(netdev);
  46	struct idpf_vport_user_config_data *user_config;
  47
  48	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
  49		return -EOPNOTSUPP;
  50
  51	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
  52
  53	return user_config->rss_data.rss_key_size;
  54}
  55
  56/**
  57 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
  58 * @netdev: network interface device structure
  59 *
  60 * Returns the table size on success, error value on failure.
  61 */
  62static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
  63{
  64	struct idpf_netdev_priv *np = netdev_priv(netdev);
  65	struct idpf_vport_user_config_data *user_config;
  66
  67	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
  68		return -EOPNOTSUPP;
  69
  70	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
  71
  72	return user_config->rss_data.rss_lut_size;
  73}
  74
  75/**
  76 * idpf_get_rxfh - get the rx flow hash indirection table
  77 * @netdev: network interface device structure
  78 * @rxfh: pointer to param struct (indir, key, hfunc)
  79 *
  80 * Reads the indirection table from the cached RSS data. Returns 0, or -EOPNOTSUPP if RSS is not supported.
  81 */
  82static int idpf_get_rxfh(struct net_device *netdev,
  83			 struct ethtool_rxfh_param *rxfh)
  84{
  85	struct idpf_netdev_priv *np = netdev_priv(netdev);
  86	struct idpf_rss_data *rss_data;
  87	struct idpf_adapter *adapter;
  88	int err = 0;
  89	u16 i;
  90
  91	idpf_vport_ctrl_lock(netdev);
  92
  93	adapter = np->adapter;
  94
  95	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
  96		err = -EOPNOTSUPP;
  97		goto unlock_mutex;
  98	}
  99
 100	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
 101	if (np->state != __IDPF_VPORT_UP)
 102		goto unlock_mutex;
 103
 104	rxfh->hfunc = ETH_RSS_HASH_TOP;
 105
 106	if (rxfh->key)
 107		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
 108
 109	if (rxfh->indir) {
 110		for (i = 0; i < rss_data->rss_lut_size; i++)
 111			rxfh->indir[i] = rss_data->rss_lut[i];
 112	}
 113
 114unlock_mutex:
 115	idpf_vport_ctrl_unlock(netdev);
 116
 117	return err;
 118}
 119
 120/**
 121 * idpf_set_rxfh - set the rx flow hash indirection table
 122 * @netdev: network interface device structure
 123 * @rxfh: pointer to param struct (indir, key, hfunc)
 124 * @extack: extended ACK from the Netlink message
 125 *
 126 * Returns 0 after programming the table, -EOPNOTSUPP if RSS or the requested
 127 * hash function is unsupported, or a negative error from idpf_config_rss().
 128 */
 129static int idpf_set_rxfh(struct net_device *netdev,
 130			 struct ethtool_rxfh_param *rxfh,
 131			 struct netlink_ext_ack *extack)
 132{
 133	struct idpf_netdev_priv *np = netdev_priv(netdev);
 134	struct idpf_rss_data *rss_data;
 135	struct idpf_adapter *adapter;
 136	struct idpf_vport *vport;
 137	int err = 0;
 138	u16 lut;
 139
 140	idpf_vport_ctrl_lock(netdev);
 141	vport = idpf_netdev_to_vport(netdev);
 142
 143	adapter = vport->adapter;
 144
 145	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
 146		err = -EOPNOTSUPP;
 147		goto unlock_mutex;
 148	}
 149
 150	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
 151	if (np->state != __IDPF_VPORT_UP)
 152		goto unlock_mutex;
 153
 154	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
 155	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
 156		err = -EOPNOTSUPP;
 157		goto unlock_mutex;
 158	}
 159
 160	if (rxfh->key)
 161		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
 162
 163	if (rxfh->indir) {
 164		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
 165			rss_data->rss_lut[lut] = rxfh->indir[lut];
 166	}
 167
 168	err = idpf_config_rss(vport);
 169
 170unlock_mutex:
 171	idpf_vport_ctrl_unlock(netdev);
 172
 173	return err;
 174}
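/* Illustrative sketch (not driver code): building the indirection table a
 * caller might hand to .set_rxfh above. Spreading entries evenly over the
 * RX queues is the common default; the function name and parameters are
 * placeholders for the example.
 *
 *	static void fill_even_lut(u32 *indir, u32 lut_size, u32 num_rxq)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < lut_size; i++)
 *			indir[i] = i % num_rxq;
 *	}
 *
 * With lut_size = 256 and num_rxq = 4 this yields 0, 1, 2, 3, 0, 1, ...,
 * which idpf_set_rxfh() then copies into rss_data->rss_lut and programs
 * via idpf_config_rss().
 */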
 175
 176/**
 177 * idpf_get_channels: get the number of channels supported by the device
 178 * @netdev: network interface device structure
 179 * @ch: channel information structure
 180 *
 181 * Reports maximum and current TX and RX channel counts. One extra 'other'
 182 * channel is reported to match our mailbox queue.
 183 */
 184static void idpf_get_channels(struct net_device *netdev,
 185			      struct ethtool_channels *ch)
 186{
 187	struct idpf_netdev_priv *np = netdev_priv(netdev);
 188	struct idpf_vport_config *vport_config;
 189	u16 num_txq, num_rxq;
 190	u16 combined;
 191
 192	vport_config = np->adapter->vport_config[np->vport_idx];
 193
 194	num_txq = vport_config->user_config.num_req_tx_qs;
 195	num_rxq = vport_config->user_config.num_req_rx_qs;
 196
 197	combined = min(num_txq, num_rxq);
 198
 199	/* Report maximum channels */
 200	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
 201				 vport_config->max_q.max_rxq);
 202	ch->max_rx = vport_config->max_q.max_rxq;
 203	ch->max_tx = vport_config->max_q.max_txq;
 204
 205	ch->max_other = IDPF_MAX_MBXQ;
 206	ch->other_count = IDPF_MAX_MBXQ;
 207
 208	ch->combined_count = combined;
 209	ch->rx_count = num_rxq - combined;
 210	ch->tx_count = num_txq - combined;
 211}
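/* Worked example of the decomposition above: with num_txq = 8 and
 * num_rxq = 4, combined = min(8, 4) = 4, so ethtool reports
 * combined_count = 4, tx_count = 8 - 4 = 4 dedicated TX channels,
 * rx_count = 4 - 4 = 0 dedicated RX channels, plus IDPF_MAX_MBXQ 'other'
 * channels for the mailbox queue.
 */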
 212
 213/**
 214 * idpf_set_channels: set the new channel count
 215 * @netdev: network interface device structure
 216 * @ch: channel information structure
 217 *
 218 * Negotiate a new number of channels with the Control Plane (CP). Returns 0
 219 * on success, negative on failure.
 220 */
 221static int idpf_set_channels(struct net_device *netdev,
 222			     struct ethtool_channels *ch)
 223{
 224	struct idpf_vport_config *vport_config;
 225	unsigned int num_req_tx_q;
 226	unsigned int num_req_rx_q;
 227	struct idpf_vport *vport;
 228	u16 num_txq, num_rxq;
 229	struct device *dev;
 230	int err = 0;
 231	u16 idx;
 232
 233	if (ch->rx_count && ch->tx_count) {
 234		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
 235		return -EINVAL;
 236	}
 237
 238	idpf_vport_ctrl_lock(netdev);
 239	vport = idpf_netdev_to_vport(netdev);
 240
 241	idx = vport->idx;
 242	vport_config = vport->adapter->vport_config[idx];
 243
 244	num_txq = vport_config->user_config.num_req_tx_qs;
 245	num_rxq = vport_config->user_config.num_req_rx_qs;
 246
 247	num_req_tx_q = ch->combined_count + ch->tx_count;
 248	num_req_rx_q = ch->combined_count + ch->rx_count;
 249
 250	dev = &vport->adapter->pdev->dev;
 251	/* It's possible to specify a number of queues that exceeds the max.
 252	 * The stack checks max combined_count and max [tx|rx]_count but not
 253	 * max combined_count + [tx|rx]_count. These checks should catch that.
 254	 */
 255	if (num_req_tx_q > vport_config->max_q.max_txq) {
 256		dev_info(dev, "Maximum TX queues is %d\n",
 257			 vport_config->max_q.max_txq);
 258		err = -EINVAL;
 259		goto unlock_mutex;
 260	}
 261	if (num_req_rx_q > vport_config->max_q.max_rxq) {
 262		dev_info(dev, "Maximum RX queues is %d\n",
 263			 vport_config->max_q.max_rxq);
 264		err = -EINVAL;
 265		goto unlock_mutex;
 266	}
 267
 268	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
 269		goto unlock_mutex;
 270
 271	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
 272	vport_config->user_config.num_req_rx_qs = num_req_rx_q;
 273
 274	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
 275	if (err) {
 276		/* roll back queue change */
 277		vport_config->user_config.num_req_tx_qs = num_txq;
 278		vport_config->user_config.num_req_rx_qs = num_rxq;
 279	}
 280
 281unlock_mutex:
 282	idpf_vport_ctrl_unlock(netdev);
 283
 284	return err;
 285}
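/* Worked example of the limit check above: assuming max_q.max_txq = 16 and
 * max_q.max_rxq = 16, a request of "combined 12 tx 8" passes the stack's
 * individual limits (12 <= max_combined, 8 <= max_tx), but
 * num_req_tx_q = 12 + 8 = 20 exceeds max_txq, so the driver rejects it
 * with -EINVAL; "combined 12 rx 8 tx 8" is rejected even earlier because
 * dedicated RX and TX channels cannot be used simultaneously.
 */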
 286
 287/**
 288 * idpf_get_ringparam - Get ring parameters
 289 * @netdev: network interface device structure
 290 * @ring: ethtool ringparam structure
 291 * @kring: unused
 292 * @ext_ack: unused
 293 *
 294 * Returns current ring parameters. TX and RX rings are reported separately,
 295 * but the number of rings is not reported.
 296 */
 297static void idpf_get_ringparam(struct net_device *netdev,
 298			       struct ethtool_ringparam *ring,
 299			       struct kernel_ethtool_ringparam *kring,
 300			       struct netlink_ext_ack *ext_ack)
 301{
 302	struct idpf_vport *vport;
 303
 304	idpf_vport_ctrl_lock(netdev);
 305	vport = idpf_netdev_to_vport(netdev);
 306
 307	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
 308	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
 309	ring->rx_pending = vport->rxq_desc_count;
 310	ring->tx_pending = vport->txq_desc_count;
 311
 312	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
 313
 314	idpf_vport_ctrl_unlock(netdev);
 315}
 316
 317/**
 318 * idpf_set_ringparam - Set ring parameters
 319 * @netdev: network interface device structure
 320 * @ring: ethtool ringparam structure
 321 * @kring: unused
 322 * @ext_ack: unused
 323 *
 324 * Sets ring parameters. TX and RX rings are controlled separately, but the
 325 * number of rings is not specified, so all rings get the same settings.
 326 */
 327static int idpf_set_ringparam(struct net_device *netdev,
 328			      struct ethtool_ringparam *ring,
 329			      struct kernel_ethtool_ringparam *kring,
 330			      struct netlink_ext_ack *ext_ack)
 331{
 332	struct idpf_vport_user_config_data *config_data;
 333	u32 new_rx_count, new_tx_count;
 334	struct idpf_vport *vport;
 335	int i, err = 0;
 336	u16 idx;
 337
 338	idpf_vport_ctrl_lock(netdev);
 339	vport = idpf_netdev_to_vport(netdev);
 340
 341	idx = vport->idx;
 342
 343	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
 344		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
 345			   ring->tx_pending,
 346			   IDPF_MIN_TXQ_DESC);
 347		err = -EINVAL;
 348		goto unlock_mutex;
 349	}
 350
 351	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
 352		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
 353			   ring->rx_pending,
 354			   IDPF_MIN_RXQ_DESC);
 355		err = -EINVAL;
 356		goto unlock_mutex;
 357	}
 358
 359	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
 360	if (new_rx_count != ring->rx_pending)
 361		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
 362			    new_rx_count);
 363
 364	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
 365	if (new_tx_count != ring->tx_pending)
 366		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
 367			    new_tx_count);
 368
 369	if (new_tx_count == vport->txq_desc_count &&
 370	    new_rx_count == vport->rxq_desc_count &&
 371	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
 372		goto unlock_mutex;
 373
 374	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
 375		NL_SET_ERR_MSG_MOD(ext_ack,
 376				   "setting TCP data split is not supported");
 377		err = -EOPNOTSUPP;
 378
 379		goto unlock_mutex;
 380	}
 381
 382	config_data = &vport->adapter->vport_config[idx]->user_config;
 383	config_data->num_req_txq_desc = new_tx_count;
 384	config_data->num_req_rxq_desc = new_rx_count;
 385
 386	/* Since we adjusted the RX completion queue count, the RX buffer queue
 387	 * descriptor count needs to be adjusted as well
 388	 */
 389	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
 390		vport->bufq_desc_count[i] =
 391			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
 392						vport->num_bufqs_per_qgrp);
 393
 394	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
 395
 396unlock_mutex:
 397	idpf_vport_ctrl_unlock(netdev);
 398
 399	return err;
 400}
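/* Worked example of the rounding above: assuming IDPF_REQ_RXQ_DESC_MULTIPLE
 * is 64, a request of rx_pending = 1000 becomes ALIGN(1000, 64) = 1024 and
 * the user is notified of the rounded value; rx_pending below
 * IDPF_MIN_RXQ_DESC is rejected with -EINVAL before any rounding.
 */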
 401
 402/**
 403 * struct idpf_stats - definition for an ethtool statistic
 404 * @stat_string: statistic name to display in ethtool -S output
 405 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 406 * @stat_offset: offsetof() the stat from a base pointer
 407 *
 408 * This structure defines a statistic to be added to the ethtool stats buffer.
 409 * It defines a statistic as offset from a common base pointer. Stats should
 410 * be defined in constant arrays using the IDPF_STAT macro, with every element
 411 * of the array using the same _type for calculating the sizeof_stat and
 412 * stat_offset.
 413 *
 414 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 415 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
  416 * the idpf_add_one_ethtool_stat() helper function.
 417 *
 418 * The @stat_string is interpreted as a format string, allowing formatted
 419 * values to be inserted while looping over multiple structures for a given
 420 * statistics array. Thus, every statistic string in an array should have the
 421 * same type and number of format specifiers, to be formatted by variadic
 422 * arguments to the idpf_add_stat_string() helper function.
 423 */
 424struct idpf_stats {
 425	char stat_string[ETH_GSTRING_LEN];
 426	int sizeof_stat;
 427	int stat_offset;
 428};
 429
 430/* Helper macro to define an idpf_stat structure with proper size and type.
 431 * Use this when defining constant statistics arrays. Note that @_type expects
 432 * only a type name and is used multiple times.
 433 */
 434#define IDPF_STAT(_type, _name, _stat) { \
 435	.stat_string = _name, \
 436	.sizeof_stat = sizeof_field(_type, _stat), \
 437	.stat_offset = offsetof(_type, _stat) \
 438}
 439
 440/* Helper macros for defining some statistics related to queues */
 441#define IDPF_RX_QUEUE_STAT(_name, _stat) \
 442	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
 443#define IDPF_TX_QUEUE_STAT(_name, _stat) \
 444	IDPF_STAT(struct idpf_tx_queue, _name, _stat)
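/* For example, IDPF_TX_QUEUE_STAT("pkts", q_stats.packets) used below
 * expands through IDPF_STAT to roughly:
 *
 *	{
 *		.stat_string = "pkts",
 *		.sizeof_stat = sizeof_field(struct idpf_tx_queue,
 *					    q_stats.packets),
 *		.stat_offset = offsetof(struct idpf_tx_queue,
 *					q_stats.packets),
 *	}
 *
 * so each table entry records where in the queue structure the counter
 * lives and how wide it is.
 */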
 445
 446/* Stats associated with a Tx queue */
 447static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
 448	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
 449	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
 450	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
 451};
 452
 453/* Stats associated with an Rx queue */
 454static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
 455	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
 456	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
 457	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
 458};
 459
 460#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
 461#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)
 462
 463#define IDPF_PORT_STAT(_name, _stat) \
 464	IDPF_STAT(struct idpf_vport,  _name, _stat)
 465
 466static const struct idpf_stats idpf_gstrings_port_stats[] = {
 467	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
 468	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
 469	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
 470	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
 471	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
 472	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
 473	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
 474	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
 475	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
 476	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
 477	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
 478	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
 479	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
 480	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
 481	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
 482};
 483
 484#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)
 485
 486/**
 487 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 488 * @p: ethtool supplied buffer
 489 * @stats: stat definitions array
 490 * @size: size of the stats array
 491 * @type: stat type
 492 * @idx: stat index
 493 *
 494 * Format and copy the strings described by stats into the buffer pointed at
 495 * by p.
 496 */
 497static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
 498				     const unsigned int size, const char *type,
 499				     unsigned int idx)
 500{
 501	unsigned int i;
 502
 503	for (i = 0; i < size; i++)
 504		ethtool_sprintf(p, "%s_q-%u_%s",
 505				type, idx, stats[i].stat_string);
 506}
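/* For example, with type = "tx", idx = 0 and the "pkts" entry of
 * idpf_gstrings_tx_queue_stats, the "%s_q-%u_%s" format above emits the
 * string "tx_q-0_pkts" as seen in ethtool -S output.
 */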
 507
 508/**
 509 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 510 * @p: ethtool supplied buffer
 511 * @stats: stat definitions array
 512 * @type: stat type
 513 * @idx: stat idx
 514 *
 515 * Format and copy the strings described by the const static stats value into
 516 * the buffer pointed at by p.
 517 *
 518 * The parameter @stats is evaluated twice, so parameters with side effects
 519 * should be avoided. Additionally, stats must be an array such that
 520 * ARRAY_SIZE can be called on it.
 521 */
 522#define idpf_add_qstat_strings(p, stats, type, idx) \
 523	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
 524
 525/**
 526 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 527 * @p: ethtool buffer
 528 * @stats: struct to copy from
 529 * @size: size of stats array to copy from
 530 */
 531static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
 532				  const unsigned int size)
 533{
 534	unsigned int i;
 535
 536	for (i = 0; i < size; i++)
 537		ethtool_puts(p, stats[i].stat_string);
 538}
 539
 540/**
 541 * idpf_get_stat_strings - Get stat strings
 542 * @netdev: network interface device structure
 543 * @data: buffer for string data
 544 *
 545 * Builds the statistics string table
 546 */
 547static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
 548{
 549	struct idpf_netdev_priv *np = netdev_priv(netdev);
 550	struct idpf_vport_config *vport_config;
 551	unsigned int i;
 552
 553	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
 554			      IDPF_PORT_STATS_LEN);
 555
 556	vport_config = np->adapter->vport_config[np->vport_idx];
 557	/* It's critical that we always report a constant number of strings and
 558	 * that the strings are reported in the same order regardless of how
 559	 * many queues are actually in use.
 560	 */
 561	for (i = 0; i < vport_config->max_q.max_txq; i++)
 562		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
 563				       "tx", i);
 564
 565	for (i = 0; i < vport_config->max_q.max_rxq; i++)
 566		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
 567				       "rx", i);
 568}
 569
 570/**
 571 * idpf_get_strings - Get string set
 572 * @netdev: network interface device structure
 573 * @sset: id of string set
 574 * @data: buffer for string data
 575 *
 576 * Builds string tables for various string sets
 577 */
 578static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 579{
 580	switch (sset) {
 581	case ETH_SS_STATS:
 582		idpf_get_stat_strings(netdev, data);
 583		break;
 584	default:
 585		break;
 586	}
 587}
 588
 589/**
 590 * idpf_get_sset_count - Get length of string set
 591 * @netdev: network interface device structure
 592 * @sset: id of string set
 593 *
 594 * Reports size of various string tables.
 595 */
 596static int idpf_get_sset_count(struct net_device *netdev, int sset)
 597{
 598	struct idpf_netdev_priv *np = netdev_priv(netdev);
 599	struct idpf_vport_config *vport_config;
 600	u16 max_txq, max_rxq;
 601
 602	if (sset != ETH_SS_STATS)
 603		return -EINVAL;
 604
 605	vport_config = np->adapter->vport_config[np->vport_idx];
 606	/* This size reported back here *must* be constant throughout the
 607	 * lifecycle of the netdevice, i.e. we must report the maximum length
 608	 * even for queues that don't technically exist.  This is due to the
 609	 * fact that this userspace API uses three separate ioctl calls to get
 610	 * stats data but has no way to communicate back to userspace when that
 611	 * size has changed, which can typically happen as a result of changing
 612	 * number of queues. If the number/order of stats change in the middle
 613	 * of this call chain it will lead to userspace crashing/accessing bad
 614	 * data through buffer under/overflow.
 615	 */
 616	max_txq = vport_config->max_q.max_txq;
 617	max_rxq = vport_config->max_q.max_rxq;
 618
 619	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
 620	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
 621}
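/* Worked example of the count above: idpf_gstrings_port_stats has 15
 * entries and both queue stat tables have 3, so assuming max_txq = 16 and
 * max_rxq = 16 for illustration, the reported length is
 * 15 + 3 * 16 + 3 * 16 = 111, and it stays 111 no matter how many queues
 * are currently configured.
 */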
 622
 623/**
 624 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 625 * @data: location to store the stat value
 626 * @pstat: old stat pointer to copy from
 627 * @stat: the stat definition
 628 *
 629 * Copies the stat data defined by the pointer and stat structure pair into
 630 * the memory supplied as data. If the pointer is null, data will be zero'd.
 631 */
 632static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
 633				      const struct idpf_stats *stat)
 634{
 635	char *p;
 636
 637	if (!pstat) {
 638		/* Ensure that the ethtool data buffer is zero'd for any stats
 639		 * which don't have a valid pointer.
 640		 */
 641		*data = 0;
 642		return;
 643	}
 644
 645	p = (char *)pstat + stat->stat_offset;
 646	switch (stat->sizeof_stat) {
 647	case sizeof(u64):
 648		*data = *((u64 *)p);
 649		break;
 650	case sizeof(u32):
 651		*data = *((u32 *)p);
 652		break;
 653	case sizeof(u16):
 654		*data = *((u16 *)p);
 655		break;
 656	case sizeof(u8):
 657		*data = *((u8 *)p);
 658		break;
 659	default:
 660		WARN_ONCE(1, "unexpected stat size for %s",
 661			  stat->stat_string);
 662		*data = 0;
 663	}
 664}
 665
 666/**
 667 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 668 * @data: ethtool stats buffer
 669 * @q: the queue to copy
 670 * @type: type of the queue
 671 *
 672 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 673 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 674 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 675 * zero out the queue stat values and update the data pointer. Otherwise
 676 * safely copy the stats from the queue into the supplied buffer and update
 677 * the data pointer when finished.
 678 *
 679 * This function expects to be called while under rcu_read_lock().
 680 */
 681static void idpf_add_queue_stats(u64 **data, const void *q,
 682				 enum virtchnl2_queue_type type)
 683{
 684	const struct u64_stats_sync *stats_sync;
 685	const struct idpf_stats *stats;
 686	unsigned int start;
 687	unsigned int size;
 688	unsigned int i;
 689
 690	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
 691		size = IDPF_RX_QUEUE_STATS_LEN;
 692		stats = idpf_gstrings_rx_queue_stats;
 693		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
 694	} else {
 695		size = IDPF_TX_QUEUE_STATS_LEN;
 696		stats = idpf_gstrings_tx_queue_stats;
 697		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
 698	}
 699
 700	/* To avoid invalid statistics values, ensure that we keep retrying
 701	 * the copy until we get a consistent value according to
 702	 * u64_stats_fetch_retry.
 703	 */
 704	do {
 705		start = u64_stats_fetch_begin(stats_sync);
 706		for (i = 0; i < size; i++)
 707			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
 708	} while (u64_stats_fetch_retry(stats_sync, start));
 709
 710	/* Once we successfully copy the stats in, update the data pointer */
 711	*data += size;
 712}
 713
 714/**
 715 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 716 * @data: pointer to data buffer
 717 * @qtype: type of data queue
 718 *
 719 * We must report a constant length of stats back to userspace regardless of
 720 * how many queues are actually in use because stats collection happens over
 721 * three separate ioctls and there's no way to notify userspace the size
 722 * changed between those calls. This adds zeroed entries to the stats buffer
 723 * since we don't have a real queue to refer to for this stats slot.
 724 */
 725static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
 726{
 727	unsigned int i;
 728	int stats_len;
 729
 730	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
 731		stats_len = IDPF_RX_QUEUE_STATS_LEN;
 732	else
 733		stats_len = IDPF_TX_QUEUE_STATS_LEN;
 734
 735	for (i = 0; i < stats_len; i++)
 736		(*data)[i] = 0;
 737	*data += stats_len;
 738}
 739
 740/**
 741 * idpf_add_port_stats - Copy port stats into ethtool buffer
 742 * @vport: virtual port struct
 743 * @data: ethtool buffer to copy into
 744 */
 745static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
 746{
 747	unsigned int size = IDPF_PORT_STATS_LEN;
 748	unsigned int start;
 749	unsigned int i;
 750
 751	do {
 752		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
 753		for (i = 0; i < size; i++)
 754			idpf_add_one_ethtool_stat(&(*data)[i], vport,
 755						  &idpf_gstrings_port_stats[i]);
 756	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
 757
 758	*data += size;
 759}
 760
 761/**
 762 * idpf_collect_queue_stats - accumulate various per queue stats
 763 * into port level stats
 764 * @vport: pointer to vport struct
 765 **/
 766static void idpf_collect_queue_stats(struct idpf_vport *vport)
 767{
 768	struct idpf_port_stats *pstats = &vport->port_stats;
 769	int i, j;
 770
 771	/* zero out port stats since they're actually tracked in per
 772	 * queue stats; this is only for reporting
 773	 */
 774	u64_stats_update_begin(&pstats->stats_sync);
 775	u64_stats_set(&pstats->rx_hw_csum_err, 0);
 776	u64_stats_set(&pstats->rx_hsplit, 0);
 777	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
 778	u64_stats_set(&pstats->rx_bad_descs, 0);
 779	u64_stats_set(&pstats->tx_linearize, 0);
 780	u64_stats_set(&pstats->tx_busy, 0);
 781	u64_stats_set(&pstats->tx_drops, 0);
 782	u64_stats_set(&pstats->tx_dma_map_errs, 0);
 783	u64_stats_update_end(&pstats->stats_sync);
 784
 785	for (i = 0; i < vport->num_rxq_grp; i++) {
 786		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
 787		u16 num_rxq;
 788
 789		if (idpf_is_queue_model_split(vport->rxq_model))
 790			num_rxq = rxq_grp->splitq.num_rxq_sets;
 791		else
 792			num_rxq = rxq_grp->singleq.num_rxq;
 793
 794		for (j = 0; j < num_rxq; j++) {
 795			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
 796			struct idpf_rx_queue_stats *stats;
 797			struct idpf_rx_queue *rxq;
 798			unsigned int start;
 799
 800			if (idpf_is_queue_model_split(vport->rxq_model))
 801				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
 802			else
 803				rxq = rxq_grp->singleq.rxqs[j];
 804
 805			if (!rxq)
 806				continue;
 807
 808			do {
 809				start = u64_stats_fetch_begin(&rxq->stats_sync);
 810
 811				stats = &rxq->q_stats;
 812				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
 813				hsplit = u64_stats_read(&stats->hsplit_pkts);
 814				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
 815				bad_descs = u64_stats_read(&stats->bad_descs);
 816			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
 817
 818			u64_stats_update_begin(&pstats->stats_sync);
 819			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
 820			u64_stats_add(&pstats->rx_hsplit, hsplit);
 821			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
 822			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
 823			u64_stats_update_end(&pstats->stats_sync);
 824		}
 825	}
 826
 827	for (i = 0; i < vport->num_txq_grp; i++) {
 828		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 829
 830		for (j = 0; j < txq_grp->num_txq; j++) {
 831			u64 linearize, qbusy, skb_drops, dma_map_errs;
 832			struct idpf_tx_queue *txq = txq_grp->txqs[j];
 833			struct idpf_tx_queue_stats *stats;
 834			unsigned int start;
 835
 836			if (!txq)
 837				continue;
 838
 839			do {
 840				start = u64_stats_fetch_begin(&txq->stats_sync);
 841
 842				stats = &txq->q_stats;
 843				linearize = u64_stats_read(&stats->linearize);
 844				qbusy = u64_stats_read(&stats->q_busy);
 845				skb_drops = u64_stats_read(&stats->skb_drops);
 846				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
 847			} while (u64_stats_fetch_retry(&txq->stats_sync, start));
 848
 849			u64_stats_update_begin(&pstats->stats_sync);
 850			u64_stats_add(&pstats->tx_linearize, linearize);
 851			u64_stats_add(&pstats->tx_busy, qbusy);
 852			u64_stats_add(&pstats->tx_drops, skb_drops);
 853			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
 854			u64_stats_update_end(&pstats->stats_sync);
 855		}
 856	}
 857}
 858
 859/**
 860 * idpf_get_ethtool_stats - report device statistics
 861 * @netdev: network interface device structure
 862 * @stats: ethtool statistics structure
 863 * @data: pointer to data buffer
 864 *
 865 * All statistics are added to the data buffer as an array of u64.
 866 */
 867static void idpf_get_ethtool_stats(struct net_device *netdev,
 868				   struct ethtool_stats __always_unused *stats,
 869				   u64 *data)
 870{
 871	struct idpf_netdev_priv *np = netdev_priv(netdev);
 872	struct idpf_vport_config *vport_config;
 873	struct idpf_vport *vport;
 874	unsigned int total = 0;
 875	unsigned int i, j;
 876	bool is_splitq;
 877	u16 qtype;
 878
 879	idpf_vport_ctrl_lock(netdev);
 880	vport = idpf_netdev_to_vport(netdev);
 881
 882	if (np->state != __IDPF_VPORT_UP) {
 883		idpf_vport_ctrl_unlock(netdev);
 884
 885		return;
 886	}
 887
 888	rcu_read_lock();
 889
 890	idpf_collect_queue_stats(vport);
 891	idpf_add_port_stats(vport, &data);
 892
 893	for (i = 0; i < vport->num_txq_grp; i++) {
 894		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 895
 896		qtype = VIRTCHNL2_QUEUE_TYPE_TX;
 897
 898		for (j = 0; j < txq_grp->num_txq; j++, total++) {
 899			struct idpf_tx_queue *txq = txq_grp->txqs[j];
 900
 901			if (!txq)
 902				idpf_add_empty_queue_stats(&data, qtype);
 903			else
 904				idpf_add_queue_stats(&data, txq, qtype);
 905		}
 906	}
 907
 908	vport_config = vport->adapter->vport_config[vport->idx];
 909	/* It is critical we provide a constant number of stats back to
 910	 * userspace regardless of how many queues are actually in use because
 911	 * there is no way to inform userspace the size has changed between
 912	 * ioctl calls. This will fill in any missing stats with zero.
 913	 */
 914	for (; total < vport_config->max_q.max_txq; total++)
 915		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
 916	total = 0;
 917
 918	is_splitq = idpf_is_queue_model_split(vport->rxq_model);
 919
 920	for (i = 0; i < vport->num_rxq_grp; i++) {
 921		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
 922		u16 num_rxq;
 923
 924		qtype = VIRTCHNL2_QUEUE_TYPE_RX;
 925
 926		if (is_splitq)
 927			num_rxq = rxq_grp->splitq.num_rxq_sets;
 928		else
 929			num_rxq = rxq_grp->singleq.num_rxq;
 930
 931		for (j = 0; j < num_rxq; j++, total++) {
 932			struct idpf_rx_queue *rxq;
 933
 934			if (is_splitq)
 935				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
 936			else
 937				rxq = rxq_grp->singleq.rxqs[j];
 938			if (!rxq)
 939				idpf_add_empty_queue_stats(&data, qtype);
 940			else
 941				idpf_add_queue_stats(&data, rxq, qtype);
 942		}
 943	}
 944
 945	for (; total < vport_config->max_q.max_rxq; total++)
 946		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
 947
 948	rcu_read_unlock();
 949
 950	idpf_vport_ctrl_unlock(netdev);
 951}
 952
 953/**
 954 * idpf_find_rxq_vec - find rxq vector from q index
 955 * @vport: virtual port associated to queue
 956 * @q_num: q index used to find queue
 957 *
 958 * returns pointer to rx vector
 959 */
 960static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
 961					       int q_num)
 962{
 963	int q_grp, q_idx;
 964
 965	if (!idpf_is_queue_model_split(vport->rxq_model))
 966		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
 967
 968	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 969	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 970
 971	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
 972}
 973
 974/**
 975 * idpf_find_txq_vec - find txq vector from q index
 976 * @vport: virtual port associated to queue
 977 * @q_num: q index used to find queue
 978 *
 979 * returns pointer to tx vector
 980 */
 981static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
 982					       int q_num)
 983{
 984	int q_grp;
 985
 986	if (!idpf_is_queue_model_split(vport->txq_model))
 987		return vport->txqs[q_num]->q_vector;
 988
 989	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
 990
 991	return vport->txq_grps[q_grp].complq->q_vector;
 992}
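/* Worked example of the splitq lookups above: if
 * IDPF_DFLT_SPLITQ_RXQ_PER_GROUP were 4 (an illustrative value only), RX
 * q_num = 10 would map to q_grp = 10 / 4 = 2 and q_idx = 10 % 4 = 2, i.e.
 * the third RX queue set of the third group; a TX lookup only computes the
 * group and returns the vector of that group's completion queue.
 */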
 993
 994/**
 995 * __idpf_get_q_coalesce - get ITR values for specific queue
 996 * @ec: ethtool structure to fill with driver's coalesce settings
 997 * @q_vector: queue vector corresponding to this queue
 998 * @type: queue type
 999 */
1000static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
1001				  const struct idpf_q_vector *q_vector,
1002				  enum virtchnl2_queue_type type)
1003{
1004	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
1005		ec->use_adaptive_rx_coalesce =
1006				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
1007		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
1008	} else {
1009		ec->use_adaptive_tx_coalesce =
1010				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
1011		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
1012	}
1013}
1014
1015/**
1016 * idpf_get_q_coalesce - get ITR values for specific queue
1017 * @netdev: pointer to the netdev associated with this query
1018 * @ec: coalesce settings to program the device with
1019 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1020 *
1021 * Return 0 on success, and negative on failure
1022 */
1023static int idpf_get_q_coalesce(struct net_device *netdev,
1024			       struct ethtool_coalesce *ec,
1025			       u32 q_num)
1026{
1027	const struct idpf_netdev_priv *np = netdev_priv(netdev);
1028	const struct idpf_vport *vport;
1029	int err = 0;
1030
1031	idpf_vport_ctrl_lock(netdev);
1032	vport = idpf_netdev_to_vport(netdev);
1033
1034	if (np->state != __IDPF_VPORT_UP)
1035		goto unlock_mutex;
1036
1037	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
1038		err = -EINVAL;
1039		goto unlock_mutex;
1040	}
1041
1042	if (q_num < vport->num_rxq)
1043		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
1044				      VIRTCHNL2_QUEUE_TYPE_RX);
1045
1046	if (q_num < vport->num_txq)
1047		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
1048				      VIRTCHNL2_QUEUE_TYPE_TX);
1049
1050unlock_mutex:
1051	idpf_vport_ctrl_unlock(netdev);
1052
1053	return err;
1054}
1055
1056/**
1057 * idpf_get_coalesce - get ITR values as requested by user
1058 * @netdev: pointer to the netdev associated with this query
1059 * @ec: coalesce settings to be filled
1060 * @kec: unused
1061 * @extack: unused
1062 *
1063 * Return 0 on success, and negative on failure
1064 */
1065static int idpf_get_coalesce(struct net_device *netdev,
1066			     struct ethtool_coalesce *ec,
1067			     struct kernel_ethtool_coalesce *kec,
1068			     struct netlink_ext_ack *extack)
1069{
1070	/* Return coalesce based on queue number zero */
1071	return idpf_get_q_coalesce(netdev, ec, 0);
1072}
1073
1074/**
1075 * idpf_get_per_q_coalesce - get ITR values as requested by user
1076 * @netdev: pointer to the netdev associated with this query
1077 * @q_num: queue for which the ITR values are to be retrieved
1078 * @ec: coalesce settings to be filled
1079 *
1080 * Return 0 on success, and negative on failure
1081 */
1082
1083static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
1084				   struct ethtool_coalesce *ec)
1085{
1086	return idpf_get_q_coalesce(netdev, ec, q_num);
1087}
1088
1089/**
1090 * __idpf_set_q_coalesce - set ITR values for specific queue
1091 * @ec: ethtool structure from user to update ITR settings
1092 * @qv: queue vector for which the ITR values are to be set
1093 * @is_rxq: is queue type rx
1094 *
1095 * Returns 0 on success, negative otherwise.
1096 */
1097static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
1098				 struct idpf_q_vector *qv, bool is_rxq)
1099{
1100	u32 use_adaptive_coalesce, coalesce_usecs;
1101	bool is_dim_ena = false;
1102	u16 itr_val;
1103
1104	if (is_rxq) {
1105		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
1106		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
1107		coalesce_usecs = ec->rx_coalesce_usecs;
1108		itr_val = qv->rx_itr_value;
1109	} else {
1110		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
1111		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
1112		coalesce_usecs = ec->tx_coalesce_usecs;
1113		itr_val = qv->tx_itr_value;
1114	}
1115	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
1116		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
1117
1118		return -EINVAL;
1119	}
1120
1121	if (is_dim_ena && use_adaptive_coalesce)
1122		return 0;
1123
1124	if (coalesce_usecs > IDPF_ITR_MAX) {
1125		netdev_err(qv->vport->netdev,
1126			   "Invalid value, %d-usecs range is 0-%d\n",
1127			   coalesce_usecs, IDPF_ITR_MAX);
1128
1129		return -EINVAL;
1130	}
1131
1132	if (coalesce_usecs % 2) {
1133		coalesce_usecs--;
1134		netdev_info(qv->vport->netdev,
1135			    "HW only supports even ITR values, ITR rounded to %d\n",
1136			    coalesce_usecs);
1137	}
1138
1139	if (is_rxq) {
1140		qv->rx_itr_value = coalesce_usecs;
1141		if (use_adaptive_coalesce) {
1142			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
1143		} else {
1144			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
1145			idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
1146						  false);
1147		}
1148	} else {
1149		qv->tx_itr_value = coalesce_usecs;
1150		if (use_adaptive_coalesce) {
1151			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
1152		} else {
1153			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
1154			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
1155		}
1156	}
1157
1158	/* The switch between static and dynamic ITR takes effect when the
1159	 * next interrupt fires
1160	 */
1161	return 0;
1162}
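/* Worked example of the checks above: requesting rx-usecs 9 with adaptive
 * RX off stores 8, since the HW only takes even ITR values and the user is
 * told about the rounding; requesting rx-usecs 9 together with adaptive-rx
 * on fails with -EINVAL because an explicit value that differs from the
 * current one cannot be combined with adaptive mode.
 */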
1163
1164/**
1165 * idpf_set_q_coalesce - set ITR values for specific queue
1166 * @vport: vport associated with the queue that needs updating
1167 * @ec: coalesce settings to program the device with
1168 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1169 * @is_rxq: is queue type rx
1170 *
1171 * Return 0 on success, and negative on failure
1172 */
1173static int idpf_set_q_coalesce(const struct idpf_vport *vport,
1174			       const struct ethtool_coalesce *ec,
1175			       int q_num, bool is_rxq)
1176{
1177	struct idpf_q_vector *qv;
1178
1179	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
1180		      idpf_find_txq_vec(vport, q_num);
1181
1182	if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
1183		return -EINVAL;
1184
1185	return 0;
1186}
1187
1188/**
1189 * idpf_set_coalesce - set ITR values as requested by user
1190 * @netdev: pointer to the netdev associated with this query
1191 * @ec: coalesce settings to program the device with
1192 * @kec: unused
1193 * @extack: unused
1194 *
1195 * Return 0 on success, and negative on failure
1196 */
1197static int idpf_set_coalesce(struct net_device *netdev,
1198			     struct ethtool_coalesce *ec,
1199			     struct kernel_ethtool_coalesce *kec,
1200			     struct netlink_ext_ack *extack)
1201{
1202	struct idpf_netdev_priv *np = netdev_priv(netdev);
1203	struct idpf_vport *vport;
1204	int i, err = 0;
1205
1206	idpf_vport_ctrl_lock(netdev);
1207	vport = idpf_netdev_to_vport(netdev);
1208
1209	if (np->state != __IDPF_VPORT_UP)
1210		goto unlock_mutex;
1211
1212	for (i = 0; i < vport->num_txq; i++) {
1213		err = idpf_set_q_coalesce(vport, ec, i, false);
1214		if (err)
1215			goto unlock_mutex;
1216	}
1217
1218	for (i = 0; i < vport->num_rxq; i++) {
1219		err = idpf_set_q_coalesce(vport, ec, i, true);
1220		if (err)
1221			goto unlock_mutex;
1222	}
1223
1224unlock_mutex:
1225	idpf_vport_ctrl_unlock(netdev);
1226
1227	return err;
1228}
1229
1230/**
1231 * idpf_set_per_q_coalesce - set ITR values as requested by user
1232 * @netdev: pointer to the netdev associated with this query
1233 * @q_num: queue for which the ITR values are to be set
1234 * @ec: coalesce settings to program the device with
1235 *
1236 * Return 0 on success, and negative on failure
1237 */
1238static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
1239				   struct ethtool_coalesce *ec)
1240{
1241	struct idpf_vport *vport;
1242	int err;
1243
1244	idpf_vport_ctrl_lock(netdev);
1245	vport = idpf_netdev_to_vport(netdev);
1246
1247	err = idpf_set_q_coalesce(vport, ec, q_num, false);
1248	if (err) {
1249		idpf_vport_ctrl_unlock(netdev);
1250
1251		return err;
1252	}
1253
1254	err = idpf_set_q_coalesce(vport, ec, q_num, true);
1255
1256	idpf_vport_ctrl_unlock(netdev);
1257
1258	return err;
1259}
1260
1261/**
1262 * idpf_get_msglevel - Get debug message level
1263 * @netdev: network interface device structure
1264 *
1265 * Returns current debug message level.
1266 */
1267static u32 idpf_get_msglevel(struct net_device *netdev)
1268{
1269	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1270
1271	return adapter->msg_enable;
1272}
1273
1274/**
1275 * idpf_set_msglevel - Set debug message level
1276 * @netdev: network interface device structure
1277 * @data: message level
1278 *
1279 * Set current debug message level. Higher values cause the driver to
1280 * be noisier.
1281 */
1282static void idpf_set_msglevel(struct net_device *netdev, u32 data)
1283{
1284	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1285
1286	adapter->msg_enable = data;
1287}
1288
1289/**
1290 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
1291 * @netdev: network interface device structure
1292 * @cmd: ethtool command
1293 *
1294 * Reports speed/duplex settings.
1295 **/
1296static int idpf_get_link_ksettings(struct net_device *netdev,
1297				   struct ethtool_link_ksettings *cmd)
1298{
1299	struct idpf_netdev_priv *np = netdev_priv(netdev);
1300
1301	ethtool_link_ksettings_zero_link_mode(cmd, supported);
1302	cmd->base.autoneg = AUTONEG_DISABLE;
1303	cmd->base.port = PORT_NONE;
1304	if (netif_carrier_ok(netdev)) {
1305		cmd->base.duplex = DUPLEX_FULL;
1306		cmd->base.speed = np->link_speed_mbps;
1307	} else {
1308		cmd->base.duplex = DUPLEX_UNKNOWN;
1309		cmd->base.speed = SPEED_UNKNOWN;
1310	}
1311
1312	return 0;
1313}
1314
1315static const struct ethtool_ops idpf_ethtool_ops = {
1316	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1317				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1318	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
1319	.get_msglevel		= idpf_get_msglevel,
1320	.set_msglevel		= idpf_set_msglevel,
1321	.get_link		= ethtool_op_get_link,
1322	.get_coalesce		= idpf_get_coalesce,
1323	.set_coalesce		= idpf_set_coalesce,
1324	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
1325	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
1326	.get_ethtool_stats	= idpf_get_ethtool_stats,
1327	.get_strings		= idpf_get_strings,
1328	.get_sset_count		= idpf_get_sset_count,
1329	.get_channels		= idpf_get_channels,
1330	.get_rxnfc		= idpf_get_rxnfc,
1331	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
1332	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
1333	.get_rxfh		= idpf_get_rxfh,
1334	.set_rxfh		= idpf_set_rxfh,
1335	.set_channels		= idpf_set_channels,
1336	.get_ringparam		= idpf_get_ringparam,
1337	.set_ringparam		= idpf_set_ringparam,
1338	.get_link_ksettings	= idpf_get_link_ksettings,
1339};
1340
1341/**
1342 * idpf_set_ethtool_ops - Initialize ethtool ops struct
1343 * @netdev: network interface device structure
1344 *
1345 * Sets ethtool ops struct in our netdev so that ethtool can call
1346 * our functions.
1347 */
1348void idpf_set_ethtool_ops(struct net_device *netdev)
1349{
1350	netdev->ethtool_ops = &idpf_ethtool_ops;
1351}
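/* Usage note: the driver is expected to call idpf_set_ethtool_ops() once
 * per netdev, before register_netdev(), so the ethtool core can dispatch
 * ioctl and ethtool netlink requests to the callbacks in idpf_ethtool_ops.
 */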
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#include "idpf.h"
   5
   6/**
   7 * idpf_get_rxnfc - command to get RX flow classification rules
   8 * @netdev: network interface device structure
   9 * @cmd: ethtool rxnfc command
  10 * @rule_locs: pointer to store rule locations
  11 *
  12 * Returns Success if the command is supported.
  13 */
  14static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
  15			  u32 __always_unused *rule_locs)
  16{
  17	struct idpf_vport *vport;
  18
  19	idpf_vport_ctrl_lock(netdev);
  20	vport = idpf_netdev_to_vport(netdev);
  21
  22	switch (cmd->cmd) {
  23	case ETHTOOL_GRXRINGS:
  24		cmd->data = vport->num_rxq;
  25		idpf_vport_ctrl_unlock(netdev);
  26
  27		return 0;
  28	default:
  29		break;
  30	}
  31
  32	idpf_vport_ctrl_unlock(netdev);
  33
  34	return -EOPNOTSUPP;
  35}
  36
  37/**
  38 * idpf_get_rxfh_key_size - get the RSS hash key size
  39 * @netdev: network interface device structure
  40 *
  41 * Returns the key size on success, error value on failure.
  42 */
  43static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
  44{
  45	struct idpf_netdev_priv *np = netdev_priv(netdev);
  46	struct idpf_vport_user_config_data *user_config;
  47
  48	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
  49		return -EOPNOTSUPP;
  50
  51	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
  52
  53	return user_config->rss_data.rss_key_size;
  54}
  55
  56/**
  57 * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
  58 * @netdev: network interface device structure
  59 *
  60 * Returns the table size on success, error value on failure.
  61 */
  62static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
  63{
  64	struct idpf_netdev_priv *np = netdev_priv(netdev);
  65	struct idpf_vport_user_config_data *user_config;
  66
  67	if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
  68		return -EOPNOTSUPP;
  69
  70	user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
  71
  72	return user_config->rss_data.rss_lut_size;
  73}
  74
  75/**
  76 * idpf_get_rxfh - get the rx flow hash indirection table
  77 * @netdev: network interface device structure
  78 * @rxfh: pointer to param struct (indir, key, hfunc)
  79 *
  80 * Reads the indirection table directly from the hardware. Always returns 0.
  81 */
  82static int idpf_get_rxfh(struct net_device *netdev,
  83			 struct ethtool_rxfh_param *rxfh)
  84{
  85	struct idpf_netdev_priv *np = netdev_priv(netdev);
  86	struct idpf_rss_data *rss_data;
  87	struct idpf_adapter *adapter;
  88	int err = 0;
  89	u16 i;
  90
  91	idpf_vport_ctrl_lock(netdev);
  92
  93	adapter = np->adapter;
  94
  95	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
  96		err = -EOPNOTSUPP;
  97		goto unlock_mutex;
  98	}
  99
 100	rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
 101	if (np->state != __IDPF_VPORT_UP)
 102		goto unlock_mutex;
 103
 104	rxfh->hfunc = ETH_RSS_HASH_TOP;
 105
 106	if (rxfh->key)
 107		memcpy(rxfh->key, rss_data->rss_key, rss_data->rss_key_size);
 108
 109	if (rxfh->indir) {
 110		for (i = 0; i < rss_data->rss_lut_size; i++)
 111			rxfh->indir[i] = rss_data->rss_lut[i];
 112	}
 113
 114unlock_mutex:
 115	idpf_vport_ctrl_unlock(netdev);
 116
 117	return err;
 118}
 119
 120/**
 121 * idpf_set_rxfh - set the rx flow hash indirection table
 122 * @netdev: network interface device structure
 123 * @rxfh: pointer to param struct (indir, key, hfunc)
 124 * @extack: extended ACK from the Netlink message
 125 *
 126 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 127 * returns 0 after programming the table.
 128 */
 129static int idpf_set_rxfh(struct net_device *netdev,
 130			 struct ethtool_rxfh_param *rxfh,
 131			 struct netlink_ext_ack *extack)
 132{
 133	struct idpf_netdev_priv *np = netdev_priv(netdev);
 134	struct idpf_rss_data *rss_data;
 135	struct idpf_adapter *adapter;
 136	struct idpf_vport *vport;
 137	int err = 0;
 138	u16 lut;
 139
 140	idpf_vport_ctrl_lock(netdev);
 141	vport = idpf_netdev_to_vport(netdev);
 142
 143	adapter = vport->adapter;
 144
 145	if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
 146		err = -EOPNOTSUPP;
 147		goto unlock_mutex;
 148	}
 149
 150	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
 151	if (np->state != __IDPF_VPORT_UP)
 152		goto unlock_mutex;
 153
 154	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
 155	    rxfh->hfunc != ETH_RSS_HASH_TOP) {
 156		err = -EOPNOTSUPP;
 157		goto unlock_mutex;
 158	}
 159
 160	if (rxfh->key)
 161		memcpy(rss_data->rss_key, rxfh->key, rss_data->rss_key_size);
 162
 163	if (rxfh->indir) {
 164		for (lut = 0; lut < rss_data->rss_lut_size; lut++)
 165			rss_data->rss_lut[lut] = rxfh->indir[lut];
 166	}
 167
 168	err = idpf_config_rss(vport);
 169
 170unlock_mutex:
 171	idpf_vport_ctrl_unlock(netdev);
 172
 173	return err;
 174}
 175
 176/**
 177 * idpf_get_channels: get the number of channels supported by the device
 178 * @netdev: network interface device structure
 179 * @ch: channel information structure
 180 *
 181 * Report maximum of TX and RX. Report one extra channel to match our MailBox
 182 * Queue.
 183 */
 184static void idpf_get_channels(struct net_device *netdev,
 185			      struct ethtool_channels *ch)
 186{
 187	struct idpf_netdev_priv *np = netdev_priv(netdev);
 188	struct idpf_vport_config *vport_config;
 189	u16 num_txq, num_rxq;
 190	u16 combined;
 191
 192	vport_config = np->adapter->vport_config[np->vport_idx];
 193
 194	num_txq = vport_config->user_config.num_req_tx_qs;
 195	num_rxq = vport_config->user_config.num_req_rx_qs;
 196
 197	combined = min(num_txq, num_rxq);
 198
 199	/* Report maximum channels */
 200	ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
 201				 vport_config->max_q.max_rxq);
 202	ch->max_rx = vport_config->max_q.max_rxq;
 203	ch->max_tx = vport_config->max_q.max_txq;
 204
 205	ch->max_other = IDPF_MAX_MBXQ;
 206	ch->other_count = IDPF_MAX_MBXQ;
 207
 208	ch->combined_count = combined;
 209	ch->rx_count = num_rxq - combined;
 210	ch->tx_count = num_txq - combined;
 211}
 212
 213/**
 214 * idpf_set_channels: set the new channel count
 215 * @netdev: network interface device structure
 216 * @ch: channel information structure
 217 *
 218 * Negotiate a new number of channels with CP. Returns 0 on success, negative
 219 * on failure.
 220 */
 221static int idpf_set_channels(struct net_device *netdev,
 222			     struct ethtool_channels *ch)
 223{
 224	struct idpf_vport_config *vport_config;
 225	u16 combined, num_txq, num_rxq;
 226	unsigned int num_req_tx_q;
 227	unsigned int num_req_rx_q;
 228	struct idpf_vport *vport;
 
 229	struct device *dev;
 230	int err = 0;
 231	u16 idx;
 232
 
 
 
 
 
 233	idpf_vport_ctrl_lock(netdev);
 234	vport = idpf_netdev_to_vport(netdev);
 235
 236	idx = vport->idx;
 237	vport_config = vport->adapter->vport_config[idx];
 238
 239	num_txq = vport_config->user_config.num_req_tx_qs;
 240	num_rxq = vport_config->user_config.num_req_rx_qs;
 241
 242	combined = min(num_txq, num_rxq);
 243
 244	/* these checks are for cases where user didn't specify a particular
 245	 * value on cmd line but we get non-zero value anyway via
 246	 * get_channels(); look at ethtool.c in ethtool repository (the user
 247	 * space part), particularly, do_schannels() routine
 248	 */
 249	if (ch->combined_count == combined)
 250		ch->combined_count = 0;
 251	if (ch->combined_count && ch->rx_count == num_rxq - combined)
 252		ch->rx_count = 0;
 253	if (ch->combined_count && ch->tx_count == num_txq - combined)
 254		ch->tx_count = 0;
 255
 256	num_req_tx_q = ch->combined_count + ch->tx_count;
 257	num_req_rx_q = ch->combined_count + ch->rx_count;
 258
 259	dev = &vport->adapter->pdev->dev;
 260	/* It's possible to specify number of queues that exceeds max.
 261	 * Stack checks max combined_count and max [tx|rx]_count but not the
 262	 * max combined_count + [tx|rx]_count. These checks should catch that.
 263	 */
 264	if (num_req_tx_q > vport_config->max_q.max_txq) {
 265		dev_info(dev, "Maximum TX queues is %d\n",
 266			 vport_config->max_q.max_txq);
 267		err = -EINVAL;
 268		goto unlock_mutex;
 269	}
 270	if (num_req_rx_q > vport_config->max_q.max_rxq) {
 271		dev_info(dev, "Maximum RX queues is %d\n",
 272			 vport_config->max_q.max_rxq);
 273		err = -EINVAL;
 274		goto unlock_mutex;
 275	}
 276
 277	if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
 278		goto unlock_mutex;
 279
 280	vport_config->user_config.num_req_tx_qs = num_req_tx_q;
 281	vport_config->user_config.num_req_rx_qs = num_req_rx_q;
 282
 283	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
 284	if (err) {
 285		/* roll back queue change */
 286		vport_config->user_config.num_req_tx_qs = num_txq;
 287		vport_config->user_config.num_req_rx_qs = num_rxq;
 288	}
 289
 290unlock_mutex:
 291	idpf_vport_ctrl_unlock(netdev);
 292
 293	return err;
 294}
 295
 296/**
 297 * idpf_get_ringparam - Get ring parameters
 298 * @netdev: network interface device structure
 299 * @ring: ethtool ringparam structure
 300 * @kring: unused
 301 * @ext_ack: unused
 302 *
 303 * Returns current ring parameters. TX and RX rings are reported separately,
 304 * but the number of rings is not reported.
 305 */
 306static void idpf_get_ringparam(struct net_device *netdev,
 307			       struct ethtool_ringparam *ring,
 308			       struct kernel_ethtool_ringparam *kring,
 309			       struct netlink_ext_ack *ext_ack)
 310{
 311	struct idpf_vport *vport;
 312
 313	idpf_vport_ctrl_lock(netdev);
 314	vport = idpf_netdev_to_vport(netdev);
 315
 316	ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
 317	ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
 318	ring->rx_pending = vport->rxq_desc_count;
 319	ring->tx_pending = vport->txq_desc_count;
 320
 321	kring->tcp_data_split = idpf_vport_get_hsplit(vport);
 322
 323	idpf_vport_ctrl_unlock(netdev);
 324}
 325
 326/**
 327 * idpf_set_ringparam - Set ring parameters
 328 * @netdev: network interface device structure
 329 * @ring: ethtool ringparam structure
 330 * @kring: unused
 331 * @ext_ack: unused
 332 *
 333 * Sets ring parameters. TX and RX rings are controlled separately, but the
 334 * number of rings is not specified, so all rings get the same settings.
 335 */
 336static int idpf_set_ringparam(struct net_device *netdev,
 337			      struct ethtool_ringparam *ring,
 338			      struct kernel_ethtool_ringparam *kring,
 339			      struct netlink_ext_ack *ext_ack)
 340{
 341	struct idpf_vport_user_config_data *config_data;
 342	u32 new_rx_count, new_tx_count;
 343	struct idpf_vport *vport;
 344	int i, err = 0;
 345	u16 idx;
 346
 347	idpf_vport_ctrl_lock(netdev);
 348	vport = idpf_netdev_to_vport(netdev);
 349
 350	idx = vport->idx;
 351
 352	if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
 353		netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
 354			   ring->tx_pending,
 355			   IDPF_MIN_TXQ_DESC);
 356		err = -EINVAL;
 357		goto unlock_mutex;
 358	}
 359
 360	if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
 361		netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
 362			   ring->rx_pending,
 363			   IDPF_MIN_RXQ_DESC);
 364		err = -EINVAL;
 365		goto unlock_mutex;
 366	}
 367
 368	new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
 369	if (new_rx_count != ring->rx_pending)
 370		netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
 371			    new_rx_count);
 372
 373	new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
 374	if (new_tx_count != ring->tx_pending)
 375		netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
 376			    new_tx_count);
 377
 378	if (new_tx_count == vport->txq_desc_count &&
 379	    new_rx_count == vport->rxq_desc_count)
 
 380		goto unlock_mutex;
 381
 382	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
 383		NL_SET_ERR_MSG_MOD(ext_ack,
 384				   "setting TCP data split is not supported");
 385		err = -EOPNOTSUPP;
 386
 387		goto unlock_mutex;
 388	}
 389
 390	config_data = &vport->adapter->vport_config[idx]->user_config;
 391	config_data->num_req_txq_desc = new_tx_count;
 392	config_data->num_req_rxq_desc = new_rx_count;
 393
 394	/* Since we adjusted the RX completion queue count, the RX buffer queue
 395	 * descriptor count needs to be adjusted as well
 396	 */
 397	for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
 398		vport->bufq_desc_count[i] =
 399			IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
 400						vport->num_bufqs_per_qgrp);
 401
 402	err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
 403
 404unlock_mutex:
 405	idpf_vport_ctrl_unlock(netdev);
 406
 407	return err;
 408}
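/*
 * Illustrative userspace sketch (not driver code): how the ALIGN()
 * rounding above maps a requested descriptor count onto the queue
 * multiple. The multiple below is an assumption for demonstration only,
 * not the real IDPF_REQ_RXQ_DESC_MULTIPLE.
 */
#include <stdio.h>

#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int requested = 500;	/* hypothetical "ethtool -G" request */
	unsigned int multiple = 32;	/* stand-in for the descriptor multiple */

	/* 500 is rounded up to 512, mirroring the netdev_info() message */
	printf("%u -> %u\n", requested, DEMO_ALIGN(requested, multiple));

	return 0;
}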
 409
 410/**
 411 * struct idpf_stats - definition for an ethtool statistic
 412 * @stat_string: statistic name to display in ethtool -S output
 413 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
 414 * @stat_offset: offsetof() the stat from a base pointer
 415 *
 416 * This structure defines a statistic to be added to the ethtool stats buffer.
 417 * It defines a statistic as offset from a common base pointer. Stats should
 418 * be defined in constant arrays using the IDPF_STAT macro, with every element
 419 * of the array using the same _type for calculating the sizeof_stat and
 420 * stat_offset.
 421 *
 422 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
 423 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
 424 * the idpf_add_ethtool_stat() helper function.
 425 *
 426 * The @stat_string is interpreted as a format string, allowing formatted
 427 * values to be inserted while looping over multiple structures for a given
 428 * statistics array. Thus, every statistic string in an array should have the
 429 * same type and number of format specifiers, to be formatted by variadic
 430 * arguments to the idpf_add_stat_string() helper function.
 431 */
 432struct idpf_stats {
 433	char stat_string[ETH_GSTRING_LEN];
 434	int sizeof_stat;
 435	int stat_offset;
 436};
 437
 438/* Helper macro to define an idpf_stat structure with proper size and type.
 439 * Use this when defining constant statistics arrays. Note that @_type expects
 440 * only a type name and is used multiple times.
 441 */
 442#define IDPF_STAT(_type, _name, _stat) { \
 443	.stat_string = _name, \
 444	.sizeof_stat = sizeof_field(_type, _stat), \
 445	.stat_offset = offsetof(_type, _stat) \
 446}
 447
 448/* Helper macro for defining some statistics related to queues */
 449#define IDPF_QUEUE_STAT(_name, _stat) \
 450	IDPF_STAT(struct idpf_queue, _name, _stat)
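/*
 * For reference (illustrative expansion, not additional driver code), an
 * entry such as IDPF_QUEUE_STAT("pkts", q_stats.tx.packets) below expands
 * to roughly:
 *
 *	{
 *		.stat_string = "pkts",
 *		.sizeof_stat = sizeof_field(struct idpf_queue,
 *					    q_stats.tx.packets),
 *		.stat_offset = offsetof(struct idpf_queue,
 *					q_stats.tx.packets),
 *	}
 *
 * so the stat can later be read from any struct idpf_queue base pointer.
 */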
 451
 452/* Stats associated with a Tx queue */
 453static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
 454	IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
 455	IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
 456	IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
 457};
 458
 459/* Stats associated with an Rx queue */
 460static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
 461	IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
 462	IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
 463	IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
 464};
 465
 466#define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
 467#define IDPF_RX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_rx_queue_stats)
 468
 469#define IDPF_PORT_STAT(_name, _stat) \
 470	IDPF_STAT(struct idpf_vport, _name, _stat)
 471
 472static const struct idpf_stats idpf_gstrings_port_stats[] = {
 473	IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
 474	IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
 475	IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
 476	IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
 477	IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
 478	IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
 479	IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
 480	IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
 481	IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
 482	IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
 483	IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
 484	IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
 485	IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
 486	IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
 487	IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
 488};
 489
 490#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)
 491
 492/**
 493 * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
 494 * @p: ethtool supplied buffer
 495 * @stats: stat definitions array
 496 * @size: size of the stats array
 497 * @type: stat type
 498 * @idx: stat index
 499 *
 500 * Format and copy the strings described by stats into the buffer pointed at
 501 * by p.
 502 */
 503static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
 504				     const unsigned int size, const char *type,
 505				     unsigned int idx)
 506{
 507	unsigned int i;
 508
 509	for (i = 0; i < size; i++)
 510		ethtool_sprintf(p, "%s_q-%u_%s",
 511				type, idx, stats[i].stat_string);
 512}
 513
 514/**
 515 * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
 516 * @p: ethtool supplied buffer
 517 * @stats: stat definitions array
 518 * @type: stat type
 519 * @idx: stat idx
 520 *
 521 * Format and copy the strings described by the const static stats value into
 522 * the buffer pointed at by p.
 523 *
 524 * The parameter @stats is evaluated twice, so parameters with side effects
 525 * should be avoided. Additionally, stats must be an array such that
 526 * ARRAY_SIZE can be called on it.
 527 */
 528#define idpf_add_qstat_strings(p, stats, type, idx) \
 529	__idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
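/*
 * Example (illustrative): for the Tx array above and queue index 0, the
 * "%s_q-%u_%s" format in __idpf_add_qstat_strings() produces
 *
 *	tx_q-0_pkts
 *	tx_q-0_bytes
 *	tx_q-0_lso_pkts
 *
 * which is how the per-queue counters appear in "ethtool -S" output.
 */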
 530
 531/**
 532 * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
 533 * @p: ethtool buffer
 534 * @stats: struct to copy from
 535 * @size: size of stats array to copy from
 536 */
 537static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
 538				  const unsigned int size)
 539{
 540	unsigned int i;
 541
 542	for (i = 0; i < size; i++)
 543		ethtool_puts(p, stats[i].stat_string);
 544}
 545
 546/**
 547 * idpf_get_stat_strings - Get stat strings
 548 * @netdev: network interface device structure
 549 * @data: buffer for string data
 550 *
 551 * Builds the statistics string table
 552 */
 553static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
 554{
 555	struct idpf_netdev_priv *np = netdev_priv(netdev);
 556	struct idpf_vport_config *vport_config;
 557	unsigned int i;
 558
 559	idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
 560			      IDPF_PORT_STATS_LEN);
 561
 562	vport_config = np->adapter->vport_config[np->vport_idx];
 563	/* It's critical that we always report a constant number of strings and
 564	 * that the strings are reported in the same order regardless of how
 565	 * many queues are actually in use.
 566	 */
 567	for (i = 0; i < vport_config->max_q.max_txq; i++)
 568		idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
 569				       "tx", i);
 570
 571	for (i = 0; i < vport_config->max_q.max_rxq; i++)
 572		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
 573				       "rx", i);
 574
 575	page_pool_ethtool_stats_get_strings(data);
 576}
 577
 578/**
 579 * idpf_get_strings - Get string set
 580 * @netdev: network interface device structure
 581 * @sset: id of string set
 582 * @data: buffer for string data
 583 *
 584 * Builds string tables for various string sets
 585 */
 586static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 587{
 588	switch (sset) {
 589	case ETH_SS_STATS:
 590		idpf_get_stat_strings(netdev, data);
 591		break;
 592	default:
 593		break;
 594	}
 595}
 596
 597/**
 598 * idpf_get_sset_count - Get length of string set
 599 * @netdev: network interface device structure
 600 * @sset: id of string set
 601 *
 602 * Reports size of various string tables.
 603 */
 604static int idpf_get_sset_count(struct net_device *netdev, int sset)
 605{
 606	struct idpf_netdev_priv *np = netdev_priv(netdev);
 607	struct idpf_vport_config *vport_config;
 608	u16 max_txq, max_rxq;
 609	unsigned int size;
 610
 611	if (sset != ETH_SS_STATS)
 612		return -EINVAL;
 613
 614	vport_config = np->adapter->vport_config[np->vport_idx];
 615	/* This size reported back here *must* be constant throughout the
 616	 * lifecycle of the netdevice, i.e. we must report the maximum length
 617	 * even for queues that don't technically exist.  This is due to the
 618	 * fact that this userspace API uses three separate ioctl calls to get
 619	 * stats data but has no way to communicate back to userspace when that
 620	 * size has changed, which can typically happen as a result of changing
 621	 * number of queues. If the number/order of stats change in the middle
 622	 * of this call chain it will lead to userspace crashing/accessing bad
 623	 * data through buffer under/overflow.
 624	 */
 625	max_txq = vport_config->max_q.max_txq;
 626	max_rxq = vport_config->max_q.max_rxq;
 627
 628	size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
 629	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
 630	size += page_pool_ethtool_stats_get_count();
 631
 632	return size;
 633}
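/*
 * Worked example (assumed queue maximums): with max_txq = 16 and
 * max_rxq = 16, the reported count is
 *
 *	15 port stats
 *	+ 3 * 16 Tx queue stats
 *	+ 3 * 16 Rx queue stats
 *	+ page_pool_ethtool_stats_get_count()
 *
 * and it stays the same even if only a few queue pairs are active; the
 * unused slots are zero-filled by idpf_add_empty_queue_stats().
 */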
 634
 635/**
 636 * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
 637 * @data: location to store the stat value
 638 * @pstat: old stat pointer to copy from
 639 * @stat: the stat definition
 640 *
 641 * Copies the stat data defined by the pointer and stat structure pair into
 642 * the memory supplied as data. If the pointer is null, data will be zero'd.
 643 */
 644static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
 645				      const struct idpf_stats *stat)
 646{
 647	char *p;
 648
 649	if (!pstat) {
 650		/* Ensure that the ethtool data buffer is zero'd for any stats
 651		 * which don't have a valid pointer.
 652		 */
 653		*data = 0;
 654		return;
 655	}
 656
 657	p = (char *)pstat + stat->stat_offset;
 658	switch (stat->sizeof_stat) {
 659	case sizeof(u64):
 660		*data = *((u64 *)p);
 661		break;
 662	case sizeof(u32):
 663		*data = *((u32 *)p);
 664		break;
 665	case sizeof(u16):
 666		*data = *((u16 *)p);
 667		break;
 668	case sizeof(u8):
 669		*data = *((u8 *)p);
 670		break;
 671	default:
 672		WARN_ONCE(1, "unexpected stat size for %s",
 673			  stat->stat_string);
 674		*data = 0;
 675	}
 676}
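/*
 * Illustrative userspace sketch (not driver code) of the generic
 * size/offset read that idpf_add_one_ethtool_stat() performs. The demo
 * struct, macro, and values below are assumptions for demonstration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_queue {
	uint32_t packets;
	uint64_t bytes;
};

struct demo_stat {
	const char *name;
	size_t size;
	size_t offset;
};

#define DEMO_STAT(_type, _name, _stat) \
	{ _name, sizeof(((_type *)0)->_stat), offsetof(_type, _stat) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(struct demo_queue, "pkts", packets),
	DEMO_STAT(struct demo_queue, "bytes", bytes),
};

int main(void)
{
	struct demo_queue q = { .packets = 42, .bytes = 4096 };

	for (size_t i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)&q + demo_stats[i].offset;
		uint64_t v = 0;

		/* widen smaller counters to u64, as the switch above does */
		if (demo_stats[i].size == sizeof(uint64_t))
			memcpy(&v, p, sizeof(uint64_t));
		else if (demo_stats[i].size == sizeof(uint32_t))
			v = *(const uint32_t *)p;

		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)v);
	}

	return 0;
}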
 677
 678/**
 679 * idpf_add_queue_stats - copy queue statistics into supplied buffer
 680 * @data: ethtool stats buffer
 681 * @q: the queue to copy
 682 *
 683 * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 684 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
 685 * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
 686 * zero out the queue stat values and update the data pointer. Otherwise
 687 * safely copy the stats from the queue into the supplied buffer and update
 688 * the data pointer when finished.
 689 *
 690 * This function expects to be called while under rcu_read_lock().
 691 */
 692static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
 693{
 694	const struct idpf_stats *stats;
 695	unsigned int start;
 696	unsigned int size;
 697	unsigned int i;
 698
 699	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
 700		size = IDPF_RX_QUEUE_STATS_LEN;
 701		stats = idpf_gstrings_rx_queue_stats;
 702	} else {
 703		size = IDPF_TX_QUEUE_STATS_LEN;
 704		stats = idpf_gstrings_tx_queue_stats;
 705	}
 706
 707	/* To avoid invalid statistics values, ensure that we keep retrying
 708	 * the copy until we get a consistent value according to
 709	 * u64_stats_fetch_retry.
 710	 */
 711	do {
 712		start = u64_stats_fetch_begin(&q->stats_sync);
 713		for (i = 0; i < size; i++)
 714			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
 715	} while (u64_stats_fetch_retry(&q->stats_sync, start));
 716
 717	/* Once we successfully copy the stats in, update the data pointer */
 718	*data += size;
 719}
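/*
 * Illustrative sketch (not additional driver code) of the u64_stats
 * read pattern used above: retry until no writer raced with the reads,
 * yielding a consistent snapshot without taking a lock on the hot path.
 *
 *	unsigned int start;
 *	u64 pkts;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&q->stats_sync);
 *		pkts = u64_stats_read(&q->q_stats.tx.packets);
 *	} while (u64_stats_fetch_retry(&q->stats_sync, start));
 */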
 720
 721/**
 722 * idpf_add_empty_queue_stats - Add stats for a non-existent queue
 723 * @data: pointer to data buffer
 724 * @qtype: type of data queue
 725 *
 726 * We must report a constant length of stats back to userspace regardless of
 727 * how many queues are actually in use because stats collection happens over
 728 * three separate ioctls and there's no way to notify userspace the size
 729 * changed between those calls. This writes zeroed values into the data
 730 * buffer since we don't have a real queue to refer to for this stats slot.
 731 */
 732static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
 733{
 734	unsigned int i;
 735	int stats_len;
 736
 737	if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
 738		stats_len = IDPF_RX_QUEUE_STATS_LEN;
 739	else
 740		stats_len = IDPF_TX_QUEUE_STATS_LEN;
 741
 742	for (i = 0; i < stats_len; i++)
 743		(*data)[i] = 0;
 744	*data += stats_len;
 745}
 746
 747/**
 748 * idpf_add_port_stats - Copy port stats into ethtool buffer
 749 * @vport: virtual port struct
 750 * @data: ethtool buffer to copy into
 751 */
 752static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
 753{
 754	unsigned int size = IDPF_PORT_STATS_LEN;
 755	unsigned int start;
 756	unsigned int i;
 757
 758	do {
 759		start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
 760		for (i = 0; i < size; i++)
 761			idpf_add_one_ethtool_stat(&(*data)[i], vport,
 762						  &idpf_gstrings_port_stats[i]);
 763	} while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
 764
 765	*data += size;
 766}
 767
 768/**
 769 * idpf_collect_queue_stats - accumulate various per queue stats
 770 * into port level stats
 771 * @vport: pointer to vport struct
 772 **/
 773static void idpf_collect_queue_stats(struct idpf_vport *vport)
 774{
 775	struct idpf_port_stats *pstats = &vport->port_stats;
 776	int i, j;
 777
 778	/* zero out port stats since they're actually tracked in per
 779	 * queue stats; this is only for reporting
 780	 */
 781	u64_stats_update_begin(&pstats->stats_sync);
 782	u64_stats_set(&pstats->rx_hw_csum_err, 0);
 783	u64_stats_set(&pstats->rx_hsplit, 0);
 784	u64_stats_set(&pstats->rx_hsplit_hbo, 0);
 785	u64_stats_set(&pstats->rx_bad_descs, 0);
 786	u64_stats_set(&pstats->tx_linearize, 0);
 787	u64_stats_set(&pstats->tx_busy, 0);
 788	u64_stats_set(&pstats->tx_drops, 0);
 789	u64_stats_set(&pstats->tx_dma_map_errs, 0);
 790	u64_stats_update_end(&pstats->stats_sync);
 791
 792	for (i = 0; i < vport->num_rxq_grp; i++) {
 793		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
 794		u16 num_rxq;
 795
 796		if (idpf_is_queue_model_split(vport->rxq_model))
 797			num_rxq = rxq_grp->splitq.num_rxq_sets;
 798		else
 799			num_rxq = rxq_grp->singleq.num_rxq;
 800
 801		for (j = 0; j < num_rxq; j++) {
 802			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
 803			struct idpf_rx_queue_stats *stats;
 804			struct idpf_queue *rxq;
 805			unsigned int start;
 806
 807			if (idpf_is_queue_model_split(vport->rxq_model))
 808				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
 809			else
 810				rxq = rxq_grp->singleq.rxqs[j];
 811
 812			if (!rxq)
 813				continue;
 814
 815			do {
 816				start = u64_stats_fetch_begin(&rxq->stats_sync);
 817
 818				stats = &rxq->q_stats.rx;
 819				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
 820				hsplit = u64_stats_read(&stats->hsplit_pkts);
 821				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
 822				bad_descs = u64_stats_read(&stats->bad_descs);
 823			} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
 824
 825			u64_stats_update_begin(&pstats->stats_sync);
 826			u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
 827			u64_stats_add(&pstats->rx_hsplit, hsplit);
 828			u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
 829			u64_stats_add(&pstats->rx_bad_descs, bad_descs);
 830			u64_stats_update_end(&pstats->stats_sync);
 831		}
 832	}
 833
 834	for (i = 0; i < vport->num_txq_grp; i++) {
 835		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 836
 837		for (j = 0; j < txq_grp->num_txq; j++) {
 838			u64 linearize, qbusy, skb_drops, dma_map_errs;
 839			struct idpf_queue *txq = txq_grp->txqs[j];
 840			struct idpf_tx_queue_stats *stats;
 841			unsigned int start;
 842
 843			if (!txq)
 844				continue;
 845
 846			do {
 847				start = u64_stats_fetch_begin(&txq->stats_sync);
 848
 849				stats = &txq->q_stats.tx;
 850				linearize = u64_stats_read(&stats->linearize);
 851				qbusy = u64_stats_read(&stats->q_busy);
 852				skb_drops = u64_stats_read(&stats->skb_drops);
 853				dma_map_errs = u64_stats_read(&stats->dma_map_errs);
 854			} while (u64_stats_fetch_retry(&txq->stats_sync, start));
 855
 856			u64_stats_update_begin(&pstats->stats_sync);
 857			u64_stats_add(&pstats->tx_linearize, linearize);
 858			u64_stats_add(&pstats->tx_busy, qbusy);
 859			u64_stats_add(&pstats->tx_drops, skb_drops);
 860			u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
 861			u64_stats_update_end(&pstats->stats_sync);
 862		}
 863	}
 864}
 865
 866/**
 867 * idpf_get_ethtool_stats - report device statistics
 868 * @netdev: network interface device structure
 869 * @stats: ethtool statistics structure
 870 * @data: pointer to data buffer
 871 *
 872 * All statistics are added to the data buffer as an array of u64.
 873 */
 874static void idpf_get_ethtool_stats(struct net_device *netdev,
 875				   struct ethtool_stats __always_unused *stats,
 876				   u64 *data)
 877{
 878	struct idpf_netdev_priv *np = netdev_priv(netdev);
 879	struct idpf_vport_config *vport_config;
 880	struct page_pool_stats pp_stats = { };
 881	struct idpf_vport *vport;
 882	unsigned int total = 0;
 883	unsigned int i, j;
 884	bool is_splitq;
 885	u16 qtype;
 886
 887	idpf_vport_ctrl_lock(netdev);
 888	vport = idpf_netdev_to_vport(netdev);
 889
 890	if (np->state != __IDPF_VPORT_UP) {
 891		idpf_vport_ctrl_unlock(netdev);
 892
 893		return;
 894	}
 895
 896	rcu_read_lock();
 897
 898	idpf_collect_queue_stats(vport);
 899	idpf_add_port_stats(vport, &data);
 900
 901	for (i = 0; i < vport->num_txq_grp; i++) {
 902		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 903
 904		qtype = VIRTCHNL2_QUEUE_TYPE_TX;
 905
 906		for (j = 0; j < txq_grp->num_txq; j++, total++) {
 907			struct idpf_queue *txq = txq_grp->txqs[j];
 908
 909			if (!txq)
 910				idpf_add_empty_queue_stats(&data, qtype);
 911			else
 912				idpf_add_queue_stats(&data, txq);
 913		}
 914	}
 915
 916	vport_config = vport->adapter->vport_config[vport->idx];
 917	/* It is critical we provide a constant number of stats back to
 918	 * userspace regardless of how many queues are actually in use because
 919	 * there is no way to inform userspace the size has changed between
 920	 * ioctl calls. This will fill in any missing stats with zero.
 921	 */
 922	for (; total < vport_config->max_q.max_txq; total++)
 923		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
 924	total = 0;
 925
 926	is_splitq = idpf_is_queue_model_split(vport->rxq_model);
 927
 928	for (i = 0; i < vport->num_rxq_grp; i++) {
 929		struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
 930		u16 num_rxq;
 931
 932		qtype = VIRTCHNL2_QUEUE_TYPE_RX;
 933
 934		if (is_splitq)
 935			num_rxq = rxq_grp->splitq.num_rxq_sets;
 936		else
 937			num_rxq = rxq_grp->singleq.num_rxq;
 938
 939		for (j = 0; j < num_rxq; j++, total++) {
 940			struct idpf_queue *rxq;
 941
 942			if (is_splitq)
 943				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
 944			else
 945				rxq = rxq_grp->singleq.rxqs[j];
 946			if (!rxq)
 947				idpf_add_empty_queue_stats(&data, qtype);
 948			else
 949				idpf_add_queue_stats(&data, rxq);
 950
 951			/* In splitq mode, don't get page pool stats here since
 952			 * the pools are attached to the buffer queues
 953			 */
 954			if (is_splitq)
 955				continue;
 956
 957			if (rxq)
 958				page_pool_get_stats(rxq->pp, &pp_stats);
 959		}
 960	}
 961
 962	for (i = 0; i < vport->num_rxq_grp; i++) {
 963		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 964			struct idpf_queue *rxbufq =
 965				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
 966
 967			page_pool_get_stats(rxbufq->pp, &pp_stats);
 968		}
 969	}
 970
 971	for (; total < vport_config->max_q.max_rxq; total++)
 972		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
 973
 974	page_pool_ethtool_stats_get(data, &pp_stats);
 975
 976	rcu_read_unlock();
 977
 978	idpf_vport_ctrl_unlock(netdev);
 979}
 980
 981/**
 982 * idpf_find_rxq - find rxq from q index
 983 * @vport: virtual port associated with the queue
 984 * @q_num: queue index used to find the queue
 985 *
 986 * Returns a pointer to the Rx queue.
 987 */
 988static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
 989{
 990	int q_grp, q_idx;
 991
 992	if (!idpf_is_queue_model_split(vport->rxq_model))
 993		return vport->rxq_grps->singleq.rxqs[q_num];
 994
 995	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 996	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 997
 998	return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
 999}
1000
1001/**
1002 * idpf_find_txq - find txq from q index
 1003 * @vport: virtual port associated with the queue
 1004 * @q_num: queue index used to find the queue
 1005 *
 1006 * Returns the Tx queue in singleq mode, or its completion queue in splitq mode.
1007 */
1008static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
1009{
1010	int q_grp;
1011
1012	if (!idpf_is_queue_model_split(vport->txq_model))
1013		return vport->txqs[q_num];
1014
1015	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1016
1017	return vport->txq_grps[q_grp].complq;
1018}
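/*
 * Illustrative userspace sketch (not driver code): the div/mod mapping
 * idpf_find_rxq() applies in splitq mode to turn a flat queue index into
 * a (group, index-within-group) pair; idpf_find_txq() keeps only the
 * group part since it returns the group's completion queue. The
 * per-group count below is an assumption for demonstration only.
 */
#include <stdio.h>

int main(void)
{
	const int rxq_per_grp = 4;	/* stand-in for IDPF_DFLT_SPLITQ_RXQ_PER_GROUP */

	for (int q_num = 0; q_num < 8; q_num++)
		printf("q_num %d -> group %d, idx %d\n",
		       q_num, q_num / rxq_per_grp, q_num % rxq_per_grp);

	return 0;
}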
1019
1020/**
1021 * __idpf_get_q_coalesce - get ITR values for specific queue
1022 * @ec: ethtool structure to fill with driver's coalesce settings
 1023 * @q: Rx or Tx queue
1024 */
1025static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
1026				  struct idpf_queue *q)
1027{
1028	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
1029		ec->use_adaptive_rx_coalesce =
1030				IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
1031		ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
1032	} else {
1033		ec->use_adaptive_tx_coalesce =
1034				IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
1035		ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
1036	}
1037}
1038
1039/**
1040 * idpf_get_q_coalesce - get ITR values for specific queue
1041 * @netdev: pointer to the netdev associated with this query
1042 * @ec: coalesce settings to program the device with
1043 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1044 *
1045 * Return 0 on success, and negative on failure
1046 */
1047static int idpf_get_q_coalesce(struct net_device *netdev,
1048			       struct ethtool_coalesce *ec,
1049			       u32 q_num)
1050{
1051	struct idpf_netdev_priv *np = netdev_priv(netdev);
1052	struct idpf_vport *vport;
1053	int err = 0;
1054
1055	idpf_vport_ctrl_lock(netdev);
1056	vport = idpf_netdev_to_vport(netdev);
1057
1058	if (np->state != __IDPF_VPORT_UP)
1059		goto unlock_mutex;
1060
1061	if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
1062		err = -EINVAL;
1063		goto unlock_mutex;
1064	}
1065
1066	if (q_num < vport->num_rxq)
1067		__idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
1068
1069	if (q_num < vport->num_txq)
1070		__idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
1071
1072unlock_mutex:
1073	idpf_vport_ctrl_unlock(netdev);
1074
1075	return err;
1076}
1077
1078/**
1079 * idpf_get_coalesce - get ITR values as requested by user
1080 * @netdev: pointer to the netdev associated with this query
1081 * @ec: coalesce settings to be filled
1082 * @kec: unused
1083 * @extack: unused
1084 *
1085 * Return 0 on success, and negative on failure
1086 */
1087static int idpf_get_coalesce(struct net_device *netdev,
1088			     struct ethtool_coalesce *ec,
1089			     struct kernel_ethtool_coalesce *kec,
1090			     struct netlink_ext_ack *extack)
1091{
1092	/* Return coalesce based on queue number zero */
1093	return idpf_get_q_coalesce(netdev, ec, 0);
1094}
1095
1096/**
1097 * idpf_get_per_q_coalesce - get ITR values as requested by user
1098 * @netdev: pointer to the netdev associated with this query
 1099 * @q_num: queue for which the ITR values have to be retrieved
1100 * @ec: coalesce settings to be filled
1101 *
1102 * Return 0 on success, and negative on failure
1103 */
1104
1105static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
1106				   struct ethtool_coalesce *ec)
1107{
1108	return idpf_get_q_coalesce(netdev, ec, q_num);
1109}
1110
1111/**
1112 * __idpf_set_q_coalesce - set ITR values for specific queue
1113 * @ec: ethtool structure from user to update ITR settings
 1114 * @q: queue for which ITR values have to be set
1115 * @is_rxq: is queue type rx
1116 *
1117 * Returns 0 on success, negative otherwise.
1118 */
1119static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
1120				 struct idpf_queue *q, bool is_rxq)
1121{
1122	u32 use_adaptive_coalesce, coalesce_usecs;
1123	struct idpf_q_vector *qv = q->q_vector;
1124	bool is_dim_ena = false;
1125	u16 itr_val;
1126
1127	if (is_rxq) {
1128		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
1129		use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
1130		coalesce_usecs = ec->rx_coalesce_usecs;
1131		itr_val = qv->rx_itr_value;
1132	} else {
1133		is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
1134		use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
1135		coalesce_usecs = ec->tx_coalesce_usecs;
1136		itr_val = qv->tx_itr_value;
1137	}
1138	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
1139		netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
1140
1141		return -EINVAL;
1142	}
1143
1144	if (is_dim_ena && use_adaptive_coalesce)
1145		return 0;
1146
1147	if (coalesce_usecs > IDPF_ITR_MAX) {
1148		netdev_err(q->vport->netdev,
1149			   "Invalid value, %d-usecs range is 0-%d\n",
1150			   coalesce_usecs, IDPF_ITR_MAX);
1151
1152		return -EINVAL;
1153	}
1154
1155	if (coalesce_usecs % 2) {
1156		coalesce_usecs--;
1157		netdev_info(q->vport->netdev,
1158			    "HW only supports even ITR values, ITR rounded to %d\n",
1159			    coalesce_usecs);
1160	}
1161
1162	if (is_rxq) {
1163		qv->rx_itr_value = coalesce_usecs;
1164		if (use_adaptive_coalesce) {
1165			qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
1166		} else {
1167			qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
1168			idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
1169						  false);
1170		}
1171	} else {
1172		qv->tx_itr_value = coalesce_usecs;
1173		if (use_adaptive_coalesce) {
1174			qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
1175		} else {
1176			qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
1177			idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
1178		}
1179	}
1180
 1181	/* The switch between static and dynamic ITR takes effect when the next
 1182	 * interrupt fires
 1183	 */
1184	return 0;
1185}
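/*
 * Example (illustrative): a request such as
 *
 *	ethtool -C <iface> rx-usecs 51
 *
 * is rounded down to 50 by the "coalesce_usecs % 2" check above, since
 * the hardware only takes even ITR values; requesting a new rx-usecs
 * value while adaptive-rx remains enabled fails with -EINVAL instead.
 */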
1186
1187/**
1188 * idpf_set_q_coalesce - set ITR values for specific queue
 1189 * @vport: vport associated with the queue that needs updating
1190 * @ec: coalesce settings to program the device with
1191 * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
1192 * @is_rxq: is queue type rx
1193 *
1194 * Return 0 on success, and negative on failure
1195 */
1196static int idpf_set_q_coalesce(struct idpf_vport *vport,
1197			       struct ethtool_coalesce *ec,
1198			       int q_num, bool is_rxq)
1199{
1200	struct idpf_queue *q;
1201
1202	q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
1203
1204	if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
1205		return -EINVAL;
1206
1207	return 0;
1208}
1209
1210/**
1211 * idpf_set_coalesce - set ITR values as requested by user
1212 * @netdev: pointer to the netdev associated with this query
1213 * @ec: coalesce settings to program the device with
1214 * @kec: unused
1215 * @extack: unused
1216 *
1217 * Return 0 on success, and negative on failure
1218 */
1219static int idpf_set_coalesce(struct net_device *netdev,
1220			     struct ethtool_coalesce *ec,
1221			     struct kernel_ethtool_coalesce *kec,
1222			     struct netlink_ext_ack *extack)
1223{
1224	struct idpf_netdev_priv *np = netdev_priv(netdev);
1225	struct idpf_vport *vport;
1226	int i, err = 0;
1227
1228	idpf_vport_ctrl_lock(netdev);
1229	vport = idpf_netdev_to_vport(netdev);
1230
1231	if (np->state != __IDPF_VPORT_UP)
1232		goto unlock_mutex;
1233
1234	for (i = 0; i < vport->num_txq; i++) {
1235		err = idpf_set_q_coalesce(vport, ec, i, false);
1236		if (err)
1237			goto unlock_mutex;
1238	}
1239
1240	for (i = 0; i < vport->num_rxq; i++) {
1241		err = idpf_set_q_coalesce(vport, ec, i, true);
1242		if (err)
1243			goto unlock_mutex;
1244	}
1245
1246unlock_mutex:
1247	idpf_vport_ctrl_unlock(netdev);
1248
1249	return err;
1250}
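/*
 * Usage example (illustrative): since idpf_set_coalesce() walks every
 * queue, a global command like
 *
 *	ethtool -C <iface> rx-usecs 50 tx-usecs 100
 *
 * programs all Rx and Tx queues identically, while
 *
 *	ethtool --per-queue <iface> queue_mask 0x1 --coalesce rx-usecs 50
 *
 * targets a single queue via idpf_set_per_q_coalesce() below.
 */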
1251
1252/**
1253 * idpf_set_per_q_coalesce - set ITR values as requested by user
1254 * @netdev: pointer to the netdev associated with this query
 1255 * @q_num: queue for which the ITR values have to be set
1256 * @ec: coalesce settings to program the device with
1257 *
1258 * Return 0 on success, and negative on failure
1259 */
1260static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
1261				   struct ethtool_coalesce *ec)
1262{
1263	struct idpf_vport *vport;
1264	int err;
1265
1266	idpf_vport_ctrl_lock(netdev);
1267	vport = idpf_netdev_to_vport(netdev);
1268
1269	err = idpf_set_q_coalesce(vport, ec, q_num, false);
1270	if (err) {
1271		idpf_vport_ctrl_unlock(netdev);
1272
1273		return err;
1274	}
1275
1276	err = idpf_set_q_coalesce(vport, ec, q_num, true);
1277
1278	idpf_vport_ctrl_unlock(netdev);
1279
1280	return err;
1281}
1282
1283/**
1284 * idpf_get_msglevel - Get debug message level
1285 * @netdev: network interface device structure
1286 *
1287 * Returns current debug message level.
1288 */
1289static u32 idpf_get_msglevel(struct net_device *netdev)
1290{
1291	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1292
1293	return adapter->msg_enable;
1294}
1295
1296/**
1297 * idpf_set_msglevel - Set debug message level
1298 * @netdev: network interface device structure
1299 * @data: message level
1300 *
1301 * Set current debug message level. Higher values cause the driver to
1302 * be noisier.
1303 */
1304static void idpf_set_msglevel(struct net_device *netdev, u32 data)
1305{
1306	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
1307
1308	adapter->msg_enable = data;
1309}
1310
1311/**
1312 * idpf_get_link_ksettings - Get Link Speed and Duplex settings
1313 * @netdev: network interface device structure
1314 * @cmd: ethtool command
1315 *
1316 * Reports speed/duplex settings.
1317 **/
1318static int idpf_get_link_ksettings(struct net_device *netdev,
1319				   struct ethtool_link_ksettings *cmd)
1320{
1321	struct idpf_vport *vport;
1322
1323	idpf_vport_ctrl_lock(netdev);
1324	vport = idpf_netdev_to_vport(netdev);
1325
1326	ethtool_link_ksettings_zero_link_mode(cmd, supported);
1327	cmd->base.autoneg = AUTONEG_DISABLE;
1328	cmd->base.port = PORT_NONE;
1329	if (vport->link_up) {
1330		cmd->base.duplex = DUPLEX_FULL;
1331		cmd->base.speed = vport->link_speed_mbps;
1332	} else {
1333		cmd->base.duplex = DUPLEX_UNKNOWN;
1334		cmd->base.speed = SPEED_UNKNOWN;
1335	}
1336
1337	idpf_vport_ctrl_unlock(netdev);
1338
1339	return 0;
1340}
1341
1342static const struct ethtool_ops idpf_ethtool_ops = {
1343	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1344				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1345	.supported_ring_params	= ETHTOOL_RING_USE_TCP_DATA_SPLIT,
1346	.get_msglevel		= idpf_get_msglevel,
1347	.set_msglevel		= idpf_set_msglevel,
1348	.get_link		= ethtool_op_get_link,
1349	.get_coalesce		= idpf_get_coalesce,
1350	.set_coalesce		= idpf_set_coalesce,
1351	.get_per_queue_coalesce = idpf_get_per_q_coalesce,
1352	.set_per_queue_coalesce = idpf_set_per_q_coalesce,
1353	.get_ethtool_stats	= idpf_get_ethtool_stats,
1354	.get_strings		= idpf_get_strings,
1355	.get_sset_count		= idpf_get_sset_count,
1356	.get_channels		= idpf_get_channels,
1357	.get_rxnfc		= idpf_get_rxnfc,
1358	.get_rxfh_key_size	= idpf_get_rxfh_key_size,
1359	.get_rxfh_indir_size	= idpf_get_rxfh_indir_size,
1360	.get_rxfh		= idpf_get_rxfh,
1361	.set_rxfh		= idpf_set_rxfh,
1362	.set_channels		= idpf_set_channels,
1363	.get_ringparam		= idpf_get_ringparam,
1364	.set_ringparam		= idpf_set_ringparam,
1365	.get_link_ksettings	= idpf_get_link_ksettings,
1366};
1367
1368/**
1369 * idpf_set_ethtool_ops - Initialize ethtool ops struct
1370 * @netdev: network interface device structure
1371 *
1372 * Sets ethtool ops struct in our netdev so that ethtool can call
1373 * our functions.
1374 */
1375void idpf_set_ethtool_ops(struct net_device *netdev)
1376{
1377	netdev->ethtool_ops = &idpf_ethtool_ops;
1378}
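/*
 * Usage sketch (illustrative only, not the driver's actual call site):
 * the ops are attached while the netdev is being configured, before it
 * is registered, e.g.
 *
 *	idpf_set_ethtool_ops(netdev);
 *	err = register_netdev(netdev);
 */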