   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include <linux/bitfield.h>
   5#include <linux/uaccess.h>
   6
   7/* ethtool support for iavf */
   8#include "iavf.h"
   9
  10/* ethtool statistics helpers */
  11
  12/**
  13 * struct iavf_stats - definition for an ethtool statistic
  14 * @stat_string: statistic name to display in ethtool -S output
  15 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
  16 * @stat_offset: offsetof() the stat from a base pointer
  17 *
  18 * This structure defines a statistic to be added to the ethtool stats buffer.
  19 * It defines a statistic as offset from a common base pointer. Stats should
  20 * be defined in constant arrays using the IAVF_STAT macro, with every element
  21 * of the array using the same _type for calculating the sizeof_stat and
  22 * stat_offset.
  23 *
  24 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
  25 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
  26 * the iavf_add_ethtool_stat() helper function.
  27 *
  28 * The @stat_string is interpreted as a format string, allowing formatted
  29 * values to be inserted while looping over multiple structures for a given
  30 * statistics array. Thus, every statistic string in an array should have the
  31 * same type and number of format specifiers, to be formatted by variadic
  32 * arguments to the iavf_add_stat_string() helper function.
  33 **/
  34struct iavf_stats {
  35	char stat_string[ETH_GSTRING_LEN];
  36	int sizeof_stat;
  37	int stat_offset;
  38};
  39
  40/* Helper macro to define an iavf_stat structure with proper size and type.
  41 * Use this when defining constant statistics arrays. Note that @_type expects
  42 * only a type name and is used multiple times.
  43 */
  44#define IAVF_STAT(_type, _name, _stat) { \
  45	.stat_string = _name, \
  46	.sizeof_stat = sizeof_field(_type, _stat), \
  47	.stat_offset = offsetof(_type, _stat) \
  48}
  49
  50/* Helper macro for defining some statistics related to queues */
  51#define IAVF_QUEUE_STAT(_name, _stat) \
  52	IAVF_STAT(struct iavf_ring, _name, _stat)
  53
  54/* Stats associated with a Tx or Rx ring */
  55static const struct iavf_stats iavf_gstrings_queue_stats[] = {
  56	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
  57	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
  58};
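/* When formatted by iavf_add_stat_strings() with a "tx" or "rx" prefix and a
 * queue index (see iavf_get_stat_strings() below), these entries produce
 * ethtool -S names such as "tx-0.packets" and "rx-0.bytes".
 */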
  59
  60/**
  61 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
  62 * @data: location to store the stat value
  63 * @pointer: basis for where to copy from
  64 * @stat: the stat definition
  65 *
  66 * Copies the stat data defined by the pointer and stat structure pair into
  67 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
  68 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
  69 */
  70static void
  71iavf_add_one_ethtool_stat(u64 *data, void *pointer,
  72			  const struct iavf_stats *stat)
  73{
  74	char *p;
  75
  76	if (!pointer) {
  77		/* ensure that the ethtool data buffer is zero'd for any stats
  78		 * which don't have a valid pointer.
  79		 */
  80		*data = 0;
  81		return;
  82	}
  83
  84	p = (char *)pointer + stat->stat_offset;
  85	switch (stat->sizeof_stat) {
  86	case sizeof(u64):
  87		*data = *((u64 *)p);
  88		break;
  89	case sizeof(u32):
  90		*data = *((u32 *)p);
  91		break;
  92	case sizeof(u16):
  93		*data = *((u16 *)p);
  94		break;
  95	case sizeof(u8):
  96		*data = *((u8 *)p);
  97		break;
  98	default:
  99		WARN_ONCE(1, "unexpected stat size for %s",
 100			  stat->stat_string);
 101		*data = 0;
 102	}
 103}
 104
 105/**
 106 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
 107 * @data: ethtool stats buffer
 108 * @pointer: location to copy stats from
 109 * @stats: array of stats to copy
 110 * @size: the size of the stats definition
 111 *
 112 * Copy the stats defined by the stats array using the pointer as a base into
 113 * the data buffer supplied by ethtool. Updates the data pointer to point to
 114 * the next empty location for successive calls to __iavf_add_ethtool_stats.
 115 * If pointer is null, set the data values to zero and update the pointer to
 116 * skip these stats.
 117 **/
 118static void
 119__iavf_add_ethtool_stats(u64 **data, void *pointer,
 120			 const struct iavf_stats stats[],
 121			 const unsigned int size)
 122{
 123	unsigned int i;
 124
 125	for (i = 0; i < size; i++)
 126		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
 127}
 128
 129/**
 130 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
 131 * @data: ethtool stats buffer
 132 * @pointer: location where stats are stored
 133 * @stats: static const array of stat definitions
 134 *
 135 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
 136 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
 137 * ensuring that we pass the size associated with the given stats array.
 138 *
 139 * The parameter @stats is evaluated twice, so parameters with side effects
 140 * should be avoided.
 141 **/
 142#define iavf_add_ethtool_stats(data, pointer, stats) \
 143	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
 144
 145/**
 146 * iavf_add_queue_stats - copy queue statistics into supplied buffer
 147 * @data: ethtool stats buffer
 148 * @ring: the ring to copy
 149 *
 150 * Queue statistics must be copied while protected by
 151 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
 152 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
 153 * ring pointer is null, zero out the queue stat values and update the data
 154 * pointer. Otherwise safely copy the stats from the ring into the supplied
 155 * buffer and update the data pointer when finished.
 156 *
 157 * This function expects to be called while under rcu_read_lock().
 158 **/
 159static void
 160iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
 161{
 162	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
 163	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
 164	unsigned int start;
 165	unsigned int i;
 166
 167	/* To avoid invalid statistics values, ensure that we keep retrying
 168	 * the copy until we get a consistent value according to
 169	 * u64_stats_fetch_retry. But first, make sure our ring is
 170	 * non-null before attempting to access its syncp.
 171	 */
 172	do {
 173		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
 174		for (i = 0; i < size; i++)
 175			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
 176	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));
 177
 178	/* Once we successfully copy the stats in, update the data pointer */
 179	*data += size;
 180}
 181
 182/**
 183 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
 184 * @p: ethtool supplied buffer
 185 * @stats: stat definitions array
 186 * @size: size of the stats array
 187 *
 188 * Format and copy the strings described by stats into the buffer pointed at
 189 * by p.
 190 **/
 191static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
 192				    const unsigned int size, ...)
 193{
 194	unsigned int i;
 195
 196	for (i = 0; i < size; i++) {
 197		va_list args;
 198
 199		va_start(args, size);
 200		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
 201		*p += ETH_GSTRING_LEN;
 202		va_end(args);
 203	}
 204}
 205
 206/**
 207 * iavf_add_stat_strings - copy stat strings into ethtool buffer
 208 * @p: ethtool supplied buffer
 209 * @stats: stat definitions array
 210 *
 211 * Format and copy the strings described by the const static stats value into
 212 * the buffer pointed at by p.
 213 *
 214 * The parameter @stats is evaluated twice, so parameters with side effects
 215 * should be avoided. Additionally, stats must be an array such that
 216 * ARRAY_SIZE can be called on it.
 217 **/
 218#define iavf_add_stat_strings(p, stats, ...) \
 219	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
 220
 221#define VF_STAT(_name, _stat) \
 222	IAVF_STAT(struct iavf_adapter, _name, _stat)
 223
 224static const struct iavf_stats iavf_gstrings_stats[] = {
 225	VF_STAT("rx_bytes", current_stats.rx_bytes),
 226	VF_STAT("rx_unicast", current_stats.rx_unicast),
 227	VF_STAT("rx_multicast", current_stats.rx_multicast),
 228	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
 229	VF_STAT("rx_discards", current_stats.rx_discards),
 230	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
 231	VF_STAT("tx_bytes", current_stats.tx_bytes),
 232	VF_STAT("tx_unicast", current_stats.tx_unicast),
 233	VF_STAT("tx_multicast", current_stats.tx_multicast),
 234	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
 235	VF_STAT("tx_discards", current_stats.tx_discards),
 236	VF_STAT("tx_errors", current_stats.tx_errors),
 237};
 238
 239#define IAVF_STATS_LEN	ARRAY_SIZE(iavf_gstrings_stats)
 240
 241#define IAVF_QUEUE_STATS_LEN	ARRAY_SIZE(iavf_gstrings_queue_stats)
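/* The ethtool stats buffer is laid out as IAVF_STATS_LEN adapter-wide entries
 * followed by IAVF_QUEUE_STATS_LEN entries for each Tx and each Rx queue, i.e.
 * IAVF_STATS_LEN + IAVF_QUEUE_STATS_LEN * 2 * real_num_tx_queues entries in
 * total (see iavf_get_sset_count() and iavf_get_ethtool_stats() below).
 */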
 242
 243/**
 244 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
 245 * @netdev: network interface device structure
 246 * @cmd: ethtool command
 247 *
 248 * Reports speed/duplex settings. Because this is a VF, we don't know what
 249 * kind of link we really have, so we fake it.
 250 **/
 251static int iavf_get_link_ksettings(struct net_device *netdev,
 252				   struct ethtool_link_ksettings *cmd)
 253{
 254	struct iavf_adapter *adapter = netdev_priv(netdev);
 255
 256	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 257	cmd->base.autoneg = AUTONEG_DISABLE;
 258	cmd->base.port = PORT_NONE;
 259	cmd->base.duplex = DUPLEX_FULL;
 260
 261	if (ADV_LINK_SUPPORT(adapter)) {
 262		if (adapter->link_speed_mbps &&
 263		    adapter->link_speed_mbps < U32_MAX)
 264			cmd->base.speed = adapter->link_speed_mbps;
 265		else
 266			cmd->base.speed = SPEED_UNKNOWN;
 267
 268		return 0;
 269	}
 270
 271	switch (adapter->link_speed) {
 272	case VIRTCHNL_LINK_SPEED_40GB:
 273		cmd->base.speed = SPEED_40000;
 274		break;
 275	case VIRTCHNL_LINK_SPEED_25GB:
 276		cmd->base.speed = SPEED_25000;
 277		break;
 278	case VIRTCHNL_LINK_SPEED_20GB:
 279		cmd->base.speed = SPEED_20000;
 280		break;
 281	case VIRTCHNL_LINK_SPEED_10GB:
 282		cmd->base.speed = SPEED_10000;
 283		break;
 284	case VIRTCHNL_LINK_SPEED_5GB:
 285		cmd->base.speed = SPEED_5000;
 286		break;
 287	case VIRTCHNL_LINK_SPEED_2_5GB:
 288		cmd->base.speed = SPEED_2500;
 289		break;
 290	case VIRTCHNL_LINK_SPEED_1GB:
 291		cmd->base.speed = SPEED_1000;
 292		break;
 293	case VIRTCHNL_LINK_SPEED_100MB:
 294		cmd->base.speed = SPEED_100;
 295		break;
 296	default:
 297		break;
 298	}
 299
 300	return 0;
 301}
 302
 303/**
 304 * iavf_get_sset_count - Get length of string set
 305 * @netdev: network interface device structure
 306 * @sset: id of string set
 307 *
 308 * Reports size of various string tables.
 309 **/
 310static int iavf_get_sset_count(struct net_device *netdev, int sset)
 311{
  312	/* Report the maximum number of queues, even if not every queue is
 313	 * currently configured. Since allocation of queues is in pairs,
 314	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
 315	 * at device creation and never changes.
 316	 */
 317
 318	if (sset == ETH_SS_STATS)
 319		return IAVF_STATS_LEN +
 320			(IAVF_QUEUE_STATS_LEN * 2 *
 321			 netdev->real_num_tx_queues);
 322	else
 323		return -EINVAL;
 324}
 325
 326/**
 327 * iavf_get_ethtool_stats - report device statistics
 328 * @netdev: network interface device structure
 329 * @stats: ethtool statistics structure
 330 * @data: pointer to data buffer
 331 *
 332 * All statistics are added to the data buffer as an array of u64.
 333 **/
 334static void iavf_get_ethtool_stats(struct net_device *netdev,
 335				   struct ethtool_stats *stats, u64 *data)
 336{
 337	struct iavf_adapter *adapter = netdev_priv(netdev);
 338	unsigned int i;
 339
 340	/* Explicitly request stats refresh */
 341	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
 342
 343	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
 344
 345	rcu_read_lock();
  346	/* As num_active_queues describes both tx and rx queues, we can use
 347	 * it to iterate over rings' stats.
 348	 */
 349	for (i = 0; i < adapter->num_active_queues; i++) {
 350		struct iavf_ring *ring;
 351
 352		/* Tx rings stats */
 353		ring = &adapter->tx_rings[i];
 354		iavf_add_queue_stats(&data, ring);
 355
 356		/* Rx rings stats */
 357		ring = &adapter->rx_rings[i];
 358		iavf_add_queue_stats(&data, ring);
 359	}
 360	rcu_read_unlock();
 361}
 362
 363/**
 364 * iavf_get_stat_strings - Get stat strings
 365 * @netdev: network interface device structure
 366 * @data: buffer for string data
 367 *
 368 * Builds the statistics string table
 369 **/
 370static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
 371{
 372	unsigned int i;
 373
 374	iavf_add_stat_strings(&data, iavf_gstrings_stats);
 375
 376	/* Queues are always allocated in pairs, so we just use
 377	 * real_num_tx_queues for both Tx and Rx queues.
 378	 */
 379	for (i = 0; i < netdev->real_num_tx_queues; i++) {
 380		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 381				      "tx", i);
 382		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 383				      "rx", i);
 384	}
 385}
 386
 387/**
 388 * iavf_get_strings - Get string set
 389 * @netdev: network interface device structure
 390 * @sset: id of string set
 391 * @data: buffer for string data
 392 *
 393 * Builds string tables for various string sets
 394 **/
 395static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 396{
 397	switch (sset) {
 398	case ETH_SS_STATS:
 399		iavf_get_stat_strings(netdev, data);
 400		break;
 401	default:
 402		break;
 403	}
 404}
 405
 406/**
 407 * iavf_get_msglevel - Get debug message level
 408 * @netdev: network interface device structure
 409 *
 410 * Returns current debug message level.
 411 **/
 412static u32 iavf_get_msglevel(struct net_device *netdev)
 413{
 414	struct iavf_adapter *adapter = netdev_priv(netdev);
 415
 416	return adapter->msg_enable;
 417}
 418
 419/**
 420 * iavf_set_msglevel - Set debug message level
 421 * @netdev: network interface device structure
 422 * @data: message level
 423 *
 424 * Set current debug message level. Higher values cause the driver to
 425 * be noisier.
 426 **/
 427static void iavf_set_msglevel(struct net_device *netdev, u32 data)
 428{
 429	struct iavf_adapter *adapter = netdev_priv(netdev);
 430
 431	if (IAVF_DEBUG_USER & data)
 432		adapter->hw.debug_mask = data;
 433	adapter->msg_enable = data;
 434}
 435
 436/**
 437 * iavf_get_drvinfo - Get driver info
 438 * @netdev: network interface device structure
  439 * @drvinfo: ethtool driver info structure
 440 *
 441 * Returns information about the driver and device for display to the user.
 442 **/
 443static void iavf_get_drvinfo(struct net_device *netdev,
 444			     struct ethtool_drvinfo *drvinfo)
 445{
 446	struct iavf_adapter *adapter = netdev_priv(netdev);
 447
 448	strscpy(drvinfo->driver, iavf_driver_name, 32);
 449	strscpy(drvinfo->fw_version, "N/A", 4);
 450	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 451}
 452
 453/**
 454 * iavf_get_ringparam - Get ring parameters
 455 * @netdev: network interface device structure
 456 * @ring: ethtool ringparam structure
  457 * @kernel_ring: ethtool external ringparam structure
 458 * @extack: netlink extended ACK report struct
 459 *
 460 * Returns current ring parameters. TX and RX rings are reported separately,
 461 * but the number of rings is not reported.
 462 **/
 463static void iavf_get_ringparam(struct net_device *netdev,
 464			       struct ethtool_ringparam *ring,
 465			       struct kernel_ethtool_ringparam *kernel_ring,
 466			       struct netlink_ext_ack *extack)
 467{
 468	struct iavf_adapter *adapter = netdev_priv(netdev);
 469
 470	ring->rx_max_pending = IAVF_MAX_RXD;
 471	ring->tx_max_pending = IAVF_MAX_TXD;
 472	ring->rx_pending = adapter->rx_desc_count;
 473	ring->tx_pending = adapter->tx_desc_count;
 474}
 475
 476/**
 477 * iavf_set_ringparam - Set ring parameters
 478 * @netdev: network interface device structure
 479 * @ring: ethtool ringparam structure
 480 * @kernel_ring: ethtool external ringparam structure
 481 * @extack: netlink extended ACK report struct
 482 *
 483 * Sets ring parameters. TX and RX rings are controlled separately, but the
 484 * number of rings is not specified, so all rings get the same settings.
 485 **/
 486static int iavf_set_ringparam(struct net_device *netdev,
 487			      struct ethtool_ringparam *ring,
 488			      struct kernel_ethtool_ringparam *kernel_ring,
 489			      struct netlink_ext_ack *extack)
 490{
 491	struct iavf_adapter *adapter = netdev_priv(netdev);
 492	u32 new_rx_count, new_tx_count;
 493	int ret = 0;
 494
 495	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 496		return -EINVAL;
 497
 498	if (ring->tx_pending > IAVF_MAX_TXD ||
 499	    ring->tx_pending < IAVF_MIN_TXD ||
 500	    ring->rx_pending > IAVF_MAX_RXD ||
 501	    ring->rx_pending < IAVF_MIN_RXD) {
 502		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
 503			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
 504			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 505		return -EINVAL;
 506	}
 507
 508	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 509	if (new_tx_count != ring->tx_pending)
 510		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
 511			    new_tx_count);
 512
 513	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 514	if (new_rx_count != ring->rx_pending)
 515		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
 516			    new_rx_count);
 517
 518	/* if nothing to do return success */
 519	if ((new_tx_count == adapter->tx_desc_count) &&
 520	    (new_rx_count == adapter->rx_desc_count)) {
 521		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
 522		return 0;
 523	}
 524
 525	if (new_tx_count != adapter->tx_desc_count) {
 526		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
 527			   adapter->tx_desc_count, new_tx_count);
 528		adapter->tx_desc_count = new_tx_count;
 529	}
 530
 531	if (new_rx_count != adapter->rx_desc_count) {
 532		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
 533			   adapter->rx_desc_count, new_rx_count);
 534		adapter->rx_desc_count = new_rx_count;
 535	}
 536
 537	if (netif_running(netdev)) {
 538		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
 539		ret = iavf_wait_for_reset(adapter);
 540		if (ret)
 541			netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
 542	}
 543
 544	return ret;
 545}
 546
 547/**
 548 * __iavf_get_coalesce - get per-queue coalesce settings
 549 * @netdev: the netdev to check
 550 * @ec: ethtool coalesce data structure
 551 * @queue: which queue to pick
 552 *
  553 * Gets the per-queue interrupt coalescing settings. Specifically, Rx and Tx
  554 * usecs are per-queue values. If queue is <0 then we default to queue 0 as
  555 * the representative value.
 556 **/
 557static int __iavf_get_coalesce(struct net_device *netdev,
 558			       struct ethtool_coalesce *ec, int queue)
 559{
 560	struct iavf_adapter *adapter = netdev_priv(netdev);
 561	struct iavf_ring *rx_ring, *tx_ring;
 562
  563	/* Rx and Tx usecs are per-queue values. If the user doesn't specify a
  564	 * queue, return queue 0's values as representative.
 565	 */
 566	if (queue < 0)
 567		queue = 0;
 568	else if (queue >= adapter->num_active_queues)
 569		return -EINVAL;
 570
 571	rx_ring = &adapter->rx_rings[queue];
 572	tx_ring = &adapter->tx_rings[queue];
 573
 574	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
 575		ec->use_adaptive_rx_coalesce = 1;
 576
 577	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
 578		ec->use_adaptive_tx_coalesce = 1;
 579
 580	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 581	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 582
 583	return 0;
 584}
 585
 586/**
 587 * iavf_get_coalesce - Get interrupt coalescing settings
 588 * @netdev: network interface device structure
 589 * @ec: ethtool coalesce structure
 590 * @kernel_coal: ethtool CQE mode setting structure
 591 * @extack: extack for reporting error messages
 592 *
 593 * Returns current coalescing settings. This is referred to elsewhere in the
 594 * driver as Interrupt Throttle Rate, as this is how the hardware describes
 595 * this functionality. Note that if per-queue settings have been modified this
 596 * only represents the settings of queue 0.
 597 **/
 598static int iavf_get_coalesce(struct net_device *netdev,
 599			     struct ethtool_coalesce *ec,
 600			     struct kernel_ethtool_coalesce *kernel_coal,
 601			     struct netlink_ext_ack *extack)
 602{
 603	return __iavf_get_coalesce(netdev, ec, -1);
 604}
 605
 606/**
 607 * iavf_get_per_queue_coalesce - get coalesce values for specific queue
 608 * @netdev: netdev to read
 609 * @ec: coalesce settings from ethtool
 610 * @queue: the queue to read
 611 *
 612 * Read specific queue's coalesce settings.
 613 **/
 614static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
 615				       struct ethtool_coalesce *ec)
 616{
 617	return __iavf_get_coalesce(netdev, ec, queue);
 618}
 619
 620/**
 621 * iavf_set_itr_per_queue - set ITR values for specific queue
 622 * @adapter: the VF adapter struct to set values for
 623 * @ec: coalesce settings from ethtool
 624 * @queue: the queue to modify
 625 *
 626 * Change the ITR settings for a specific queue.
 627 **/
 628static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
 629				  struct ethtool_coalesce *ec, int queue)
 630{
 631	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
 632	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
 633	struct iavf_q_vector *q_vector;
 634	u16 itr_setting;
 635
 636	itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 637
 638	if (ec->rx_coalesce_usecs != itr_setting &&
 639	    ec->use_adaptive_rx_coalesce) {
 640		netif_info(adapter, drv, adapter->netdev,
 641			   "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
 642		return -EINVAL;
 643	}
 644
 645	itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 646
 647	if (ec->tx_coalesce_usecs != itr_setting &&
 648	    ec->use_adaptive_tx_coalesce) {
 649		netif_info(adapter, drv, adapter->netdev,
 650			   "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
 651		return -EINVAL;
 652	}
 653
 654	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
 655	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
 656
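	/* Default both rings to adaptive (dynamic) ITR, then clear the flag
	 * again if the user did not request adaptive coalescing.
	 */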
 657	rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
 658	if (!ec->use_adaptive_rx_coalesce)
 659		rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
 660
 661	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
 662	if (!ec->use_adaptive_tx_coalesce)
 663		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
 664
 665	q_vector = rx_ring->q_vector;
 666	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
 667
 668	q_vector = tx_ring->q_vector;
 669	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
 670
 671	/* The interrupt handler itself will take care of programming
 672	 * the Tx and Rx ITR values based on the values we have entered
 673	 * into the q_vector, no need to write the values now.
 674	 */
 675	return 0;
 676}
 677
 678/**
 679 * __iavf_set_coalesce - set coalesce settings for particular queue
 680 * @netdev: the netdev to change
 681 * @ec: ethtool coalesce settings
 682 * @queue: the queue to change
 683 *
 684 * Sets the coalesce settings for a particular queue.
 685 **/
 686static int __iavf_set_coalesce(struct net_device *netdev,
 687			       struct ethtool_coalesce *ec, int queue)
 688{
 689	struct iavf_adapter *adapter = netdev_priv(netdev);
 690	int i;
 691
 692	if (ec->rx_coalesce_usecs > IAVF_MAX_ITR) {
 693		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
 694		return -EINVAL;
 695	} else if (ec->tx_coalesce_usecs > IAVF_MAX_ITR) {
 696		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
 697		return -EINVAL;
 698	}
 699
  700	/* Rx and Tx usecs have per-queue values. If the user doesn't specify a
  701	 * queue, apply to all queues.
 702	 */
 703	if (queue < 0) {
 704		for (i = 0; i < adapter->num_active_queues; i++)
 705			if (iavf_set_itr_per_queue(adapter, ec, i))
 706				return -EINVAL;
 707	} else if (queue < adapter->num_active_queues) {
 708		if (iavf_set_itr_per_queue(adapter, ec, queue))
 709			return -EINVAL;
 710	} else {
 711		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
 712			   adapter->num_active_queues - 1);
 713		return -EINVAL;
 714	}
 715
 716	return 0;
 717}
 718
 719/**
 720 * iavf_set_coalesce - Set interrupt coalescing settings
 721 * @netdev: network interface device structure
 722 * @ec: ethtool coalesce structure
 723 * @kernel_coal: ethtool CQE mode setting structure
 724 * @extack: extack for reporting error messages
 725 *
 726 * Change current coalescing settings for every queue.
 727 **/
 728static int iavf_set_coalesce(struct net_device *netdev,
 729			     struct ethtool_coalesce *ec,
 730			     struct kernel_ethtool_coalesce *kernel_coal,
 731			     struct netlink_ext_ack *extack)
 732{
 733	return __iavf_set_coalesce(netdev, ec, -1);
 734}
 735
 736/**
 737 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
 738 * @netdev: the netdev to change
 739 * @ec: ethtool's coalesce settings
 740 * @queue: the queue to modify
 741 *
 742 * Modifies a specific queue's coalesce settings.
 743 */
 744static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
 745				       struct ethtool_coalesce *ec)
 746{
 747	return __iavf_set_coalesce(netdev, ec, queue);
 748}
 749
 750/**
 751 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
 752 * flow type values
 753 * @flow: filter type to be converted
 754 *
 755 * Returns the corresponding ethtool flow type.
 756 */
 757static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
 758{
 759	switch (flow) {
 760	case IAVF_FDIR_FLOW_IPV4_TCP:
 761		return TCP_V4_FLOW;
 762	case IAVF_FDIR_FLOW_IPV4_UDP:
 763		return UDP_V4_FLOW;
 764	case IAVF_FDIR_FLOW_IPV4_SCTP:
 765		return SCTP_V4_FLOW;
 766	case IAVF_FDIR_FLOW_IPV4_AH:
 767		return AH_V4_FLOW;
 768	case IAVF_FDIR_FLOW_IPV4_ESP:
 769		return ESP_V4_FLOW;
 770	case IAVF_FDIR_FLOW_IPV4_OTHER:
 771		return IPV4_USER_FLOW;
 772	case IAVF_FDIR_FLOW_IPV6_TCP:
 773		return TCP_V6_FLOW;
 774	case IAVF_FDIR_FLOW_IPV6_UDP:
 775		return UDP_V6_FLOW;
 776	case IAVF_FDIR_FLOW_IPV6_SCTP:
 777		return SCTP_V6_FLOW;
 778	case IAVF_FDIR_FLOW_IPV6_AH:
 779		return AH_V6_FLOW;
 780	case IAVF_FDIR_FLOW_IPV6_ESP:
 781		return ESP_V6_FLOW;
 782	case IAVF_FDIR_FLOW_IPV6_OTHER:
 783		return IPV6_USER_FLOW;
 784	case IAVF_FDIR_FLOW_NON_IP_L2:
 785		return ETHER_FLOW;
 786	default:
 787		/* 0 is undefined ethtool flow */
 788		return 0;
 789	}
 790}
 791
 792/**
 793 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 794 * @eth: Ethtool flow type to be converted
 795 *
 796 * Returns flow enum
 797 */
 798static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
 799{
 800	switch (eth) {
 801	case TCP_V4_FLOW:
 802		return IAVF_FDIR_FLOW_IPV4_TCP;
 803	case UDP_V4_FLOW:
 804		return IAVF_FDIR_FLOW_IPV4_UDP;
 805	case SCTP_V4_FLOW:
 806		return IAVF_FDIR_FLOW_IPV4_SCTP;
 807	case AH_V4_FLOW:
 808		return IAVF_FDIR_FLOW_IPV4_AH;
 809	case ESP_V4_FLOW:
 810		return IAVF_FDIR_FLOW_IPV4_ESP;
 811	case IPV4_USER_FLOW:
 812		return IAVF_FDIR_FLOW_IPV4_OTHER;
 813	case TCP_V6_FLOW:
 814		return IAVF_FDIR_FLOW_IPV6_TCP;
 815	case UDP_V6_FLOW:
 816		return IAVF_FDIR_FLOW_IPV6_UDP;
 817	case SCTP_V6_FLOW:
 818		return IAVF_FDIR_FLOW_IPV6_SCTP;
 819	case AH_V6_FLOW:
 820		return IAVF_FDIR_FLOW_IPV6_AH;
 821	case ESP_V6_FLOW:
 822		return IAVF_FDIR_FLOW_IPV6_ESP;
 823	case IPV6_USER_FLOW:
 824		return IAVF_FDIR_FLOW_IPV6_OTHER;
 825	case ETHER_FLOW:
 826		return IAVF_FDIR_FLOW_NON_IP_L2;
 827	default:
 828		return IAVF_FDIR_FLOW_NONE;
 829	}
 830}
 831
 832/**
 833 * iavf_is_mask_valid - check mask field set
 834 * @mask: full mask to check
 835 * @field: field for which mask should be valid
 836 *
  837 * Return true if every bit of @field is also set in @mask (i.e. the mask
  838 * fully covers the field); otherwise return false.
 839 */
 840static bool iavf_is_mask_valid(u64 mask, u64 field)
 841{
 842	return (mask & field) == field;
 843}
 844
 845/**
 846 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
 847 * @fsp: pointer to ethtool Rx flow specification
 848 * @fltr: pointer to Flow Director filter for userdef data storage
 849 *
 850 * Returns 0 on success, negative error value on failure
 851 */
 852static int
 853iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 854			     struct iavf_fdir_fltr *fltr)
 855{
 856	struct iavf_flex_word *flex;
 857	int i, cnt = 0;
 858
 859	if (!(fsp->flow_type & FLOW_EXT))
 860		return 0;
 861
 862	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
 863#define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
 864#define IAVF_USERDEF_FLEX_OFFS_S	16
 865#define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
 866#define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
 867		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
 868		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
 869
 870		if (!value || !mask)
 871			continue;
 872
 873		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
 874			return -EINVAL;
 875
 876		/* 504 is the maximum value for offsets, and offset is measured
 877		 * from the start of the MAC address.
 878		 */
 879#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
 880		flex = &fltr->flex_words[cnt++];
 881		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
 882		flex->offset = FIELD_GET(IAVF_USERDEF_FLEX_OFFS_M, value);
 883		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
 884			return -EINVAL;
 885	}
 886
 887	fltr->flex_cnt = cnt;
 888
 889	return 0;
 890}
 891
 892/**
 893 * iavf_fill_rx_flow_ext_data - fill the additional data
 894 * @fsp: pointer to ethtool Rx flow specification
 895 * @fltr: pointer to Flow Director filter to get additional data
 896 */
 897static void
 898iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
 899			   struct iavf_fdir_fltr *fltr)
 900{
 901	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
 902		return;
 903
 904	fsp->flow_type |= FLOW_EXT;
 905
 906	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
 907	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
 908}
 909
 910/**
 911 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
 912 * @adapter: the VF adapter structure that contains filter list
 913 * @cmd: ethtool command data structure to receive the filter data
 914 *
  915 * Returns 0 on success, as expected by ethtool
 916 */
 917static int
 918iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
 919			    struct ethtool_rxnfc *cmd)
 920{
 921	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
 922	struct iavf_fdir_fltr *rule = NULL;
 923	int ret = 0;
 924
 925	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
 926		return -EOPNOTSUPP;
 927
 928	spin_lock_bh(&adapter->fdir_fltr_lock);
 929
 930	rule = iavf_find_fdir_fltr(adapter, false, fsp->location);
 931	if (!rule) {
 932		ret = -EINVAL;
 933		goto release_lock;
 934	}
 935
 936	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
 937
 938	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
 939	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
 940
 941	switch (fsp->flow_type) {
 942	case TCP_V4_FLOW:
 943	case UDP_V4_FLOW:
 944	case SCTP_V4_FLOW:
 945		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
 946		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
 947		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
 948		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
 949		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
 950		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
 951		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
 952		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
 953		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
 954		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
 955		break;
 956	case AH_V4_FLOW:
 957	case ESP_V4_FLOW:
 958		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
 959		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
 960		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
 961		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
 962		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
 963		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
 964		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
 965		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
 966		break;
 967	case IPV4_USER_FLOW:
 968		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
 969		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
 970		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
 971		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
 972		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
 973		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
 974		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
 975		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
 976		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
 977		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
 978		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
 979		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
 980		break;
 981	case TCP_V6_FLOW:
 982	case UDP_V6_FLOW:
 983	case SCTP_V6_FLOW:
 984		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
 985		       sizeof(struct in6_addr));
 986		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
 987		       sizeof(struct in6_addr));
 988		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
 989		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
 990		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
 991		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
 992		       sizeof(struct in6_addr));
 993		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
 994		       sizeof(struct in6_addr));
 995		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
 996		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
 997		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
 998		break;
 999	case AH_V6_FLOW:
1000	case ESP_V6_FLOW:
1001		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1002		       sizeof(struct in6_addr));
1003		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1004		       sizeof(struct in6_addr));
1005		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1006		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1007		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1008		       sizeof(struct in6_addr));
1009		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1010		       sizeof(struct in6_addr));
1011		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1012		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1013		break;
1014	case IPV6_USER_FLOW:
1015		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1016		       sizeof(struct in6_addr));
1017		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1018		       sizeof(struct in6_addr));
1019		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1020		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1021		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1022		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1023		       sizeof(struct in6_addr));
1024		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1025		       sizeof(struct in6_addr));
1026		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1027		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1028		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1029		break;
1030	case ETHER_FLOW:
1031		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1032		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1033		break;
1034	default:
1035		ret = -EINVAL;
1036		break;
1037	}
1038
1039	iavf_fill_rx_flow_ext_data(fsp, rule);
1040
1041	if (rule->action == VIRTCHNL_ACTION_DROP)
1042		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1043	else
1044		fsp->ring_cookie = rule->q_index;
1045
1046release_lock:
1047	spin_unlock_bh(&adapter->fdir_fltr_lock);
1048	return ret;
1049}
1050
1051/**
1052 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1053 * @adapter: the VF adapter structure containing the filter list
1054 * @cmd: ethtool command data structure
1055 * @rule_locs: ethtool array passed in from OS to receive filter IDs
1056 *
 1057 * Returns 0 on success, as expected by ethtool
1058 */
1059static int
1060iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1061		       u32 *rule_locs)
1062{
1063	struct iavf_fdir_fltr *fltr;
1064	unsigned int cnt = 0;
1065	int val = 0;
1066
1067	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1068		return -EOPNOTSUPP;
1069
1070	cmd->data = IAVF_MAX_FDIR_FILTERS;
1071
1072	spin_lock_bh(&adapter->fdir_fltr_lock);
1073
1074	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
1075		if (iavf_is_raw_fdir(fltr))
1076			continue;
1077
1078		if (cnt == cmd->rule_cnt) {
1079			val = -EMSGSIZE;
1080			goto release_lock;
1081		}
1082		rule_locs[cnt] = fltr->loc;
1083		cnt++;
1084	}
1085
1086release_lock:
1087	spin_unlock_bh(&adapter->fdir_fltr_lock);
1088	if (!val)
1089		cmd->rule_cnt = cnt;
1090
1091	return val;
1092}
1093
1094/**
1095 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1096 * @adapter: pointer to the VF adapter structure
1097 * @fsp: pointer to ethtool Rx flow specification
1098 * @fltr: filter structure
1099 */
1100static int
1101iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1102			struct iavf_fdir_fltr *fltr)
1103{
1104	u32 flow_type, q_index = 0;
1105	enum virtchnl_action act;
1106	int err;
1107
1108	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1109		act = VIRTCHNL_ACTION_DROP;
1110	} else {
1111		q_index = fsp->ring_cookie;
1112		if (q_index >= adapter->num_active_queues)
1113			return -EINVAL;
1114
1115		act = VIRTCHNL_ACTION_QUEUE;
1116	}
1117
1118	fltr->action = act;
1119	fltr->loc = fsp->location;
1120	fltr->q_index = q_index;
1121
1122	if (fsp->flow_type & FLOW_EXT) {
1123		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1124		       sizeof(fltr->ext_data.usr_def));
1125		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1126		       sizeof(fltr->ext_mask.usr_def));
1127	}
1128
1129	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1130	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1131
1132	switch (flow_type) {
1133	case TCP_V4_FLOW:
1134	case UDP_V4_FLOW:
1135	case SCTP_V4_FLOW:
1136		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1137		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1138		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1139		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1140		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1141		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1142		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1143		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1144		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1145		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
1146		fltr->ip_ver = 4;
1147		break;
1148	case AH_V4_FLOW:
1149	case ESP_V4_FLOW:
1150		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1151		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1152		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1153		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1154		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1155		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1156		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1157		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
1158		fltr->ip_ver = 4;
1159		break;
1160	case IPV4_USER_FLOW:
1161		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1162		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1163		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1164		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1165		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1166		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1167		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1168		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1169		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1170		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
1171		fltr->ip_ver = 4;
1172		break;
1173	case TCP_V6_FLOW:
1174	case UDP_V6_FLOW:
1175	case SCTP_V6_FLOW:
1176		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1177		       sizeof(struct in6_addr));
1178		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1179		       sizeof(struct in6_addr));
1180		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1181		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1182		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1183		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1184		       sizeof(struct in6_addr));
1185		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1186		       sizeof(struct in6_addr));
1187		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1188		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1189		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
1190		fltr->ip_ver = 6;
1191		break;
1192	case AH_V6_FLOW:
1193	case ESP_V6_FLOW:
1194		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1195		       sizeof(struct in6_addr));
1196		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1197		       sizeof(struct in6_addr));
1198		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1199		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1200		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1201		       sizeof(struct in6_addr));
1202		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1203		       sizeof(struct in6_addr));
1204		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1205		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
1206		fltr->ip_ver = 6;
1207		break;
1208	case IPV6_USER_FLOW:
1209		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1210		       sizeof(struct in6_addr));
1211		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1212		       sizeof(struct in6_addr));
1213		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1214		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1215		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1216		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1217		       sizeof(struct in6_addr));
1218		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1219		       sizeof(struct in6_addr));
1220		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1221		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1222		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1223		fltr->ip_ver = 6;
1224		break;
1225	case ETHER_FLOW:
1226		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1227		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1228		break;
1229	default:
1230		/* not doing un-parsed flow types */
1231		return -EINVAL;
1232	}
1233
1234	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
1235	if (err)
1236		return err;
1237
1238	if (iavf_fdir_is_dup_fltr(adapter, fltr))
1239		return -EEXIST;
1240
1241	err = iavf_parse_rx_flow_user_data(fsp, fltr);
1242	if (err)
1243		return err;
1244
1245	return iavf_fill_fdir_add_msg(adapter, fltr);
1246}
1247
1248/**
1249 * iavf_add_fdir_ethtool - add Flow Director filter
1250 * @adapter: pointer to the VF adapter structure
1251 * @cmd: command to add Flow Director filter
1252 *
1253 * Returns 0 on success and negative values for failure
1254 */
1255static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1256{
1257	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1258	struct iavf_fdir_fltr *fltr;
1259	int count = 50;
1260	int err;
1261
1262	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1263		return -EOPNOTSUPP;
1264
1265	if (fsp->flow_type & FLOW_MAC_EXT)
1266		return -EINVAL;
1267
1268	spin_lock_bh(&adapter->fdir_fltr_lock);
1269	if (iavf_find_fdir_fltr(adapter, false, fsp->location)) {
1270		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1271		spin_unlock_bh(&adapter->fdir_fltr_lock);
1272		return -EEXIST;
1273	}
1274	spin_unlock_bh(&adapter->fdir_fltr_lock);
1275
1276	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1277	if (!fltr)
1278		return -ENOMEM;
1279
1280	while (!mutex_trylock(&adapter->crit_lock)) {
1281		if (--count == 0) {
1282			kfree(fltr);
1283			return -EINVAL;
1284		}
1285		udelay(1);
1286	}
1287
1288	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1289	if (!err)
1290		err = iavf_fdir_add_fltr(adapter, fltr);
1291
1292	if (err)
1293		kfree(fltr);
1294
1295	mutex_unlock(&adapter->crit_lock);
1296	return err;
1297}
1298
1299/**
1300 * iavf_del_fdir_ethtool - delete Flow Director filter
1301 * @adapter: pointer to the VF adapter structure
1302 * @cmd: command to delete Flow Director filter
1303 *
1304 * Returns 0 on success and negative values for failure
1305 */
1306static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1307{
1308	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1309
1310	if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1311		return -EOPNOTSUPP;
1312
1313	return iavf_fdir_del_fltr(adapter, false, fsp->location);
1314}
1315
1316/**
1317 * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1318 * @cmd: ethtool rxnfc command
1319 *
1320 * This function parses the rxnfc command and returns intended
1321 * header types for RSS configuration
1322 */
1323static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1324{
1325	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1326
1327	switch (cmd->flow_type) {
1328	case TCP_V4_FLOW:
1329		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1330			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1331		break;
1332	case UDP_V4_FLOW:
1333		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1334			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1335		break;
1336	case SCTP_V4_FLOW:
1337		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1338			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1339		break;
1340	case TCP_V6_FLOW:
1341		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1342			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1343		break;
1344	case UDP_V6_FLOW:
1345		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1346			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1347		break;
1348	case SCTP_V6_FLOW:
1349		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1350			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1351		break;
1352	default:
1353		break;
1354	}
1355
1356	return hdrs;
1357}
1358
1359/**
1360 * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1361 * @cmd: ethtool rxnfc command
 1362 * @symm: true if Symmetric Toeplitz is set
1363 *
1364 * This function parses the rxnfc command and returns intended hash fields for
1365 * RSS configuration
1366 */
1367static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
1368{
1369	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1370
1371	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1372		switch (cmd->flow_type) {
1373		case TCP_V4_FLOW:
1374		case UDP_V4_FLOW:
1375		case SCTP_V4_FLOW:
1376			if (cmd->data & RXH_IP_SRC)
1377				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1378			if (cmd->data & RXH_IP_DST)
1379				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1380			break;
1381		case TCP_V6_FLOW:
1382		case UDP_V6_FLOW:
1383		case SCTP_V6_FLOW:
1384			if (cmd->data & RXH_IP_SRC)
1385				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1386			if (cmd->data & RXH_IP_DST)
1387				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1388			break;
1389		default:
1390			break;
1391		}
1392	}
1393
1394	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1395		switch (cmd->flow_type) {
1396		case TCP_V4_FLOW:
1397		case TCP_V6_FLOW:
1398			if (cmd->data & RXH_L4_B_0_1)
1399				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1400			if (cmd->data & RXH_L4_B_2_3)
1401				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1402			break;
1403		case UDP_V4_FLOW:
1404		case UDP_V6_FLOW:
1405			if (cmd->data & RXH_L4_B_0_1)
1406				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1407			if (cmd->data & RXH_L4_B_2_3)
1408				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1409			break;
1410		case SCTP_V4_FLOW:
1411		case SCTP_V6_FLOW:
1412			if (cmd->data & RXH_L4_B_0_1)
1413				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1414			if (cmd->data & RXH_L4_B_2_3)
1415				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1416			break;
1417		default:
1418			break;
1419		}
1420	}
1421
1422	return hfld;
1423}
1424
1425/**
1426 * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1427 * @adapter: pointer to the VF adapter structure
1428 * @cmd: ethtool rxnfc command
1429 *
1430 * Returns Success if the flow input set is supported.
1431 */
1432static int
1433iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1434			  struct ethtool_rxnfc *cmd)
1435{
1436	struct iavf_adv_rss *rss_old, *rss_new;
1437	bool rss_new_add = false;
1438	int count = 50, err = 0;
1439	bool symm = false;
1440	u64 hash_flds;
1441	u32 hdrs;
1442
1443	if (!ADV_RSS_SUPPORT(adapter))
1444		return -EOPNOTSUPP;
1445
1446	symm = !!(adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC);
1447
1448	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1449	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1450		return -EINVAL;
1451
1452	hash_flds = iavf_adv_rss_parse_hash_flds(cmd, symm);
1453	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1454		return -EINVAL;
1455
1456	rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1457	if (!rss_new)
1458		return -ENOMEM;
1459
1460	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds,
1461				      symm)) {
1462		kfree(rss_new);
1463		return -EINVAL;
1464	}
1465
1466	while (!mutex_trylock(&adapter->crit_lock)) {
1467		if (--count == 0) {
1468			kfree(rss_new);
1469			return -EINVAL;
1470		}
1471
1472		udelay(1);
1473	}
1474
1475	spin_lock_bh(&adapter->adv_rss_lock);
1476	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1477	if (rss_old) {
1478		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1479			err = -EBUSY;
1480		} else if (rss_old->hash_flds != hash_flds ||
1481			   rss_old->symm != symm) {
1482			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1483			rss_old->hash_flds = hash_flds;
1484			rss_old->symm = symm;
1485			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1486			       sizeof(rss_new->cfg_msg));
1487		} else {
1488			err = -EEXIST;
1489		}
1490	} else {
1491		rss_new_add = true;
1492		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1493		rss_new->packet_hdrs = hdrs;
1494		rss_new->hash_flds = hash_flds;
1495		rss_new->symm = symm;
1496		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1497	}
1498	spin_unlock_bh(&adapter->adv_rss_lock);
1499
1500	if (!err)
1501		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
1502
1503	mutex_unlock(&adapter->crit_lock);
1504
1505	if (!rss_new_add)
1506		kfree(rss_new);
1507
1508	return err;
1509}
1510
1511/**
1512 * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1513 * @adapter: pointer to the VF adapter structure
1514 * @cmd: ethtool rxnfc command
1515 *
1516 * Returns Success if the flow input set is supported.
1517 */
1518static int
1519iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
1520			  struct ethtool_rxnfc *cmd)
1521{
1522	struct iavf_adv_rss *rss;
1523	u64 hash_flds;
1524	u32 hdrs;
1525
1526	if (!ADV_RSS_SUPPORT(adapter))
1527		return -EOPNOTSUPP;
1528
1529	cmd->data = 0;
1530
1531	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1532	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1533		return -EINVAL;
1534
1535	spin_lock_bh(&adapter->adv_rss_lock);
1536	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1537	if (rss)
1538		hash_flds = rss->hash_flds;
1539	else
1540		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
1541	spin_unlock_bh(&adapter->adv_rss_lock);
1542
1543	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1544		return -EINVAL;
1545
1546	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
1547			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
1548		cmd->data |= (u64)RXH_IP_SRC;
1549
1550	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
1551			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
1552		cmd->data |= (u64)RXH_IP_DST;
1553
1554	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
1555			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
1556			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
1557		cmd->data |= (u64)RXH_L4_B_0_1;
1558
1559	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
1560			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
1561			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
1562		cmd->data |= (u64)RXH_L4_B_2_3;
1563
1564	return 0;
1565}
1566
1567/**
1568 * iavf_set_rxnfc - command to set Rx flow rules.
1569 * @netdev: network interface device structure
1570 * @cmd: ethtool rxnfc command
1571 *
1572 * Returns 0 for success and negative values for errors
1573 */
1574static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1575{
1576	struct iavf_adapter *adapter = netdev_priv(netdev);
1577	int ret = -EOPNOTSUPP;
1578
1579	switch (cmd->cmd) {
1580	case ETHTOOL_SRXCLSRLINS:
1581		ret = iavf_add_fdir_ethtool(adapter, cmd);
1582		break;
1583	case ETHTOOL_SRXCLSRLDEL:
1584		ret = iavf_del_fdir_ethtool(adapter, cmd);
1585		break;
1586	case ETHTOOL_SRXFH:
1587		ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
1588		break;
1589	default:
1590		break;
1591	}
1592
1593	return ret;
1594}
1595
1596/**
1597 * iavf_get_rxnfc - command to get RX flow classification rules
1598 * @netdev: network interface device structure
1599 * @cmd: ethtool rxnfc command
1600 * @rule_locs: pointer to store rule locations
1601 *
1602 * Returns Success if the command is supported.
1603 **/
1604static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1605			  u32 *rule_locs)
1606{
1607	struct iavf_adapter *adapter = netdev_priv(netdev);
1608	int ret = -EOPNOTSUPP;
1609
1610	switch (cmd->cmd) {
1611	case ETHTOOL_GRXRINGS:
1612		cmd->data = adapter->num_active_queues;
1613		ret = 0;
1614		break;
1615	case ETHTOOL_GRXCLSRLCNT:
1616		if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED))
1617			break;
1618		spin_lock_bh(&adapter->fdir_fltr_lock);
1619		cmd->rule_cnt = adapter->fdir_active_fltr;
1620		spin_unlock_bh(&adapter->fdir_fltr_lock);
1621		cmd->data = IAVF_MAX_FDIR_FILTERS;
1622		ret = 0;
1623		break;
1624	case ETHTOOL_GRXCLSRULE:
1625		ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
1626		break;
1627	case ETHTOOL_GRXCLSRLALL:
1628		ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
1629		break;
1630	case ETHTOOL_GRXFH:
1631		ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
1632		break;
1633	default:
1634		break;
1635	}
1636
1637	return ret;
1638}
1639/**
1640 * iavf_get_channels: get the number of channels supported by the device
1641 * @netdev: network interface device structure
1642 * @ch: channel information structure
1643 *
1644 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
1645 * queue pair. Report one extra channel to match our "other" MSI-X vector.
1646 **/
1647static void iavf_get_channels(struct net_device *netdev,
1648			      struct ethtool_channels *ch)
1649{
1650	struct iavf_adapter *adapter = netdev_priv(netdev);
1651
1652	/* Report maximum channels */
1653	ch->max_combined = adapter->vsi_res->num_queue_pairs;
1654
1655	ch->max_other = NONQ_VECS;
1656	ch->other_count = NONQ_VECS;
1657
1658	ch->combined_count = adapter->num_active_queues;
1659}
1660
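/* Illustrative usage, added for clarity (interface name is a
 * placeholder): "ethtool -l <iface>" reports the values filled in above,
 * i.e. the "Combined" current/max counts come from num_active_queues and
 * vsi_res->num_queue_pairs, and "Other" reflects the NONQ_VECS non-queue
 * interrupt vector.
 */
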
1661/**
1662 * iavf_set_channels: set the new channel count
1663 * @netdev: network interface device structure
1664 * @ch: channel information structure
1665 *
1666 * Negotiate a new number of channels with the PF then do a reset.  During
1667 * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
1668 * negative on failure.
1669 **/
1670static int iavf_set_channels(struct net_device *netdev,
1671			     struct ethtool_channels *ch)
1672{
1673	struct iavf_adapter *adapter = netdev_priv(netdev);
1674	u32 num_req = ch->combined_count;
1675	int ret = 0;
1676
1677	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1678	    adapter->num_tc) {
1679		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
1680		return -EINVAL;
1681	}
1682
1683	/* All of these should have already been checked by ethtool before this
1684	 * even gets to us, but just to be sure.
1685	 */
1686	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
1687		return -EINVAL;
1688
1689	if (num_req == adapter->num_active_queues)
1690		return 0;
1691
1692	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
1693		return -EINVAL;
1694
1695	adapter->num_req_queues = num_req;
1696	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1697	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
1698
1699	ret = iavf_wait_for_reset(adapter);
1700	if (ret)
1701		netdev_warn(netdev, "Changing channel count timed out or was interrupted while waiting for reset");
1702
1703	return ret;
1704}
1705
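/* Illustrative usage, added for clarity (values are placeholders):
 * "ethtool -L <iface> combined 8" lands in iavf_set_channels(); the
 * driver records the request in num_req_queues, schedules a reset and
 * then waits in iavf_wait_for_reset() for the new queue count to take
 * effect (warning, as above, if that wait times out or is interrupted).
 */
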
1706/**
1707 * iavf_get_rxfh_key_size - get the RSS hash key size
1708 * @netdev: network interface device structure
1709 *
1710 * Returns the RSS hash key size, in bytes.
1711 **/
1712static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
1713{
1714	struct iavf_adapter *adapter = netdev_priv(netdev);
1715
1716	return adapter->rss_key_size;
1717}
1718
1719/**
1720 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
1721 * @netdev: network interface device structure
1722 *
1723 * Returns the table size.
1724 **/
1725static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
1726{
1727	struct iavf_adapter *adapter = netdev_priv(netdev);
1728
1729	return adapter->rss_lut_size;
1730}
1731
1732/**
1733 * iavf_get_rxfh - get the rx flow hash indirection table
1734 * @netdev: network interface device structure
1735 * @rxfh: pointer to param struct (indir, key, hfunc)
1736 *
1737 * Reads the RSS key and indirection table from the driver's cached copy. Always returns 0.
1738 **/
1739static int iavf_get_rxfh(struct net_device *netdev,
1740			 struct ethtool_rxfh_param *rxfh)
1741{
1742	struct iavf_adapter *adapter = netdev_priv(netdev);
1743	u16 i;
1744
1745	rxfh->hfunc = ETH_RSS_HASH_TOP;
1746	if (adapter->hfunc == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
1747		rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
1748
1749	if (rxfh->key)
1750		memcpy(rxfh->key, adapter->rss_key, adapter->rss_key_size);
1751
1752	if (rxfh->indir)
1753		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
1754		for (i = 0; i < adapter->rss_lut_size; i++)
1755			rxfh->indir[i] = (u32)adapter->rss_lut[i];
1756
1757	return 0;
1758}
1759
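/* Illustrative usage, added for clarity (interface name is a
 * placeholder): "ethtool -x <iface>" combines iavf_get_rxfh_key_size(),
 * iavf_get_rxfh_indir_size() and iavf_get_rxfh() to print the current
 * indirection table, hash key and hash function (Toeplitz, optionally
 * with the symmetric input transform reported via input_xfrm).
 */
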
1760/**
1761 * iavf_set_rxfh - set the rx flow hash indirection table
1762 * @netdev: network interface device structure
1763 * @rxfh: pointer to param struct (indir, key, hfunc)
1764 * @extack: extended ACK from the Netlink message
1765 *
1766 * Returns -EOPNOTSUPP if an unsupported hash function is requested, otherwise
1767 * returns the result of programming the key and indirection table.
1768 **/
1769static int iavf_set_rxfh(struct net_device *netdev,
1770			 struct ethtool_rxfh_param *rxfh,
1771			 struct netlink_ext_ack *extack)
1772{
1773	struct iavf_adapter *adapter = netdev_priv(netdev);
1774	u16 i;
1775
1776	/* Only support toeplitz hash function */
1777	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1778	    rxfh->hfunc != ETH_RSS_HASH_TOP)
1779		return -EOPNOTSUPP;
1780
1781	if ((rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1782	    adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC) {
1783		if (!ADV_RSS_SUPPORT(adapter))
1784			return -EOPNOTSUPP;
1785		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
1786		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1787	} else if (!(rxfh->input_xfrm & RXH_XFRM_SYM_XOR) &&
1788		    adapter->hfunc != VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC) {
1789		adapter->hfunc = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
1790		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_HFUNC;
1791	}
1792
1793	if (!rxfh->key && !rxfh->indir)
1794		return 0;
1795
1796	if (rxfh->key)
1797		memcpy(adapter->rss_key, rxfh->key, adapter->rss_key_size);
1798
1799	if (rxfh->indir) {
1800		/* Each 32 bits pointed by 'indir' is stored with a lut entry */
1801		for (i = 0; i < adapter->rss_lut_size; i++)
1802			adapter->rss_lut[i] = (u8)(rxfh->indir[i]);
1803	}
1804
1805	return iavf_config_rss(adapter);
1806}
1807
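/* Illustrative usage, added for clarity (values are placeholders):
 * "ethtool -X <iface> equal 4" rewrites the indirection table to spread
 * traffic across the first four queues, and "ethtool -X <iface> hkey
 * <colon-separated key bytes>" replaces the hash key; both paths end in
 * iavf_config_rss(). The symmetric Toeplitz variant is selected through
 * the rxfh input_xfrm attribute; newer ethtool releases expose this as
 * an "xfrm" option, but the exact CLI syntax depends on the installed
 * ethtool version.
 */
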
1808static const struct ethtool_ops iavf_ethtool_ops = {
1809	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1810				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1811	.cap_rss_sym_xor_supported = true,
1812	.get_drvinfo		= iavf_get_drvinfo,
1813	.get_link		= ethtool_op_get_link,
1814	.get_ringparam		= iavf_get_ringparam,
1815	.set_ringparam		= iavf_set_ringparam,
1816	.get_strings		= iavf_get_strings,
1817	.get_ethtool_stats	= iavf_get_ethtool_stats,
1818	.get_sset_count		= iavf_get_sset_count,
1819	.get_msglevel		= iavf_get_msglevel,
1820	.set_msglevel		= iavf_set_msglevel,
1821	.get_coalesce		= iavf_get_coalesce,
1822	.set_coalesce		= iavf_set_coalesce,
1823	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
1824	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
1825	.set_rxnfc		= iavf_set_rxnfc,
1826	.get_rxnfc		= iavf_get_rxnfc,
1827	.get_rxfh_indir_size	= iavf_get_rxfh_indir_size,
1828	.get_rxfh		= iavf_get_rxfh,
1829	.set_rxfh		= iavf_set_rxfh,
1830	.get_channels		= iavf_get_channels,
1831	.set_channels		= iavf_set_channels,
1832	.get_rxfh_key_size	= iavf_get_rxfh_key_size,
1833	.get_link_ksettings	= iavf_get_link_ksettings,
1834};
1835
1836/**
1837 * iavf_set_ethtool_ops - Initialize ethtool ops struct
1838 * @netdev: network interface device structure
1839 *
1840 * Sets ethtool ops struct in our netdev so that ethtool can call
1841 * our functions.
1842 **/
1843void iavf_set_ethtool_ops(struct net_device *netdev)
1844{
1845	netdev->ethtool_ops = &iavf_ethtool_ops;
1846}
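
/* A minimal sketch, added for clarity and assumed rather than taken from
 * this file, of how the hookup above is typically consumed while the
 * net_device is being configured elsewhere in the driver; the function
 * name below is a placeholder:
 *
 *	static int example_setup_netdev(struct net_device *netdev)
 *	{
 *		iavf_set_ethtool_ops(netdev);
 *		return register_netdev(netdev);
 *	}
 */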
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
 
 
 
   4/* ethtool support for iavf */
   5#include "iavf.h"
   6
   7#include <linux/uaccess.h>
   8
   9/* ethtool statistics helpers */
  10
  11/**
  12 * struct iavf_stats - definition for an ethtool statistic
  13 * @stat_string: statistic name to display in ethtool -S output
  14 * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
  15 * @stat_offset: offsetof() the stat from a base pointer
  16 *
  17 * This structure defines a statistic to be added to the ethtool stats buffer.
  18 * It defines a statistic as offset from a common base pointer. Stats should
  19 * be defined in constant arrays using the IAVF_STAT macro, with every element
  20 * of the array using the same _type for calculating the sizeof_stat and
  21 * stat_offset.
  22 *
  23 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
  24 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
  25 * the iavf_add_ethtool_stat() helper function.
  26 *
  27 * The @stat_string is interpreted as a format string, allowing formatted
  28 * values to be inserted while looping over multiple structures for a given
  29 * statistics array. Thus, every statistic string in an array should have the
  30 * same type and number of format specifiers, to be formatted by variadic
  31 * arguments to the iavf_add_stat_string() helper function.
  32 **/
  33struct iavf_stats {
  34	char stat_string[ETH_GSTRING_LEN];
  35	int sizeof_stat;
  36	int stat_offset;
  37};
  38
  39/* Helper macro to define an iavf_stat structure with proper size and type.
  40 * Use this when defining constant statistics arrays. Note that @_type expects
  41 * only a type name and is used multiple times.
  42 */
  43#define IAVF_STAT(_type, _name, _stat) { \
  44	.stat_string = _name, \
  45	.sizeof_stat = sizeof_field(_type, _stat), \
  46	.stat_offset = offsetof(_type, _stat) \
  47}
  48
  49/* Helper macro for defining some statistics related to queues */
  50#define IAVF_QUEUE_STAT(_name, _stat) \
  51	IAVF_STAT(struct iavf_ring, _name, _stat)
  52
  53/* Stats associated with a Tx or Rx ring */
  54static const struct iavf_stats iavf_gstrings_queue_stats[] = {
  55	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
  56	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
  57};
  58
  59/**
  60 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
  61 * @data: location to store the stat value
  62 * @pointer: basis for where to copy from
  63 * @stat: the stat definition
  64 *
  65 * Copies the stat data defined by the pointer and stat structure pair into
  66 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
  67 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
  68 */
  69static void
  70iavf_add_one_ethtool_stat(u64 *data, void *pointer,
  71			  const struct iavf_stats *stat)
  72{
  73	char *p;
  74
  75	if (!pointer) {
  76		/* ensure that the ethtool data buffer is zero'd for any stats
  77		 * which don't have a valid pointer.
  78		 */
  79		*data = 0;
  80		return;
  81	}
  82
  83	p = (char *)pointer + stat->stat_offset;
  84	switch (stat->sizeof_stat) {
  85	case sizeof(u64):
  86		*data = *((u64 *)p);
  87		break;
  88	case sizeof(u32):
  89		*data = *((u32 *)p);
  90		break;
  91	case sizeof(u16):
  92		*data = *((u16 *)p);
  93		break;
  94	case sizeof(u8):
  95		*data = *((u8 *)p);
  96		break;
  97	default:
  98		WARN_ONCE(1, "unexpected stat size for %s",
  99			  stat->stat_string);
 100		*data = 0;
 101	}
 102}
 103
 104/**
 105 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
 106 * @data: ethtool stats buffer
 107 * @pointer: location to copy stats from
 108 * @stats: array of stats to copy
 109 * @size: the size of the stats definition
 110 *
 111 * Copy the stats defined by the stats array using the pointer as a base into
 112 * the data buffer supplied by ethtool. Updates the data pointer to point to
 113 * the next empty location for successive calls to __iavf_add_ethtool_stats.
 114 * If pointer is null, set the data values to zero and update the pointer to
 115 * skip these stats.
 116 **/
 117static void
 118__iavf_add_ethtool_stats(u64 **data, void *pointer,
 119			 const struct iavf_stats stats[],
 120			 const unsigned int size)
 121{
 122	unsigned int i;
 123
 124	for (i = 0; i < size; i++)
 125		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
 126}
 127
 128/**
 129 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
 130 * @data: ethtool stats buffer
 131 * @pointer: location where stats are stored
 132 * @stats: static const array of stat definitions
 133 *
 134 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
 135 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
 136 * ensuring that we pass the size associated with the given stats array.
 137 *
 138 * The parameter @stats is evaluated twice, so parameters with side effects
 139 * should be avoided.
 140 **/
 141#define iavf_add_ethtool_stats(data, pointer, stats) \
 142	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
 143
 144/**
 145 * iavf_add_queue_stats - copy queue statistics into supplied buffer
 146 * @data: ethtool stats buffer
 147 * @ring: the ring to copy
 148 *
 149 * Queue statistics must be copied while protected by
 150 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
 151 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
 152 * ring pointer is null, zero out the queue stat values and update the data
 153 * pointer. Otherwise safely copy the stats from the ring into the supplied
 154 * buffer and update the data pointer when finished.
 155 *
 156 * This function expects to be called while under rcu_read_lock().
 157 **/
 158static void
 159iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
 160{
 161	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
 162	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
 163	unsigned int start;
 164	unsigned int i;
 165
 166	/* To avoid invalid statistics values, ensure that we keep retrying
 167	 * the copy until we get a consistent value according to
 168	 * u64_stats_fetch_retry. But first, make sure our ring is
 169	 * non-null before attempting to access its syncp.
 170	 */
 171	do {
 172		start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
 173		for (i = 0; i < size; i++)
 174			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
 175	} while (ring && u64_stats_fetch_retry(&ring->syncp, start));
 176
 177	/* Once we successfully copy the stats in, update the data pointer */
 178	*data += size;
 179}
 180
 181/**
 182 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
 183 * @p: ethtool supplied buffer
 184 * @stats: stat definitions array
 185 * @size: size of the stats array
 186 *
 187 * Format and copy the strings described by stats into the buffer pointed at
 188 * by p.
 189 **/
 190static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
 191				    const unsigned int size, ...)
 192{
 193	unsigned int i;
 194
 195	for (i = 0; i < size; i++) {
 196		va_list args;
 197
 198		va_start(args, size);
 199		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
 200		*p += ETH_GSTRING_LEN;
 201		va_end(args);
 202	}
 203}
 204
 205/**
 206 * iavf_add_stat_strings - copy stat strings into ethtool buffer
 207 * @p: ethtool supplied buffer
 208 * @stats: stat definitions array
 209 *
 210 * Format and copy the strings described by the const static stats value into
 211 * the buffer pointed at by p.
 212 *
 213 * The parameter @stats is evaluated twice, so parameters with side effects
 214 * should be avoided. Additionally, stats must be an array such that
 215 * ARRAY_SIZE can be called on it.
 216 **/
 217#define iavf_add_stat_strings(p, stats, ...) \
 218	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
 219
 220#define VF_STAT(_name, _stat) \
 221	IAVF_STAT(struct iavf_adapter, _name, _stat)
 222
 223static const struct iavf_stats iavf_gstrings_stats[] = {
 224	VF_STAT("rx_bytes", current_stats.rx_bytes),
 225	VF_STAT("rx_unicast", current_stats.rx_unicast),
 226	VF_STAT("rx_multicast", current_stats.rx_multicast),
 227	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
 228	VF_STAT("rx_discards", current_stats.rx_discards),
 229	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
 230	VF_STAT("tx_bytes", current_stats.tx_bytes),
 231	VF_STAT("tx_unicast", current_stats.tx_unicast),
 232	VF_STAT("tx_multicast", current_stats.tx_multicast),
 233	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
 234	VF_STAT("tx_discards", current_stats.tx_discards),
 235	VF_STAT("tx_errors", current_stats.tx_errors),
 236};
 237
 238#define IAVF_STATS_LEN	ARRAY_SIZE(iavf_gstrings_stats)
 239
 240#define IAVF_QUEUE_STATS_LEN	ARRAY_SIZE(iavf_gstrings_queue_stats)
 241
 242/* For now we have one and only one private flag and it is only defined
 243 * when we have support for the SKIP_CPU_SYNC DMA attribute.  Instead
 244 * of leaving all this code sitting around empty we will strip it unless
 245 * our one private flag is actually available.
 246 */
 247struct iavf_priv_flags {
 248	char flag_string[ETH_GSTRING_LEN];
 249	u32 flag;
 250	bool read_only;
 251};
 252
 253#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
 254	.flag_string = _name, \
 255	.flag = _flag, \
 256	.read_only = _read_only, \
 257}
 258
 259static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
 260	IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
 261};
 262
 263#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
 264
 265/**
 266 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
 267 * @netdev: network interface device structure
 268 * @cmd: ethtool command
 269 *
 270 * Reports speed/duplex settings. Because this is a VF, we don't know what
 271 * kind of link we really have, so we fake it.
 272 **/
 273static int iavf_get_link_ksettings(struct net_device *netdev,
 274				   struct ethtool_link_ksettings *cmd)
 275{
 276	struct iavf_adapter *adapter = netdev_priv(netdev);
 277
 278	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 279	cmd->base.autoneg = AUTONEG_DISABLE;
 280	cmd->base.port = PORT_NONE;
 281	cmd->base.duplex = DUPLEX_FULL;
 282
 283	if (ADV_LINK_SUPPORT(adapter)) {
 284		if (adapter->link_speed_mbps &&
 285		    adapter->link_speed_mbps < U32_MAX)
 286			cmd->base.speed = adapter->link_speed_mbps;
 287		else
 288			cmd->base.speed = SPEED_UNKNOWN;
 289
 290		return 0;
 291	}
 292
 293	switch (adapter->link_speed) {
 294	case VIRTCHNL_LINK_SPEED_40GB:
 295		cmd->base.speed = SPEED_40000;
 296		break;
 297	case VIRTCHNL_LINK_SPEED_25GB:
 298		cmd->base.speed = SPEED_25000;
 299		break;
 300	case VIRTCHNL_LINK_SPEED_20GB:
 301		cmd->base.speed = SPEED_20000;
 302		break;
 303	case VIRTCHNL_LINK_SPEED_10GB:
 304		cmd->base.speed = SPEED_10000;
 305		break;
 306	case VIRTCHNL_LINK_SPEED_5GB:
 307		cmd->base.speed = SPEED_5000;
 308		break;
 309	case VIRTCHNL_LINK_SPEED_2_5GB:
 310		cmd->base.speed = SPEED_2500;
 311		break;
 312	case VIRTCHNL_LINK_SPEED_1GB:
 313		cmd->base.speed = SPEED_1000;
 314		break;
 315	case VIRTCHNL_LINK_SPEED_100MB:
 316		cmd->base.speed = SPEED_100;
 317		break;
 318	default:
 319		break;
 320	}
 321
 322	return 0;
 323}
 324
 325/**
 326 * iavf_get_sset_count - Get length of string set
 327 * @netdev: network interface device structure
 328 * @sset: id of string set
 329 *
 330 * Reports size of various string tables.
 331 **/
 332static int iavf_get_sset_count(struct net_device *netdev, int sset)
 333{
 334	/* Report the maximum number queues, even if not every queue is
 335	 * currently configured. Since allocation of queues is in pairs,
 336	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
 337	 * at device creation and never changes.
 338	 */
 339
 340	if (sset == ETH_SS_STATS)
 341		return IAVF_STATS_LEN +
 342			(IAVF_QUEUE_STATS_LEN * 2 *
 343			 netdev->real_num_tx_queues);
 344	else if (sset == ETH_SS_PRIV_FLAGS)
 345		return IAVF_PRIV_FLAGS_STR_LEN;
 346	else
 347		return -EINVAL;
 348}
 349
 350/**
 351 * iavf_get_ethtool_stats - report device statistics
 352 * @netdev: network interface device structure
 353 * @stats: ethtool statistics structure
 354 * @data: pointer to data buffer
 355 *
 356 * All statistics are added to the data buffer as an array of u64.
 357 **/
 358static void iavf_get_ethtool_stats(struct net_device *netdev,
 359				   struct ethtool_stats *stats, u64 *data)
 360{
 361	struct iavf_adapter *adapter = netdev_priv(netdev);
 362	unsigned int i;
 363
 364	/* Explicitly request stats refresh */
 365	iavf_schedule_request_stats(adapter);
 366
 367	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
 368
 369	rcu_read_lock();
 370	/* As num_active_queues describe both tx and rx queues, we can use
 371	 * it to iterate over rings' stats.
 372	 */
 373	for (i = 0; i < adapter->num_active_queues; i++) {
 374		struct iavf_ring *ring;
 375
 376		/* Tx rings stats */
 377		ring = &adapter->tx_rings[i];
 378		iavf_add_queue_stats(&data, ring);
 379
 380		/* Rx rings stats */
 381		ring = &adapter->rx_rings[i];
 382		iavf_add_queue_stats(&data, ring);
 383	}
 384	rcu_read_unlock();
 385}
 386
 387/**
 388 * iavf_get_priv_flag_strings - Get private flag strings
 389 * @netdev: network interface device structure
 390 * @data: buffer for string data
 391 *
 392 * Builds the private flags string table
 393 **/
 394static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
 395{
 396	unsigned int i;
 397
 398	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
 399		snprintf(data, ETH_GSTRING_LEN, "%s",
 400			 iavf_gstrings_priv_flags[i].flag_string);
 401		data += ETH_GSTRING_LEN;
 402	}
 403}
 404
 405/**
 406 * iavf_get_stat_strings - Get stat strings
 407 * @netdev: network interface device structure
 408 * @data: buffer for string data
 409 *
 410 * Builds the statistics string table
 411 **/
 412static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
 413{
 414	unsigned int i;
 415
 416	iavf_add_stat_strings(&data, iavf_gstrings_stats);
 417
 418	/* Queues are always allocated in pairs, so we just use
 419	 * real_num_tx_queues for both Tx and Rx queues.
 420	 */
 421	for (i = 0; i < netdev->real_num_tx_queues; i++) {
 422		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 423				      "tx", i);
 424		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 425				      "rx", i);
 426	}
 427}
 428
 429/**
 430 * iavf_get_strings - Get string set
 431 * @netdev: network interface device structure
 432 * @sset: id of string set
 433 * @data: buffer for string data
 434 *
 435 * Builds string tables for various string sets
 436 **/
 437static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 438{
 439	switch (sset) {
 440	case ETH_SS_STATS:
 441		iavf_get_stat_strings(netdev, data);
 442		break;
 443	case ETH_SS_PRIV_FLAGS:
 444		iavf_get_priv_flag_strings(netdev, data);
 445		break;
 446	default:
 447		break;
 448	}
 449}
 450
 451/**
 452 * iavf_get_priv_flags - report device private flags
 453 * @netdev: network interface device structure
 454 *
 455 * The get string set count and the string set should be matched for each
 456 * flag returned.  Add new strings for each flag to the iavf_gstrings_priv_flags
 457 * array.
 458 *
 459 * Returns a u32 bitmap of flags.
 460 **/
 461static u32 iavf_get_priv_flags(struct net_device *netdev)
 462{
 463	struct iavf_adapter *adapter = netdev_priv(netdev);
 464	u32 i, ret_flags = 0;
 465
 466	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
 467		const struct iavf_priv_flags *priv_flags;
 468
 469		priv_flags = &iavf_gstrings_priv_flags[i];
 470
 471		if (priv_flags->flag & adapter->flags)
 472			ret_flags |= BIT(i);
 473	}
 474
 475	return ret_flags;
 476}
 477
 478/**
 479 * iavf_set_priv_flags - set private flags
 480 * @netdev: network interface device structure
 481 * @flags: bit flags to be set
 482 **/
 483static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
 484{
 485	struct iavf_adapter *adapter = netdev_priv(netdev);
 486	u32 orig_flags, new_flags, changed_flags;
 487	u32 i;
 488
 489	orig_flags = READ_ONCE(adapter->flags);
 490	new_flags = orig_flags;
 491
 492	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
 493		const struct iavf_priv_flags *priv_flags;
 494
 495		priv_flags = &iavf_gstrings_priv_flags[i];
 496
 497		if (flags & BIT(i))
 498			new_flags |= priv_flags->flag;
 499		else
 500			new_flags &= ~(priv_flags->flag);
 501
 502		if (priv_flags->read_only &&
 503		    ((orig_flags ^ new_flags) & ~BIT(i)))
 504			return -EOPNOTSUPP;
 505	}
 506
 507	/* Before we finalize any flag changes, any checks which we need to
 508	 * perform to determine if the new flags will be supported should go
 509	 * here...
 510	 */
 511
 512	/* Compare and exchange the new flags into place. If we failed, that
 513	 * is if cmpxchg returns anything but the old value, this means
 514	 * something else must have modified the flags variable since we
 515	 * copied it. We'll just punt with an error and log something in the
 516	 * message buffer.
 517	 */
 518	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
 519		dev_warn(&adapter->pdev->dev,
 520			 "Unable to update adapter->flags as it was modified by another thread...\n");
 521		return -EAGAIN;
 522	}
 523
 524	changed_flags = orig_flags ^ new_flags;
 525
 526	/* Process any additional changes needed as a result of flag changes.
 527	 * The changed_flags value reflects the list of bits that were changed
 528	 * in the code above.
 529	 */
 530
 531	/* issue a reset to force legacy-rx change to take effect */
 532	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
 533		if (netif_running(netdev)) {
 534			adapter->flags |= IAVF_FLAG_RESET_NEEDED;
 535			queue_work(adapter->wq, &adapter->reset_task);
 536		}
 537	}
 538
 539	return 0;
 540}
 541
 542/**
 543 * iavf_get_msglevel - Get debug message level
 544 * @netdev: network interface device structure
 545 *
 546 * Returns current debug message level.
 547 **/
 548static u32 iavf_get_msglevel(struct net_device *netdev)
 549{
 550	struct iavf_adapter *adapter = netdev_priv(netdev);
 551
 552	return adapter->msg_enable;
 553}
 554
 555/**
 556 * iavf_set_msglevel - Set debug message level
 557 * @netdev: network interface device structure
 558 * @data: message level
 559 *
 560 * Set current debug message level. Higher values cause the driver to
 561 * be noisier.
 562 **/
 563static void iavf_set_msglevel(struct net_device *netdev, u32 data)
 564{
 565	struct iavf_adapter *adapter = netdev_priv(netdev);
 566
 567	if (IAVF_DEBUG_USER & data)
 568		adapter->hw.debug_mask = data;
 569	adapter->msg_enable = data;
 570}
 571
 572/**
 573 * iavf_get_drvinfo - Get driver info
 574 * @netdev: network interface device structure
 575 * @drvinfo: ethool driver info structure
 576 *
 577 * Returns information about the driver and device for display to the user.
 578 **/
 579static void iavf_get_drvinfo(struct net_device *netdev,
 580			     struct ethtool_drvinfo *drvinfo)
 581{
 582	struct iavf_adapter *adapter = netdev_priv(netdev);
 583
 584	strscpy(drvinfo->driver, iavf_driver_name, 32);
 585	strscpy(drvinfo->fw_version, "N/A", 4);
 586	strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
 587	drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
 588}
 589
 590/**
 591 * iavf_get_ringparam - Get ring parameters
 592 * @netdev: network interface device structure
 593 * @ring: ethtool ringparam structure
 594 * @kernel_ring: ethtool extenal ringparam structure
 595 * @extack: netlink extended ACK report struct
 596 *
 597 * Returns current ring parameters. TX and RX rings are reported separately,
 598 * but the number of rings is not reported.
 599 **/
 600static void iavf_get_ringparam(struct net_device *netdev,
 601			       struct ethtool_ringparam *ring,
 602			       struct kernel_ethtool_ringparam *kernel_ring,
 603			       struct netlink_ext_ack *extack)
 604{
 605	struct iavf_adapter *adapter = netdev_priv(netdev);
 606
 607	ring->rx_max_pending = IAVF_MAX_RXD;
 608	ring->tx_max_pending = IAVF_MAX_TXD;
 609	ring->rx_pending = adapter->rx_desc_count;
 610	ring->tx_pending = adapter->tx_desc_count;
 611}
 612
 613/**
 614 * iavf_set_ringparam - Set ring parameters
 615 * @netdev: network interface device structure
 616 * @ring: ethtool ringparam structure
 617 * @kernel_ring: ethtool external ringparam structure
 618 * @extack: netlink extended ACK report struct
 619 *
 620 * Sets ring parameters. TX and RX rings are controlled separately, but the
 621 * number of rings is not specified, so all rings get the same settings.
 622 **/
 623static int iavf_set_ringparam(struct net_device *netdev,
 624			      struct ethtool_ringparam *ring,
 625			      struct kernel_ethtool_ringparam *kernel_ring,
 626			      struct netlink_ext_ack *extack)
 627{
 628	struct iavf_adapter *adapter = netdev_priv(netdev);
 629	u32 new_rx_count, new_tx_count;
 
 630
 631	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 632		return -EINVAL;
 633
 634	if (ring->tx_pending > IAVF_MAX_TXD ||
 635	    ring->tx_pending < IAVF_MIN_TXD ||
 636	    ring->rx_pending > IAVF_MAX_RXD ||
 637	    ring->rx_pending < IAVF_MIN_RXD) {
 638		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
 639			   ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
 640			   IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 641		return -EINVAL;
 642	}
 643
 644	new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 645	if (new_tx_count != ring->tx_pending)
 646		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
 647			    new_tx_count);
 648
 649	new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
 650	if (new_rx_count != ring->rx_pending)
 651		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
 652			    new_rx_count);
 653
 654	/* if nothing to do return success */
 655	if ((new_tx_count == adapter->tx_desc_count) &&
 656	    (new_rx_count == adapter->rx_desc_count)) {
 657		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
 658		return 0;
 659	}
 660
 661	if (new_tx_count != adapter->tx_desc_count) {
 662		netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
 663			   adapter->tx_desc_count, new_tx_count);
 664		adapter->tx_desc_count = new_tx_count;
 665	}
 666
 667	if (new_rx_count != adapter->rx_desc_count) {
 668		netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
 669			   adapter->rx_desc_count, new_rx_count);
 670		adapter->rx_desc_count = new_rx_count;
 671	}
 672
 673	if (netif_running(netdev)) {
 674		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
 675		queue_work(adapter->wq, &adapter->reset_task);
 
 
 676	}
 677
 678	return 0;
 679}
 680
 681/**
 682 * __iavf_get_coalesce - get per-queue coalesce settings
 683 * @netdev: the netdev to check
 684 * @ec: ethtool coalesce data structure
 685 * @queue: which queue to pick
 686 *
 687 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
 688 * are per queue. If queue is <0 then we default to queue 0 as the
 689 * representative value.
 690 **/
 691static int __iavf_get_coalesce(struct net_device *netdev,
 692			       struct ethtool_coalesce *ec, int queue)
 693{
 694	struct iavf_adapter *adapter = netdev_priv(netdev);
 695	struct iavf_ring *rx_ring, *tx_ring;
 696
 697	/* Rx and Tx usecs per queue value. If user doesn't specify the
 698	 * queue, return queue 0's value to represent.
 699	 */
 700	if (queue < 0)
 701		queue = 0;
 702	else if (queue >= adapter->num_active_queues)
 703		return -EINVAL;
 704
 705	rx_ring = &adapter->rx_rings[queue];
 706	tx_ring = &adapter->tx_rings[queue];
 707
 708	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
 709		ec->use_adaptive_rx_coalesce = 1;
 710
 711	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
 712		ec->use_adaptive_tx_coalesce = 1;
 713
 714	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 715	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 716
 717	return 0;
 718}
 719
 720/**
 721 * iavf_get_coalesce - Get interrupt coalescing settings
 722 * @netdev: network interface device structure
 723 * @ec: ethtool coalesce structure
 724 * @kernel_coal: ethtool CQE mode setting structure
 725 * @extack: extack for reporting error messages
 726 *
 727 * Returns current coalescing settings. This is referred to elsewhere in the
 728 * driver as Interrupt Throttle Rate, as this is how the hardware describes
 729 * this functionality. Note that if per-queue settings have been modified this
 730 * only represents the settings of queue 0.
 731 **/
 732static int iavf_get_coalesce(struct net_device *netdev,
 733			     struct ethtool_coalesce *ec,
 734			     struct kernel_ethtool_coalesce *kernel_coal,
 735			     struct netlink_ext_ack *extack)
 736{
 737	return __iavf_get_coalesce(netdev, ec, -1);
 738}
 739
 740/**
 741 * iavf_get_per_queue_coalesce - get coalesce values for specific queue
 742 * @netdev: netdev to read
 743 * @ec: coalesce settings from ethtool
 744 * @queue: the queue to read
 745 *
 746 * Read specific queue's coalesce settings.
 747 **/
 748static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
 749				       struct ethtool_coalesce *ec)
 750{
 751	return __iavf_get_coalesce(netdev, ec, queue);
 752}
 753
 754/**
 755 * iavf_set_itr_per_queue - set ITR values for specific queue
 756 * @adapter: the VF adapter struct to set values for
 757 * @ec: coalesce settings from ethtool
 758 * @queue: the queue to modify
 759 *
 760 * Change the ITR settings for a specific queue.
 761 **/
 762static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
 763				  struct ethtool_coalesce *ec, int queue)
 764{
 765	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
 766	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
 767	struct iavf_q_vector *q_vector;
 768	u16 itr_setting;
 769
 770	itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 771
 772	if (ec->rx_coalesce_usecs != itr_setting &&
 773	    ec->use_adaptive_rx_coalesce) {
 774		netif_info(adapter, drv, adapter->netdev,
 775			   "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
 776		return -EINVAL;
 777	}
 778
 779	itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
 780
 781	if (ec->tx_coalesce_usecs != itr_setting &&
 782	    ec->use_adaptive_tx_coalesce) {
 783		netif_info(adapter, drv, adapter->netdev,
 784			   "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
 785		return -EINVAL;
 786	}
 787
 788	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
 789	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
 790
 791	rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
 792	if (!ec->use_adaptive_rx_coalesce)
 793		rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
 794
 795	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
 796	if (!ec->use_adaptive_tx_coalesce)
 797		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
 798
 799	q_vector = rx_ring->q_vector;
 800	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
 801
 802	q_vector = tx_ring->q_vector;
 803	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
 804
 805	/* The interrupt handler itself will take care of programming
 806	 * the Tx and Rx ITR values based on the values we have entered
 807	 * into the q_vector, no need to write the values now.
 808	 */
 809	return 0;
 810}
 811
 812/**
 813 * __iavf_set_coalesce - set coalesce settings for particular queue
 814 * @netdev: the netdev to change
 815 * @ec: ethtool coalesce settings
 816 * @queue: the queue to change
 817 *
 818 * Sets the coalesce settings for a particular queue.
 819 **/
 820static int __iavf_set_coalesce(struct net_device *netdev,
 821			       struct ethtool_coalesce *ec, int queue)
 822{
 823	struct iavf_adapter *adapter = netdev_priv(netdev);
 824	int i;
 825
 826	if (ec->rx_coalesce_usecs == 0) {
 827		if (ec->use_adaptive_rx_coalesce)
 828			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
 829	} else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
 830		   (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
 831		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
 832		return -EINVAL;
 833	} else if (ec->tx_coalesce_usecs == 0) {
 834		if (ec->use_adaptive_tx_coalesce)
 835			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
 836	} else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
 837		   (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
 838		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
 839		return -EINVAL;
 840	}
 841
 842	/* Rx and Tx usecs has per queue value. If user doesn't specify the
 843	 * queue, apply to all queues.
 844	 */
 845	if (queue < 0) {
 846		for (i = 0; i < adapter->num_active_queues; i++)
 847			if (iavf_set_itr_per_queue(adapter, ec, i))
 848				return -EINVAL;
 849	} else if (queue < adapter->num_active_queues) {
 850		if (iavf_set_itr_per_queue(adapter, ec, queue))
 851			return -EINVAL;
 852	} else {
 853		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
 854			   adapter->num_active_queues - 1);
 855		return -EINVAL;
 856	}
 857
 858	return 0;
 859}
 860
 861/**
 862 * iavf_set_coalesce - Set interrupt coalescing settings
 863 * @netdev: network interface device structure
 864 * @ec: ethtool coalesce structure
 865 * @kernel_coal: ethtool CQE mode setting structure
 866 * @extack: extack for reporting error messages
 867 *
 868 * Change current coalescing settings for every queue.
 869 **/
 870static int iavf_set_coalesce(struct net_device *netdev,
 871			     struct ethtool_coalesce *ec,
 872			     struct kernel_ethtool_coalesce *kernel_coal,
 873			     struct netlink_ext_ack *extack)
 874{
 875	return __iavf_set_coalesce(netdev, ec, -1);
 876}
 877
 878/**
 879 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
 880 * @netdev: the netdev to change
 881 * @ec: ethtool's coalesce settings
 882 * @queue: the queue to modify
 883 *
 884 * Modifies a specific queue's coalesce settings.
 885 */
 886static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
 887				       struct ethtool_coalesce *ec)
 888{
 889	return __iavf_set_coalesce(netdev, ec, queue);
 890}
 891
 892/**
 893 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
 894 * flow type values
 895 * @flow: filter type to be converted
 896 *
 897 * Returns the corresponding ethtool flow type.
 898 */
 899static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
 900{
 901	switch (flow) {
 902	case IAVF_FDIR_FLOW_IPV4_TCP:
 903		return TCP_V4_FLOW;
 904	case IAVF_FDIR_FLOW_IPV4_UDP:
 905		return UDP_V4_FLOW;
 906	case IAVF_FDIR_FLOW_IPV4_SCTP:
 907		return SCTP_V4_FLOW;
 908	case IAVF_FDIR_FLOW_IPV4_AH:
 909		return AH_V4_FLOW;
 910	case IAVF_FDIR_FLOW_IPV4_ESP:
 911		return ESP_V4_FLOW;
 912	case IAVF_FDIR_FLOW_IPV4_OTHER:
 913		return IPV4_USER_FLOW;
 914	case IAVF_FDIR_FLOW_IPV6_TCP:
 915		return TCP_V6_FLOW;
 916	case IAVF_FDIR_FLOW_IPV6_UDP:
 917		return UDP_V6_FLOW;
 918	case IAVF_FDIR_FLOW_IPV6_SCTP:
 919		return SCTP_V6_FLOW;
 920	case IAVF_FDIR_FLOW_IPV6_AH:
 921		return AH_V6_FLOW;
 922	case IAVF_FDIR_FLOW_IPV6_ESP:
 923		return ESP_V6_FLOW;
 924	case IAVF_FDIR_FLOW_IPV6_OTHER:
 925		return IPV6_USER_FLOW;
 926	case IAVF_FDIR_FLOW_NON_IP_L2:
 927		return ETHER_FLOW;
 928	default:
 929		/* 0 is undefined ethtool flow */
 930		return 0;
 931	}
 932}
 933
 934/**
 935 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 936 * @eth: Ethtool flow type to be converted
 937 *
 938 * Returns flow enum
 939 */
 940static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
 941{
 942	switch (eth) {
 943	case TCP_V4_FLOW:
 944		return IAVF_FDIR_FLOW_IPV4_TCP;
 945	case UDP_V4_FLOW:
 946		return IAVF_FDIR_FLOW_IPV4_UDP;
 947	case SCTP_V4_FLOW:
 948		return IAVF_FDIR_FLOW_IPV4_SCTP;
 949	case AH_V4_FLOW:
 950		return IAVF_FDIR_FLOW_IPV4_AH;
 951	case ESP_V4_FLOW:
 952		return IAVF_FDIR_FLOW_IPV4_ESP;
 953	case IPV4_USER_FLOW:
 954		return IAVF_FDIR_FLOW_IPV4_OTHER;
 955	case TCP_V6_FLOW:
 956		return IAVF_FDIR_FLOW_IPV6_TCP;
 957	case UDP_V6_FLOW:
 958		return IAVF_FDIR_FLOW_IPV6_UDP;
 959	case SCTP_V6_FLOW:
 960		return IAVF_FDIR_FLOW_IPV6_SCTP;
 961	case AH_V6_FLOW:
 962		return IAVF_FDIR_FLOW_IPV6_AH;
 963	case ESP_V6_FLOW:
 964		return IAVF_FDIR_FLOW_IPV6_ESP;
 965	case IPV6_USER_FLOW:
 966		return IAVF_FDIR_FLOW_IPV6_OTHER;
 967	case ETHER_FLOW:
 968		return IAVF_FDIR_FLOW_NON_IP_L2;
 969	default:
 970		return IAVF_FDIR_FLOW_NONE;
 971	}
 972}
 973
 974/**
 975 * iavf_is_mask_valid - check mask field set
 976 * @mask: full mask to check
 977 * @field: field for which mask should be valid
 978 *
 979 * If the mask is fully set return true. If it is not valid for field return
 980 * false.
 981 */
 982static bool iavf_is_mask_valid(u64 mask, u64 field)
 983{
 984	return (mask & field) == field;
 985}
 986
 987/**
 988 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
 989 * @fsp: pointer to ethtool Rx flow specification
 990 * @fltr: pointer to Flow Director filter for userdef data storage
 991 *
 992 * Returns 0 on success, negative error value on failure
 993 */
 994static int
 995iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
 996			     struct iavf_fdir_fltr *fltr)
 997{
 998	struct iavf_flex_word *flex;
 999	int i, cnt = 0;
1000
1001	if (!(fsp->flow_type & FLOW_EXT))
1002		return 0;
1003
1004	for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
1005#define IAVF_USERDEF_FLEX_WORD_M	GENMASK(15, 0)
1006#define IAVF_USERDEF_FLEX_OFFS_S	16
1007#define IAVF_USERDEF_FLEX_OFFS_M	GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
1008#define IAVF_USERDEF_FLEX_FLTR_M	GENMASK(31, 0)
1009		u32 value = be32_to_cpu(fsp->h_ext.data[i]);
1010		u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
1011
1012		if (!value || !mask)
1013			continue;
1014
1015		if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
1016			return -EINVAL;
1017
1018		/* 504 is the maximum value for offsets, and offset is measured
1019		 * from the start of the MAC address.
1020		 */
1021#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
1022		flex = &fltr->flex_words[cnt++];
1023		flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
1024		flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
1025			     IAVF_USERDEF_FLEX_OFFS_S;
1026		if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
1027			return -EINVAL;
1028	}
1029
1030	fltr->flex_cnt = cnt;
1031
1032	return 0;
1033}
1034
1035/**
1036 * iavf_fill_rx_flow_ext_data - fill the additional data
1037 * @fsp: pointer to ethtool Rx flow specification
1038 * @fltr: pointer to Flow Director filter to get additional data
1039 */
1040static void
1041iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
1042			   struct iavf_fdir_fltr *fltr)
1043{
1044	if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
1045		return;
1046
1047	fsp->flow_type |= FLOW_EXT;
1048
1049	memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
1050	memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
1051}
1052
1053/**
1054 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
1055 * @adapter: the VF adapter structure that contains filter list
1056 * @cmd: ethtool command data structure to receive the filter data
1057 *
1058 * Returns 0 as expected for success by ethtool
1059 */
1060static int
1061iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
1062			    struct ethtool_rxnfc *cmd)
1063{
1064	struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1065	struct iavf_fdir_fltr *rule = NULL;
1066	int ret = 0;
1067
1068	if (!FDIR_FLTR_SUPPORT(adapter))
1069		return -EOPNOTSUPP;
1070
1071	spin_lock_bh(&adapter->fdir_fltr_lock);
1072
1073	rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1074	if (!rule) {
1075		ret = -EINVAL;
1076		goto release_lock;
1077	}
1078
1079	fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
1080
1081	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
1082	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
1083
1084	switch (fsp->flow_type) {
1085	case TCP_V4_FLOW:
1086	case UDP_V4_FLOW:
1087	case SCTP_V4_FLOW:
1088		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1089		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1090		fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
1091		fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
1092		fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
1093		fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1094		fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1095		fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
1096		fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
1097		fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
1098		break;
1099	case AH_V4_FLOW:
1100	case ESP_V4_FLOW:
1101		fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1102		fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1103		fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
1104		fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
1105		fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1106		fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1107		fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
1108		fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
1109		break;
1110	case IPV4_USER_FLOW:
1111		fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1112		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1113		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
1114		fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
1115		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1116		fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
1117		fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1118		fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1119		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
1120		fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
1121		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
1122		fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
1123		break;
1124	case TCP_V6_FLOW:
1125	case UDP_V6_FLOW:
1126	case SCTP_V6_FLOW:
1127		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1128		       sizeof(struct in6_addr));
1129		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1130		       sizeof(struct in6_addr));
1131		fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
1132		fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
1133		fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
1134		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1135		       sizeof(struct in6_addr));
1136		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1137		       sizeof(struct in6_addr));
1138		fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
1139		fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
1140		fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
1141		break;
1142	case AH_V6_FLOW:
1143	case ESP_V6_FLOW:
1144		memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1145		       sizeof(struct in6_addr));
1146		memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1147		       sizeof(struct in6_addr));
1148		fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1149		fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1150		memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1151		       sizeof(struct in6_addr));
1152		memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1153		       sizeof(struct in6_addr));
1154		fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1155		fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1156		break;
1157	case IPV6_USER_FLOW:
1158		memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1159		       sizeof(struct in6_addr));
1160		memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1161		       sizeof(struct in6_addr));
1162		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1163		fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1164		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1165		memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1166		       sizeof(struct in6_addr));
1167		memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1168		       sizeof(struct in6_addr));
1169		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1170		fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1171		fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1172		break;
1173	case ETHER_FLOW:
1174		fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1175		fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1176		break;
1177	default:
1178		ret = -EINVAL;
1179		break;
1180	}
1181
1182	iavf_fill_rx_flow_ext_data(fsp, rule);
1183
1184	if (rule->action == VIRTCHNL_ACTION_DROP)
1185		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1186	else
1187		fsp->ring_cookie = rule->q_index;
1188
1189release_lock:
1190	spin_unlock_bh(&adapter->fdir_fltr_lock);
1191	return ret;
1192}
1193
1194/**
1195 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1196 * @adapter: the VF adapter structure containing the filter list
1197 * @cmd: ethtool command data structure
1198 * @rule_locs: ethtool array passed in from OS to receive filter IDs
1199 *
1200 * Returns 0 as expected for success by ethtool
1201 */
1202static int
1203iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1204		       u32 *rule_locs)
1205{
1206	struct iavf_fdir_fltr *fltr;
1207	unsigned int cnt = 0;
1208	int val = 0;
1209
1210	if (!FDIR_FLTR_SUPPORT(adapter))
1211		return -EOPNOTSUPP;
1212
1213	cmd->data = IAVF_MAX_FDIR_FILTERS;
1214
1215	spin_lock_bh(&adapter->fdir_fltr_lock);
1216
1217	list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
 
 
 
1218		if (cnt == cmd->rule_cnt) {
1219			val = -EMSGSIZE;
1220			goto release_lock;
1221		}
1222		rule_locs[cnt] = fltr->loc;
1223		cnt++;
1224	}
1225
1226release_lock:
1227	spin_unlock_bh(&adapter->fdir_fltr_lock);
1228	if (!val)
1229		cmd->rule_cnt = cnt;
1230
1231	return val;
1232}
1233
1234/**
1235 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1236 * @adapter: pointer to the VF adapter structure
1237 * @fsp: pointer to ethtool Rx flow specification
1238 * @fltr: filter structure
1239 */
1240static int
1241iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1242			struct iavf_fdir_fltr *fltr)
1243{
1244	u32 flow_type, q_index = 0;
1245	enum virtchnl_action act;
1246	int err;
1247
1248	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1249		act = VIRTCHNL_ACTION_DROP;
1250	} else {
1251		q_index = fsp->ring_cookie;
1252		if (q_index >= adapter->num_active_queues)
1253			return -EINVAL;
1254
1255		act = VIRTCHNL_ACTION_QUEUE;
1256	}
1257
1258	fltr->action = act;
1259	fltr->loc = fsp->location;
1260	fltr->q_index = q_index;
1261
1262	if (fsp->flow_type & FLOW_EXT) {
1263		memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1264		       sizeof(fltr->ext_data.usr_def));
1265		memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1266		       sizeof(fltr->ext_mask.usr_def));
1267	}
1268
1269	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1270	fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1271
1272	switch (flow_type) {
1273	case TCP_V4_FLOW:
1274	case UDP_V4_FLOW:
1275	case SCTP_V4_FLOW:
1276		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1277		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1278		fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1279		fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1280		fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1281		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1282		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1283		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1284		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1285		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
 
1286		break;
1287	case AH_V4_FLOW:
1288	case ESP_V4_FLOW:
1289		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1290		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1291		fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1292		fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1293		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1294		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1295		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1296		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
 
1297		break;
1298	case IPV4_USER_FLOW:
1299		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1300		fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1301		fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1302		fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1303		fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1304		fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1305		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1306		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1307		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1308		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
 
1309		break;
1310	case TCP_V6_FLOW:
1311	case UDP_V6_FLOW:
1312	case SCTP_V6_FLOW:
1313		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1314		       sizeof(struct in6_addr));
1315		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1316		       sizeof(struct in6_addr));
1317		fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1318		fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1319		fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1320		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1321		       sizeof(struct in6_addr));
1322		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1323		       sizeof(struct in6_addr));
1324		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1325		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1326		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
 
1327		break;
1328	case AH_V6_FLOW:
1329	case ESP_V6_FLOW:
1330		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1331		       sizeof(struct in6_addr));
1332		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1333		       sizeof(struct in6_addr));
1334		fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1335		fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1336		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1337		       sizeof(struct in6_addr));
1338		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1339		       sizeof(struct in6_addr));
1340		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1341		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
 
1342		break;
1343	case IPV6_USER_FLOW:
1344		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1345		       sizeof(struct in6_addr));
1346		memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1347		       sizeof(struct in6_addr));
1348		fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1349		fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1350		fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1351		memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1352		       sizeof(struct in6_addr));
1353		memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1354		       sizeof(struct in6_addr));
1355		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1356		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1357		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
 
1358		break;
1359	case ETHER_FLOW:
1360		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1361		fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1362		break;
1363	default:
1364		/* not doing un-parsed flow types */
1365		return -EINVAL;
1366	}
1367
 
 
 
 
1368	if (iavf_fdir_is_dup_fltr(adapter, fltr))
1369		return -EEXIST;
1370
1371	err = iavf_parse_rx_flow_user_data(fsp, fltr);
1372	if (err)
1373		return err;
1374
1375	return iavf_fill_fdir_add_msg(adapter, fltr);
1376}
1377
1378/**
1379 * iavf_add_fdir_ethtool - add Flow Director filter
1380 * @adapter: pointer to the VF adapter structure
1381 * @cmd: command to add Flow Director filter
1382 *
1383 * Returns 0 on success and negative values for failure
1384 */
1385static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1386{
1387	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1388	struct iavf_fdir_fltr *fltr;
1389	int count = 50;
1390	int err;
1391
1392	if (!FDIR_FLTR_SUPPORT(adapter))
1393		return -EOPNOTSUPP;
1394
1395	if (fsp->flow_type & FLOW_MAC_EXT)
1396		return -EINVAL;
1397
1398	if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
1399		dev_err(&adapter->pdev->dev,
1400			"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
1401			IAVF_MAX_FDIR_FILTERS);
1402		return -ENOSPC;
1403	}
1404
1405	spin_lock_bh(&adapter->fdir_fltr_lock);
1406	if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
1407		dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1408		spin_unlock_bh(&adapter->fdir_fltr_lock);
1409		return -EEXIST;
1410	}
1411	spin_unlock_bh(&adapter->fdir_fltr_lock);
1412
1413	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1414	if (!fltr)
1415		return -ENOMEM;
1416
1417	while (!mutex_trylock(&adapter->crit_lock)) {
1418		if (--count == 0) {
1419			kfree(fltr);
1420			return -EINVAL;
1421		}
1422		udelay(1);
1423	}
1424
1425	err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1426	if (err)
1427		goto ret;
1428
1429	spin_lock_bh(&adapter->fdir_fltr_lock);
1430	iavf_fdir_list_add_fltr(adapter, fltr);
1431	adapter->fdir_active_fltr++;
1432	fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
1433	adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1434	spin_unlock_bh(&adapter->fdir_fltr_lock);
1435
1436	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1437
1438ret:
1439	if (err && fltr)
1440		kfree(fltr);
1441
1442	mutex_unlock(&adapter->crit_lock);
1443	return err;
1444}
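
/* Example usage (illustrative only; interface name, addresses, queue and
 * rule index are placeholders): a filter like the one built above is
 * typically requested from userspace through ethtool's ntuple interface.
 *
 *	# steer a TCP/IPv4 flow to queue 2, installing it as rule 10
 *	ethtool -N <ethX> flow-type tcp4 src-ip 192.168.10.1 \
 *		dst-ip 192.168.10.2 dst-port 80 action 2 loc 10
 */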
1445
1446/**
1447 * iavf_del_fdir_ethtool - delete Flow Director filter
1448 * @adapter: pointer to the VF adapter structure
1449 * @cmd: command to delete Flow Director filter
1450 *
1451 * Returns 0 on success and negative values for failure
1452 */
1453static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1454{
1455	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1456	struct iavf_fdir_fltr *fltr = NULL;
1457	int err = 0;
1458
1459	if (!FDIR_FLTR_SUPPORT(adapter))
1460		return -EOPNOTSUPP;
1461
1462	spin_lock_bh(&adapter->fdir_fltr_lock);
1463	fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1464	if (fltr) {
1465		if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
1466			fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1467			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1468		} else {
1469			err = -EBUSY;
1470		}
1471	} else if (adapter->fdir_active_fltr) {
1472		err = -EINVAL;
1473	}
1474	spin_unlock_bh(&adapter->fdir_fltr_lock);
1475
1476	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
1477		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1478
1479	return err;
1480}
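
/* Example usage (illustrative only; interface name and rule index are
 * placeholders):
 *
 *	# delete the ntuple rule previously installed at location 10
 *	ethtool -N <ethX> delete 10
 */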
1481
1482/**
1483 * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1484 * @cmd: ethtool rxnfc command
1485 *
1486 * This function parses the rxnfc command and returns intended
1487 * header types for RSS configuration
1488 */
1489static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1490{
1491	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1492
1493	switch (cmd->flow_type) {
1494	case TCP_V4_FLOW:
1495		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1496			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1497		break;
1498	case UDP_V4_FLOW:
1499		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1500			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1501		break;
1502	case SCTP_V4_FLOW:
1503		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1504			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1505		break;
1506	case TCP_V6_FLOW:
1507		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1508			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1509		break;
1510	case UDP_V6_FLOW:
1511		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1512			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1513		break;
1514	case SCTP_V6_FLOW:
1515		hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1516			IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1517		break;
1518	default:
1519		break;
1520	}
1521
1522	return hdrs;
1523}
1524
1525/**
1526 * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1527 * @cmd: ethtool rxnfc command
1528 *
1529 * This function parses the rxnfc command and returns intended hash fields for
1530 * RSS configuration
1531 */
1532static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
1533{
1534	u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1535
1536	if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1537		switch (cmd->flow_type) {
1538		case TCP_V4_FLOW:
1539		case UDP_V4_FLOW:
1540		case SCTP_V4_FLOW:
1541			if (cmd->data & RXH_IP_SRC)
1542				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1543			if (cmd->data & RXH_IP_DST)
1544				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1545			break;
1546		case TCP_V6_FLOW:
1547		case UDP_V6_FLOW:
1548		case SCTP_V6_FLOW:
1549			if (cmd->data & RXH_IP_SRC)
1550				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1551			if (cmd->data & RXH_IP_DST)
1552				hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1553			break;
1554		default:
1555			break;
1556		}
1557	}
1558
1559	if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1560		switch (cmd->flow_type) {
1561		case TCP_V4_FLOW:
1562		case TCP_V6_FLOW:
1563			if (cmd->data & RXH_L4_B_0_1)
1564				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1565			if (cmd->data & RXH_L4_B_2_3)
1566				hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1567			break;
1568		case UDP_V4_FLOW:
1569		case UDP_V6_FLOW:
1570			if (cmd->data & RXH_L4_B_0_1)
1571				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1572			if (cmd->data & RXH_L4_B_2_3)
1573				hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1574			break;
1575		case SCTP_V4_FLOW:
1576		case SCTP_V6_FLOW:
1577			if (cmd->data & RXH_L4_B_0_1)
1578				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1579			if (cmd->data & RXH_L4_B_2_3)
1580				hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1581			break;
1582		default:
1583			break;
1584		}
1585	}
1586
1587	return hfld;
1588}
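
/* For reference, the RXH_* bits tested above correspond to the field
 * characters of ethtool(8)'s rx-flow-hash option (a rough, non-exhaustive
 * mapping):
 *
 *	s -> RXH_IP_SRC    (IPv4/IPv6 source address)
 *	d -> RXH_IP_DST    (IPv4/IPv6 destination address)
 *	f -> RXH_L4_B_0_1  (L4 bytes 0-1, i.e. source port)
 *	n -> RXH_L4_B_2_3  (L4 bytes 2-3, i.e. destination port)
 */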
1589
1590/**
1591 * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1592 * @adapter: pointer to the VF adapter structure
1593 * @cmd: ethtool rxnfc command
1594 *
1595 * Returns Success if the flow input set is supported.
1596 */
1597static int
1598iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1599			  struct ethtool_rxnfc *cmd)
1600{
1601	struct iavf_adv_rss *rss_old, *rss_new;
1602	bool rss_new_add = false;
1603	int count = 50, err = 0;
1604	u64 hash_flds;
1605	u32 hdrs;
1606
1607	if (!ADV_RSS_SUPPORT(adapter))
1608		return -EOPNOTSUPP;
1609
1610	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1611	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1612		return -EINVAL;
1613
1614	hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
1615	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1616		return -EINVAL;
1617
1618	rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1619	if (!rss_new)
1620		return -ENOMEM;
1621
1622	if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
1623		kfree(rss_new);
1624		return -EINVAL;
1625	}
1626
1627	while (!mutex_trylock(&adapter->crit_lock)) {
1628		if (--count == 0) {
1629			kfree(rss_new);
1630			return -EINVAL;
1631		}
1632
1633		udelay(1);
1634	}
1635
1636	spin_lock_bh(&adapter->adv_rss_lock);
1637	rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1638	if (rss_old) {
1639		if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1640			err = -EBUSY;
1641		} else if (rss_old->hash_flds != hash_flds) {
1642			rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1643			rss_old->hash_flds = hash_flds;
1644			memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1645			       sizeof(rss_new->cfg_msg));
1646			adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1647		} else {
1648			err = -EEXIST;
1649		}
1650	} else {
1651		rss_new_add = true;
1652		rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1653		rss_new->packet_hdrs = hdrs;
1654		rss_new->hash_flds = hash_flds;
1655		list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1656		adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1657	}
1658	spin_unlock_bh(&adapter->adv_rss_lock);
1659
1660	if (!err)
1661		mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1662
1663	mutex_unlock(&adapter->crit_lock);
1664
1665	if (!rss_new_add)
1666		kfree(rss_new);
1667
1668	return err;
1669}
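
/* Example usage (illustrative only; interface name is a placeholder):
 *
 *	# hash TCP/IPv4 flows on src/dst IP and src/dst port
 *	ethtool -N <ethX> rx-flow-hash tcp4 sdfn
 */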
1670
1671/**
1672 * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1673 * @adapter: pointer to the VF adapter structure
1674 * @cmd: ethtool rxnfc command
1675 *
1676 * Returns Success if the flow input set is supported.
1677 */
1678static int
1679iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
1680			  struct ethtool_rxnfc *cmd)
1681{
1682	struct iavf_adv_rss *rss;
1683	u64 hash_flds;
1684	u32 hdrs;
1685
1686	if (!ADV_RSS_SUPPORT(adapter))
1687		return -EOPNOTSUPP;
1688
1689	cmd->data = 0;
1690
1691	hdrs = iavf_adv_rss_parse_hdrs(cmd);
1692	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1693		return -EINVAL;
1694
1695	spin_lock_bh(&adapter->adv_rss_lock);
1696	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1697	if (rss)
1698		hash_flds = rss->hash_flds;
1699	else
1700		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
1701	spin_unlock_bh(&adapter->adv_rss_lock);
1702
1703	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1704		return -EINVAL;
1705
1706	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
1707			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
1708		cmd->data |= (u64)RXH_IP_SRC;
1709
1710	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
1711			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
1712		cmd->data |= (u64)RXH_IP_DST;
1713
1714	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
1715			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
1716			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
1717		cmd->data |= (u64)RXH_L4_B_0_1;
1718
1719	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
1720			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
1721			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
1722		cmd->data |= (u64)RXH_L4_B_2_3;
1723
1724	return 0;
1725}
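
/* Example usage (illustrative only; interface name is a placeholder):
 *
 *	# show which fields are currently hashed for TCP/IPv4 flows
 *	ethtool -n <ethX> rx-flow-hash tcp4
 */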
1726
1727/**
1728 * iavf_set_rxnfc - command to set Rx flow rules.
1729 * @netdev: network interface device structure
1730 * @cmd: ethtool rxnfc command
1731 *
1732 * Returns 0 for success and negative values for errors
1733 */
1734static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1735{
1736	struct iavf_adapter *adapter = netdev_priv(netdev);
1737	int ret = -EOPNOTSUPP;
1738
1739	switch (cmd->cmd) {
1740	case ETHTOOL_SRXCLSRLINS:
1741		ret = iavf_add_fdir_ethtool(adapter, cmd);
1742		break;
1743	case ETHTOOL_SRXCLSRLDEL:
1744		ret = iavf_del_fdir_ethtool(adapter, cmd);
1745		break;
1746	case ETHTOOL_SRXFH:
1747		ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
1748		break;
1749	default:
1750		break;
1751	}
1752
1753	return ret;
1754}
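
/* A minimal userspace sketch (not part of the driver) of how one of the
 * requests dispatched above can be issued through the legacy ethtool ioctl.
 * It assumes an open AF_INET socket fd and <linux/ethtool.h>,
 * <linux/sockios.h>, <net/if.h>, <sys/ioctl.h> and <string.h> are included;
 * interface name, queue and rule index are placeholders.
 *
 *	struct ethtool_rxnfc nfc = {
 *		.cmd = ETHTOOL_SRXCLSRLINS,
 *		.fs = {
 *			.flow_type = TCP_V4_FLOW,
 *			.ring_cookie = 2,	// deliver matches to queue 2
 *			.location = 10,		// rule index
 *		},
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&nfc };
 *
 *	strncpy(ifr.ifr_name, "<ethX>", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 */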
1755
1756/**
1757 * iavf_get_rxnfc - command to get RX flow classification rules
1758 * @netdev: network interface device structure
1759 * @cmd: ethtool rxnfc command
1760 * @rule_locs: pointer to store rule locations
1761 *
1762 * Returns Success if the command is supported.
1763 **/
1764static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1765			  u32 *rule_locs)
1766{
1767	struct iavf_adapter *adapter = netdev_priv(netdev);
1768	int ret = -EOPNOTSUPP;
1769
1770	switch (cmd->cmd) {
1771	case ETHTOOL_GRXRINGS:
1772		cmd->data = adapter->num_active_queues;
1773		ret = 0;
1774		break;
1775	case ETHTOOL_GRXCLSRLCNT:
1776		if (!FDIR_FLTR_SUPPORT(adapter))
1777			break;
1778		cmd->rule_cnt = adapter->fdir_active_fltr;
1779		cmd->data = IAVF_MAX_FDIR_FILTERS;
1780		ret = 0;
1781		break;
1782	case ETHTOOL_GRXCLSRULE:
1783		ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
1784		break;
1785	case ETHTOOL_GRXCLSRLALL:
1786		ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
1787		break;
1788	case ETHTOOL_GRXFH:
1789		ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
1790		break;
1791	default:
1792		break;
1793	}
1794
1795	return ret;
1796}
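
/* Example usage (illustrative only; interface name is a placeholder):
 *
 *	# list installed ntuple/Flow Director rules and the rule count
 *	ethtool -n <ethX>
 */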
1797/**
1798 * iavf_get_channels: get the number of channels supported by the device
1799 * @netdev: network interface device structure
1800 * @ch: channel information structure
1801 *
1802 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
1803 * queue pair. Report one extra channel to match our "other" MSI-X vector.
1804 **/
1805static void iavf_get_channels(struct net_device *netdev,
1806			      struct ethtool_channels *ch)
1807{
1808	struct iavf_adapter *adapter = netdev_priv(netdev);
1809
1810	/* Report maximum channels */
1811	ch->max_combined = adapter->vsi_res->num_queue_pairs;
1812
1813	ch->max_other = NONQ_VECS;
1814	ch->other_count = NONQ_VECS;
1815
1816	ch->combined_count = adapter->num_active_queues;
1817}
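
/* Example usage (illustrative only; interface name is a placeholder):
 *
 *	# query current and maximum channel counts
 *	ethtool -l <ethX>
 */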
1818
1819/**
1820 * iavf_set_channels: set the new channel count
1821 * @netdev: network interface device structure
1822 * @ch: channel information structure
1823 *
1824 * Negotiate a new number of channels with the PF then do a reset.  During
1825 * reset we'll realloc queues and fix the RSS table.  Returns 0 on success,
1826 * negative on failure.
1827 **/
1828static int iavf_set_channels(struct net_device *netdev,
1829			     struct ethtool_channels *ch)
1830{
1831	struct iavf_adapter *adapter = netdev_priv(netdev);
1832	u32 num_req = ch->combined_count;
1833	int i;
1834
1835	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1836	    adapter->num_tc) {
1837		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
1838		return -EINVAL;
1839	}
1840
1841	/* All of these should have already been checked by ethtool before this
1842	 * even gets to us, but just to be sure.
1843	 */
1844	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
1845		return -EINVAL;
1846
1847	if (num_req == adapter->num_active_queues)
1848		return 0;
1849
1850	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
1851		return -EINVAL;
1852
1853	adapter->num_req_queues = num_req;
1854	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1855	iavf_schedule_reset(adapter);
1856
1857	/* wait until the reset is done */
1858	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
1859		msleep(IAVF_RESET_WAIT_MS);
1860		if (adapter->flags & IAVF_FLAG_RESET_PENDING)
1861			continue;
1862		break;
1863	}
1864	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
1865		adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1866		adapter->num_active_queues = num_req;
1867		return -EOPNOTSUPP;
1868	}
1869
1870	return 0;
1871}
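
/* Example usage (illustrative only; interface name and count are
 * placeholders):
 *
 *	# request four combined (Tx/Rx) queue pairs
 *	ethtool -L <ethX> combined 4
 */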
1872
1873/**
1874 * iavf_get_rxfh_key_size - get the RSS hash key size
1875 * @netdev: network interface device structure
1876 *
1877 * Returns the RSS hash key size.
1878 **/
1879static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
1880{
1881	struct iavf_adapter *adapter = netdev_priv(netdev);
1882
1883	return adapter->rss_key_size;
1884}
1885
1886/**
1887 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
1888 * @netdev: network interface device structure
1889 *
1890 * Returns the table size.
1891 **/
1892static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
1893{
1894	struct iavf_adapter *adapter = netdev_priv(netdev);
1895
1896	return adapter->rss_lut_size;
1897}
1898
1899/**
1900 * iavf_get_rxfh - get the rx flow hash indirection table
1901 * @netdev: network interface device structure
1902 * @indir: indirection table
1903 * @key: hash key
1904 * @hfunc: hash function in use
1905 *
1906 * Reads the indirection table from the driver's cached copy. Always returns 0.
1907 **/
1908static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
1909			 u8 *hfunc)
1910{
1911	struct iavf_adapter *adapter = netdev_priv(netdev);
1912	u16 i;
1913
1914	if (hfunc)
1915		*hfunc = ETH_RSS_HASH_TOP;
1916	if (key)
1917		memcpy(key, adapter->rss_key, adapter->rss_key_size);
1918
1919	if (indir)
1920		/* Each 32-bit word of 'indir' holds one LUT entry */
1921		for (i = 0; i < adapter->rss_lut_size; i++)
1922			indir[i] = (u32)adapter->rss_lut[i];
1923
1924	return 0;
1925}
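
/* Example usage (illustrative only; interface name is a placeholder):
 *
 *	# dump the RSS indirection table and hash key
 *	ethtool -x <ethX>
 */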
1926
1927/**
1928 * iavf_set_rxfh - set the rx flow hash indirection table
1929 * @netdev: network interface device structure
1930 * @indir: indirection table
1931 * @key: hash key
1932 * @hfunc: hash function to use
1933 *
1934 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
1935 * returns 0 after programming the table.
1936 **/
1937static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
1938			 const u8 *key, const u8 hfunc)
1939{
1940	struct iavf_adapter *adapter = netdev_priv(netdev);
1941	u16 i;
1942
1943	/* Only the Toeplitz hash function is supported */
1944	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1945		return -EOPNOTSUPP;
1946
1947	if (!key && !indir)
1948		return 0;
1949
1950	if (key)
1951		memcpy(adapter->rss_key, key, adapter->rss_key_size);
1952
1953	if (indir) {
1954		/* Each 32-bit word of 'indir' holds one LUT entry */
1955		for (i = 0; i < adapter->rss_lut_size; i++)
1956			adapter->rss_lut[i] = (u8)(indir[i]);
1957	}
1958
1959	return iavf_config_rss(adapter);
1960}
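
/* Example usage (illustrative only; interface name and queue count are
 * placeholders):
 *
 *	# spread the indirection table evenly across the first 4 queues
 *	ethtool -X <ethX> equal 4
 */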
1961
1962static const struct ethtool_ops iavf_ethtool_ops = {
1963	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1964				     ETHTOOL_COALESCE_USE_ADAPTIVE,
1965	.get_drvinfo		= iavf_get_drvinfo,
1966	.get_link		= ethtool_op_get_link,
1967	.get_ringparam		= iavf_get_ringparam,
1968	.set_ringparam		= iavf_set_ringparam,
1969	.get_strings		= iavf_get_strings,
1970	.get_ethtool_stats	= iavf_get_ethtool_stats,
1971	.get_sset_count		= iavf_get_sset_count,
1972	.get_priv_flags		= iavf_get_priv_flags,
1973	.set_priv_flags		= iavf_set_priv_flags,
1974	.get_msglevel		= iavf_get_msglevel,
1975	.set_msglevel		= iavf_set_msglevel,
1976	.get_coalesce		= iavf_get_coalesce,
1977	.set_coalesce		= iavf_set_coalesce,
1978	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
1979	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
1980	.set_rxnfc		= iavf_set_rxnfc,
1981	.get_rxnfc		= iavf_get_rxnfc,
1982	.get_rxfh_indir_size	= iavf_get_rxfh_indir_size,
1983	.get_rxfh		= iavf_get_rxfh,
1984	.set_rxfh		= iavf_set_rxfh,
1985	.get_channels		= iavf_get_channels,
1986	.set_channels		= iavf_set_channels,
1987	.get_rxfh_key_size	= iavf_get_rxfh_key_size,
1988	.get_link_ksettings	= iavf_get_link_ksettings,
1989};
1990
1991/**
1992 * iavf_set_ethtool_ops - Initialize ethtool ops struct
1993 * @netdev: network interface device structure
1994 *
1995 * Sets ethtool ops struct in our netdev so that ethtool can call
1996 * our functions.
1997 **/
1998void iavf_set_ethtool_ops(struct net_device *netdev)
1999{
2000	netdev->ethtool_ops = &iavf_ethtool_ops;
2001}