// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"


enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
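/* The null-pointer casts inside sizeof() are never dereferenced; sizeof()
 * only needs the member's type.  Each table entry thus records a stat's
 * width (u32 vs u64) alongside its offsetof() position, which the generic
 * copy loop in ixgbe_get_ethtool_stats() uses to fetch the value.
 */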

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
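/* struct ixgbe_queue_stats is a pair of u64 counters (packets and bytes),
 * so each Tx and Rx queue contributes two entries to the stats array.
 */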
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
		(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
		 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
		 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
		 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
		/ sizeof(u64))
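/* The px{on,off}{rx,tx}c counters are 8-entry u64 arrays (one slot per
 * packet buffer), so this evaluates to 4 * 8 = 32 additional entries for
 * the per-priority flow control statistics.
 */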
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN	(sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
	"vf-ipsec",
#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF	BIT(2)
	"mdd-disable-vf",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)

#define ixgbe_isbackplane(type)	((type) == ixgbe_media_type_backplane)

static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
					 struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	}
}

static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
					   struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	}
}

static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
		ixgbe_set_supported_10gtypes(hw, cmd);
		ixgbe_set_advertising_10gtypes(hw, cmd);
	}
	if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
		if (ixgbe_isbackplane(hw->phy.media_type)) {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseKX_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseKX_Full);
		} else {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseT_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseT_Full);
		}
	}
	if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);
	}
	if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);
	}

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     10baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     100baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ixgbe_set_advertising_10gtypes(hw, cmd);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (ethtool_link_ksettings_test_link_mode
				(cmd, supported, 1000baseKX_Full))
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseKX_Full);
			else
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseT_Full);
		}
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     5000baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     2500baseT_Full);
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 10000baseT_Full);
		}
	}

	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
		case ixgbe_sfp_type_1g_bx_core0:
		case ixgbe_sfp_type_1g_bx_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     TP);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     TP);
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case ixgbe_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	case ixgbe_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(cmd, advertising,
						     Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	int err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (!linkmode_subset(cmd->link_modes.advertising,
				     cmd->link_modes.supported))
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  10000baseT_Full) &&
			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  5000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  2500baseT_Full))
			advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  1000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  100baseT_Full))
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

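		/* Holding __IXGBE_IN_SFP_INIT serializes this link
		 * reprogramming against the SFP identification/init paths,
		 * which take the same bit.
		 */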
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
							    10000baseT_Full)) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}

static void ixgbe_get_pause_stats(struct net_device *netdev,
				  struct ethtool_pause_stats *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw_stats *hwstats = &adapter->stats;

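	/* Report XON + XOFF frame totals from the link-level (lxon/lxoff)
	 * flow control counters; per-priority pause is tracked separately
	 * in the px* counters.
	 */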
	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
}

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if anything changed, apply the new flow control settings */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1145
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
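	/* The version word packs the MAC type into bits 31:24, the PCI
	 * revision ID into bits 23:16, and the 16-bit device ID into the
	 * low half, so userspace can tell which register layout follows.
	 */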

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;
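	/* The EEPROM is addressed in 16-bit words while ethtool passes a
	 * byte range; the shifts convert it to an inclusive word range,
	 * rounding outward at both ends.
	 */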

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));

	strscpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}

static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		return IXGBE_MAX_RXD_82598;
	case ixgbe_mac_82599EB:
		return IXGBE_MAX_RXD_82599;
	case ixgbe_mac_X540:
		return IXGBE_MAX_RXD_X540;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RXD_X550;
	default:
		return IXGBE_MAX_RXD_82598;
	}
}

static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		return IXGBE_MAX_TXD_82598;
	case ixgbe_mac_82599EB:
		return IXGBE_MAX_TXD_82599;
	case ixgbe_mac_X540:
		return IXGBE_MAX_TXD_X540;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_TXD_X550;
	default:
		return IXGBE_MAX_TXD_82598;
	}
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = ixgbe_get_max_rxd(adapter);
	ring->tx_max_pending = ixgbe_get_max_txd(adapter);
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

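	/* While the interface is down no descriptor rings are allocated,
	 * so just record the new counts; the next open will allocate the
	 * rings at the new size.
	 */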
	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}

		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}

static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

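		/* u64_stats seqcount loop: retry the read if a writer
		 * updated the counters mid-way, so the packet and byte
		 * counts stay consistent with each other on 32-bit kernels.
		 */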
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++)
			ethtool_puts(&p, ixgbe_gstrings_test[i]);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
			ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
			ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};
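
/* reg:        first register offset in the group
 * array_len:  number of registers in the group (1 for a single register)
 * test_type:  one of the test-type defines below
 * mask:       bits compared when the value is read back
 * write:      value written during the test
 */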

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
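
/* PATTERN_TEST writes each of a set of bit patterns (masked by 'write')
 * and verifies the read-back against 'mask'; SET_READ_TEST writes
 * 'write & mask' once and checks that it sticks; WRITE_NO_TEST performs
 * setup writes with no verification; the TABLE tests are pattern tests
 * using a 4- or 8-byte register stride instead of the default 0x40.
 */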

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}

static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}

static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
1739 shared_int = false;
1740 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1741 netdev->name, netdev)) {
1742 *data = 1;
1743 return -1;
1744 }
1745 e_info(hw, "testing %s interrupt\n", shared_int ?
1746 "shared" : "unshared");
1747
1748 /* Disable all the interrupts */
1749 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1750 IXGBE_WRITE_FLUSH(&adapter->hw);
1751 usleep_range(10000, 20000);
1752
1753 /* Test each interrupt */
1754 for (; i < 10; i++) {
1755 /* Interrupt to test */
1756 mask = BIT(i);
1757
1758 if (!shared_int) {
1759 /*
1760 * Disable the interrupts to be reported in
1761 * the cause register and then force the same
1762 * interrupt and see if one gets posted. If
1763 * an interrupt was posted to the bus, the
1764 * test failed.
1765 */
1766 adapter->test_icr = 0;
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1768 ~mask & 0x00007FFF);
1769 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1770 ~mask & 0x00007FFF);
1771 IXGBE_WRITE_FLUSH(&adapter->hw);
1772 usleep_range(10000, 20000);
1773
1774 if (adapter->test_icr & mask) {
1775 *data = 3;
1776 break;
1777 }
1778 }
1779
1780 /*
1781 * Enable the interrupt to be reported in the cause
1782 * register and then force the same interrupt and see
1783 * if one gets posted. If an interrupt was not posted
1784 * to the bus, the test failed.
1785 */
1786 adapter->test_icr = 0;
1787 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1789 IXGBE_WRITE_FLUSH(&adapter->hw);
1790 usleep_range(10000, 20000);
1791
1792 if (!(adapter->test_icr & mask)) {
1793 *data = 4;
1794 break;
1795 }
1796
1797 if (!shared_int) {
1798 /*
1799 * Disable the other interrupts to be reported in
1800 * the cause register and then force the other
1801 * interrupts and see if any get posted. If
1802 * an interrupt was posted to the bus, the
1803 * test failed.
1804 */
1805 adapter->test_icr = 0;
1806 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1807 ~mask & 0x00007FFF);
1808 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1809 ~mask & 0x00007FFF);
1810 IXGBE_WRITE_FLUSH(&adapter->hw);
1811 usleep_range(10000, 20000);
1812
1813 if (adapter->test_icr) {
1814 *data = 5;
1815 break;
1816 }
1817 }
1818 }
1819
1820 /* Disable all the interrupts */
1821 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1822 IXGBE_WRITE_FLUSH(&adapter->hw);
1823 usleep_range(10000, 20000);
1824
1825 /* Unhook test interrupt handler */
1826 free_irq(irq, netdev);
1827
1828 return *data;
1829}
1830
1831static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1832{
1833 /* Shut down the DMA engines now so they can be reinitialized later,
1834 * since the test rings and normally used rings should overlap on
1835 * queue 0 we can just use the standard disable Rx/Tx calls and they
1836 * will take care of disabling the test rings for us.
1837 */
1838
1839 /* first Rx */
1840 ixgbe_disable_rx(adapter);
1841
1842 /* now Tx */
1843 ixgbe_disable_tx(adapter);
1844
1845 ixgbe_reset(adapter);
1846
1847 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1848 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1849}
1850
1851static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1852{
1853 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1854 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1855 struct ixgbe_hw *hw = &adapter->hw;
1856 u32 rctl, reg_data;
1857 int ret_val;
1858 int err;
1859
1860 /* Setup Tx descriptor ring and Tx buffers */
1861 tx_ring->count = IXGBE_DEFAULT_TXD;
1862 tx_ring->queue_index = 0;
1863 tx_ring->dev = &adapter->pdev->dev;
1864 tx_ring->netdev = adapter->netdev;
1865 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1866
1867 err = ixgbe_setup_tx_resources(tx_ring);
1868 if (err)
1869 return 1;
1870
1871 switch (adapter->hw.mac.type) {
1872 case ixgbe_mac_82599EB:
1873 case ixgbe_mac_X540:
1874 case ixgbe_mac_X550:
1875 case ixgbe_mac_X550EM_x:
1876 case ixgbe_mac_x550em_a:
1877 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1878 reg_data |= IXGBE_DMATXCTL_TE;
1879 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1880 break;
1881 default:
1882 break;
1883 }
1884
1885 ixgbe_configure_tx_ring(adapter, tx_ring);
1886
1887 /* Setup Rx Descriptor ring and Rx buffers */
1888 rx_ring->count = IXGBE_DEFAULT_RXD;
1889 rx_ring->queue_index = 0;
1890 rx_ring->dev = &adapter->pdev->dev;
1891 rx_ring->netdev = adapter->netdev;
1892 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1893
1894 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1895 if (err) {
1896 ret_val = 4;
1897 goto err_nomem;
1898 }
1899
1900 hw->mac.ops.disable_rx(hw);
1901
1902 ixgbe_configure_rx_ring(adapter, rx_ring);
1903
1904 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1905 rctl |= IXGBE_RXCTRL_DMBYPS;
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1907
1908 hw->mac.ops.enable_rx(hw);
1909
1910 return 0;
1911
1912err_nomem:
1913 ixgbe_free_desc_rings(adapter);
1914 return ret_val;
1915}
1916
1917static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1918{
1919 struct ixgbe_hw *hw = &adapter->hw;
1920 u32 reg_data;
1921
1922
1923 /* Setup MAC loopback */
1924 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1925 reg_data |= IXGBE_HLREG0_LPBK;
1926 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1927
1928 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1929 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1930 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1931
1932 /* X540 and X550 needs to set the MACC.FLU bit to force link up */
1933 switch (adapter->hw.mac.type) {
1934 case ixgbe_mac_X540:
1935 case ixgbe_mac_X550:
1936 case ixgbe_mac_X550EM_x:
1937 case ixgbe_mac_x550em_a:
1938 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1939 reg_data |= IXGBE_MACC_FLU;
1940 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1941 break;
1942 default:
1943 if (hw->mac.orig_autoc) {
1944 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1945 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1946 } else {
1947 return 10;
1948 }
1949 }
1950 IXGBE_WRITE_FLUSH(hw);
1951 usleep_range(10000, 20000);
1952
1953 /* Disable Atlas Tx lanes; re-enabled in reset path */
1954 if (hw->mac.type == ixgbe_mac_82598EB) {
1955 u8 atlas;
1956
1957 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1958 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1959 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1960
1961 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1962 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1963 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1964
1965 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1966 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1967 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1968
1969 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1970 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1971 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1972 }
1973
1974 return 0;
1975}
1976
1977static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1978{
1979 u32 reg_data;
1980
1981 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1982 reg_data &= ~IXGBE_HLREG0_LPBK;
1983 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1984}
1985
1986static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1987 unsigned int frame_size)
1988{
1989 memset(skb->data, 0xFF, frame_size);
1990 frame_size >>= 1;
1991 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1992 skb->data[frame_size + 10] = 0xBE;
1993 skb->data[frame_size + 12] = 0xAF;
1994}
1995
1996static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1997 unsigned int frame_size)
1998{
1999 unsigned char *data;
2000
2001 frame_size >>= 1;
2002
2003 data = page_address(rx_buffer->page) + rx_buffer->page_offset;
2004
2005 return data[3] == 0xFF && data[frame_size + 10] == 0xBE &&
2006 data[frame_size + 12] == 0xAF;
2007}
2008
2009static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
2010 struct ixgbe_ring *tx_ring,
2011 unsigned int size)
2012{
2013 union ixgbe_adv_rx_desc *rx_desc;
2014 u16 rx_ntc, tx_ntc, count = 0;
2015
2016 /* initialize next to clean and descriptor values */
2017 rx_ntc = rx_ring->next_to_clean;
2018 tx_ntc = tx_ring->next_to_clean;
2019 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2020
2021 while (tx_ntc != tx_ring->next_to_use) {
2022 union ixgbe_adv_tx_desc *tx_desc;
2023 struct ixgbe_tx_buffer *tx_buffer;
2024
2025 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
2026
2027 /* if DD is not set transmit has not completed */
2028 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2029 return count;
2030
2031 /* unmap buffer on Tx side */
2032 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2033
2034 /* Free all the Tx ring sk_buffs */
2035 dev_kfree_skb_any(tx_buffer->skb);
2036
2037 /* unmap skb header data */
2038 dma_unmap_single(tx_ring->dev,
2039 dma_unmap_addr(tx_buffer, dma),
2040 dma_unmap_len(tx_buffer, len),
2041 DMA_TO_DEVICE);
2042 dma_unmap_len_set(tx_buffer, len, 0);
2043
2044 /* increment Tx next to clean counter */
2045 tx_ntc++;
2046 if (tx_ntc == tx_ring->count)
2047 tx_ntc = 0;
2048 }
2049
2050 while (rx_desc->wb.upper.length) {
2051 struct ixgbe_rx_buffer *rx_buffer;
2052
2053 /* check Rx buffer */
2054 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2055
2056 /* sync Rx buffer for CPU read */
2057 dma_sync_single_for_cpu(rx_ring->dev,
2058 rx_buffer->dma,
2059 ixgbe_rx_bufsz(rx_ring),
2060 DMA_FROM_DEVICE);
2061
2062 /* verify contents of skb */
2063 if (ixgbe_check_lbtest_frame(rx_buffer, size))
2064 count++;
2065 else
2066 break;
2067
2068 /* sync Rx buffer for device write */
2069 dma_sync_single_for_device(rx_ring->dev,
2070 rx_buffer->dma,
2071 ixgbe_rx_bufsz(rx_ring),
2072 DMA_FROM_DEVICE);
2073
2074 /* increment Rx next to clean counter */
2075 rx_ntc++;
2076 if (rx_ntc == rx_ring->count)
2077 rx_ntc = 0;
2078
2079 /* fetch next descriptor */
2080 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2081 }
2082
2083 netdev_tx_reset_queue(txring_txq(tx_ring));
2084
2085 /* re-map buffers to ring, store next to clean values */
2086 ixgbe_alloc_rx_buffers(rx_ring, count);
2087 rx_ring->next_to_clean = rx_ntc;
2088 tx_ring->next_to_clean = tx_ntc;
2089
2090 return count;
2091}
2092
2093static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2094{
2095 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2096 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2097 int i, j, lc, good_cnt, ret_val = 0;
2098 unsigned int size = 1024;
2099 netdev_tx_t tx_ret_val;
2100 struct sk_buff *skb;
2101 u32 flags_orig = adapter->flags;
2102
2103 /* DCB can modify the frames on Tx */
2104 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2105
2106 /* allocate test skb */
2107 skb = alloc_skb(size, GFP_KERNEL);
2108 if (!skb)
2109 return 11;
2110
2111 /* place data into test skb */
2112 ixgbe_create_lbtest_frame(skb, size);
2113 skb_put(skb, size);
2114
2115 /*
2116 * Calculate the loop count based on the largest descriptor ring
2117 * The idea is to wrap the largest ring a number of times using 64
2118 * send/receive pairs during each loop
2119 */
2120
2121 if (rx_ring->count <= tx_ring->count)
2122 lc = ((tx_ring->count / 64) * 2) + 1;
2123 else
2124 lc = ((rx_ring->count / 64) * 2) + 1;
2125
2126 for (j = 0; j <= lc; j++) {
2127 /* reset count of good packets */
2128 good_cnt = 0;
2129
2130 /* place 64 packets on the transmit queue*/
2131 for (i = 0; i < 64; i++) {
2132 skb_get(skb);
2133 tx_ret_val = ixgbe_xmit_frame_ring(skb,
2134 adapter,
2135 tx_ring);
2136 if (tx_ret_val == NETDEV_TX_OK)
2137 good_cnt++;
2138 }
2139
2140 if (good_cnt != 64) {
2141 ret_val = 12;
2142 break;
2143 }
2144
2145 /* allow 200 milliseconds for packets to go from Tx to Rx */
2146 msleep(200);
2147
2148 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2149 if (good_cnt != 64) {
2150 ret_val = 13;
2151 break;
2152 }
2153 }
2154
2155 /* free the original skb */
2156 kfree_skb(skb);
2157 adapter->flags = flags_orig;
2158
2159 return ret_val;
2160}
2161
2162static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2163{
2164 *data = ixgbe_setup_desc_rings(adapter);
2165 if (*data)
2166 goto out;
2167 *data = ixgbe_setup_loopback_test(adapter);
2168 if (*data)
2169 goto err_loopback;
2170 *data = ixgbe_run_loopback_test(adapter);
2171 ixgbe_loopback_cleanup(adapter);
2172
2173err_loopback:
2174 ixgbe_free_desc_rings(adapter);
2175out:
2176 return *data;
2177}
2178
2179static void ixgbe_diag_test(struct net_device *netdev,
2180 struct ethtool_test *eth_test, u64 *data)
2181{
2182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2183 bool if_running = netif_running(netdev);
2184
2185 if (ixgbe_removed(adapter->hw.hw_addr)) {
2186 e_err(hw, "Adapter removed - test blocked\n");
2187 data[0] = 1;
2188 data[1] = 1;
2189 data[2] = 1;
2190 data[3] = 1;
2191 data[4] = 1;
2192 eth_test->flags |= ETH_TEST_FL_FAILED;
2193 return;
2194 }
2195 set_bit(__IXGBE_TESTING, &adapter->state);
2196 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2197 struct ixgbe_hw *hw = &adapter->hw;
2198
2199 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2200 int i;
2201 for (i = 0; i < adapter->num_vfs; i++) {
2202 if (adapter->vfinfo[i].clear_to_send) {
2203 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2204 data[0] = 1;
2205 data[1] = 1;
2206 data[2] = 1;
2207 data[3] = 1;
2208 data[4] = 1;
2209 eth_test->flags |= ETH_TEST_FL_FAILED;
2210 clear_bit(__IXGBE_TESTING,
2211 &adapter->state);
2212 return;
2213 }
2214 }
2215 }
2216
2217 /* Offline tests */
2218 e_info(hw, "offline testing starting\n");
2219
2220 /* Link test performed before hardware reset so autoneg doesn't
2221 * interfere with test result
2222 */
2223 if (ixgbe_link_test(adapter, &data[4]))
2224 eth_test->flags |= ETH_TEST_FL_FAILED;
2225
2226 if (if_running)
2227 /* indicate we're in test mode */
2228 ixgbe_close(netdev);
2229 else
2230 ixgbe_reset(adapter);
2231
2232 e_info(hw, "register testing starting\n");
2233 if (ixgbe_reg_test(adapter, &data[0]))
2234 eth_test->flags |= ETH_TEST_FL_FAILED;
2235
2236 ixgbe_reset(adapter);
2237 e_info(hw, "eeprom testing starting\n");
2238 if (ixgbe_eeprom_test(adapter, &data[1]))
2239 eth_test->flags |= ETH_TEST_FL_FAILED;
2240
2241 ixgbe_reset(adapter);
2242 e_info(hw, "interrupt testing starting\n");
2243 if (ixgbe_intr_test(adapter, &data[2]))
2244 eth_test->flags |= ETH_TEST_FL_FAILED;
2245
2246 /* If SRIOV or VMDq is enabled then skip MAC
2247 * loopback diagnostic. */
2248 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2249 IXGBE_FLAG_VMDQ_ENABLED)) {
2250 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2251 data[3] = 0;
2252 goto skip_loopback;
2253 }
2254
2255 ixgbe_reset(adapter);
2256 e_info(hw, "loopback testing starting\n");
2257 if (ixgbe_loopback_test(adapter, &data[3]))
2258 eth_test->flags |= ETH_TEST_FL_FAILED;
2259
2260skip_loopback:
2261 ixgbe_reset(adapter);
2262
2263 /* clear testing bit and return adapter to previous state */
2264 clear_bit(__IXGBE_TESTING, &adapter->state);
2265 if (if_running)
2266 ixgbe_open(netdev);
2267 else if (hw->mac.ops.disable_tx_laser)
2268 hw->mac.ops.disable_tx_laser(hw);
2269 } else {
2270 e_info(hw, "online testing starting\n");
2271
2272 /* Online tests */
2273 if (ixgbe_link_test(adapter, &data[4]))
2274 eth_test->flags |= ETH_TEST_FL_FAILED;
2275
2276 /* Offline tests aren't run; pass by default */
2277 data[0] = 0;
2278 data[1] = 0;
2279 data[2] = 0;
2280 data[3] = 0;
2281
2282 clear_bit(__IXGBE_TESTING, &adapter->state);
2283 }
2284}
2285
2286static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2287 struct ethtool_wolinfo *wol)
2288{
2289 struct ixgbe_hw *hw = &adapter->hw;
2290 int retval = 0;
2291
2292 /* WOL not supported for all devices */
2293 if (!ixgbe_wol_supported(adapter, hw->device_id,
2294 hw->subsystem_device_id)) {
2295 retval = 1;
2296 wol->supported = 0;
2297 }
2298
2299 return retval;
2300}
2301
2302static void ixgbe_get_wol(struct net_device *netdev,
2303 struct ethtool_wolinfo *wol)
2304{
2305 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2306
2307 wol->supported = WAKE_UCAST | WAKE_MCAST |
2308 WAKE_BCAST | WAKE_MAGIC;
2309 wol->wolopts = 0;
2310
2311 if (ixgbe_wol_exclusion(adapter, wol) ||
2312 !device_can_wakeup(&adapter->pdev->dev))
2313 return;
2314
2315 if (adapter->wol & IXGBE_WUFC_EX)
2316 wol->wolopts |= WAKE_UCAST;
2317 if (adapter->wol & IXGBE_WUFC_MC)
2318 wol->wolopts |= WAKE_MCAST;
2319 if (adapter->wol & IXGBE_WUFC_BC)
2320 wol->wolopts |= WAKE_BCAST;
2321 if (adapter->wol & IXGBE_WUFC_MAG)
2322 wol->wolopts |= WAKE_MAGIC;
2323}
2324
2325static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2326{
2327 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2328
2329 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2330 WAKE_FILTER))
2331 return -EOPNOTSUPP;
2332
2333 if (ixgbe_wol_exclusion(adapter, wol))
2334 return wol->wolopts ? -EOPNOTSUPP : 0;
2335
2336 adapter->wol = 0;
2337
2338 if (wol->wolopts & WAKE_UCAST)
2339 adapter->wol |= IXGBE_WUFC_EX;
2340 if (wol->wolopts & WAKE_MCAST)
2341 adapter->wol |= IXGBE_WUFC_MC;
2342 if (wol->wolopts & WAKE_BCAST)
2343 adapter->wol |= IXGBE_WUFC_BC;
2344 if (wol->wolopts & WAKE_MAGIC)
2345 adapter->wol |= IXGBE_WUFC_MAG;
2346
2347 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2348
2349 return 0;
2350}
2351
2352static int ixgbe_nway_reset(struct net_device *netdev)
2353{
2354 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2355
2356 if (netif_running(netdev))
2357 ixgbe_reinit_locked(adapter);
2358
2359 return 0;
2360}
2361
2362static int ixgbe_set_phys_id(struct net_device *netdev,
2363 enum ethtool_phys_id_state state)
2364{
2365 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2366 struct ixgbe_hw *hw = &adapter->hw;
2367
2368 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2369 return -EOPNOTSUPP;
2370
2371 switch (state) {
2372 case ETHTOOL_ID_ACTIVE:
2373 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2374 return 2;
2375
2376 case ETHTOOL_ID_ON:
2377 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2378 break;
2379
2380 case ETHTOOL_ID_OFF:
2381 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2382 break;
2383
2384 case ETHTOOL_ID_INACTIVE:
2385 /* Restore LED settings */
2386 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2387 break;
2388 }
2389
2390 return 0;
2391}
2392
2393static int ixgbe_get_coalesce(struct net_device *netdev,
2394 struct ethtool_coalesce *ec,
2395 struct kernel_ethtool_coalesce *kernel_coal,
2396 struct netlink_ext_ack *extack)
2397{
2398 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2399
2400 /* only valid if in constant ITR mode */
2401 if (adapter->rx_itr_setting <= 1)
2402 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2403 else
2404 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2405
2406 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2407 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2408 return 0;
2409
2410 /* only valid if in constant ITR mode */
2411 if (adapter->tx_itr_setting <= 1)
2412 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2413 else
2414 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2415
2416 return 0;
2417}
2418
2419/*
2420 * this function must be called before setting the new value of
2421 * rx_itr_setting
2422 */
2423static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2424{
2425 struct net_device *netdev = adapter->netdev;
2426
2427 /* nothing to do if LRO or RSC are not enabled */
2428 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2429 !(netdev->features & NETIF_F_LRO))
2430 return false;
2431
2432 /* check the feature flag value and enable RSC if necessary */
2433 if (adapter->rx_itr_setting == 1 ||
2434 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2435 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2436 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2437 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2438 return true;
2439 }
2440 /* if interrupt rate is too high then disable RSC */
2441 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2442 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2443 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2444 return true;
2445 }
2446 return false;
2447}
2448
2449static int ixgbe_set_coalesce(struct net_device *netdev,
2450 struct ethtool_coalesce *ec,
2451 struct kernel_ethtool_coalesce *kernel_coal,
2452 struct netlink_ext_ack *extack)
2453{
2454 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2455 struct ixgbe_q_vector *q_vector;
2456 int i;
2457 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2458 bool need_reset = false;
2459
2460 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2461 /* reject Tx specific changes in case of mixed RxTx vectors */
2462 if (ec->tx_coalesce_usecs)
2463 return -EINVAL;
2464 tx_itr_prev = adapter->rx_itr_setting;
2465 } else {
2466 tx_itr_prev = adapter->tx_itr_setting;
2467 }
2468
2469 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2470 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2471 return -EINVAL;
2472
2473 if (ec->rx_coalesce_usecs > 1)
2474 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2475 else
2476 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2477
2478 if (adapter->rx_itr_setting == 1)
2479 rx_itr_param = IXGBE_20K_ITR;
2480 else
2481 rx_itr_param = adapter->rx_itr_setting;
2482
2483 if (ec->tx_coalesce_usecs > 1)
2484 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2485 else
2486 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2487
2488 if (adapter->tx_itr_setting == 1)
2489 tx_itr_param = IXGBE_12K_ITR;
2490 else
2491 tx_itr_param = adapter->tx_itr_setting;
2492
2493 /* mixed Rx/Tx */
2494 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2495 adapter->tx_itr_setting = adapter->rx_itr_setting;
2496
2497 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2498 if ((adapter->tx_itr_setting != 1) &&
2499 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2500 if ((tx_itr_prev == 1) ||
2501 (tx_itr_prev >= IXGBE_100K_ITR))
2502 need_reset = true;
2503 } else {
2504 if ((tx_itr_prev != 1) &&
2505 (tx_itr_prev < IXGBE_100K_ITR))
2506 need_reset = true;
2507 }
2508
2509 /* check the old value and enable RSC if necessary */
2510 need_reset |= ixgbe_update_rsc(adapter);
2511
2512 for (i = 0; i < adapter->num_q_vectors; i++) {
2513 q_vector = adapter->q_vector[i];
2514 if (q_vector->tx.count && !q_vector->rx.count)
2515 /* tx only */
2516 q_vector->itr = tx_itr_param;
2517 else
2518 /* rx only or mixed */
2519 q_vector->itr = rx_itr_param;
2520 ixgbe_write_eitr(q_vector);
2521 }
2522
2523 /*
2524 * do reset here at the end to make sure EITR==0 case is handled
2525 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2526 * also locks in RSC enable/disable which requires reset
2527 */
2528 if (need_reset)
2529 ixgbe_do_reset(netdev);
2530
2531 return 0;
2532}
2533
2534static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2535 struct ethtool_rxnfc *cmd)
2536{
2537 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2538 struct ethtool_rx_flow_spec *fsp =
2539 (struct ethtool_rx_flow_spec *)&cmd->fs;
2540 struct hlist_node *node2;
2541 struct ixgbe_fdir_filter *rule = NULL;
2542
2543 /* report total rule count */
2544 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2545
2546 hlist_for_each_entry_safe(rule, node2,
2547 &adapter->fdir_filter_list, fdir_node) {
2548 if (fsp->location <= rule->sw_idx)
2549 break;
2550 }
2551
2552 if (!rule || fsp->location != rule->sw_idx)
2553 return -EINVAL;
2554
2555 /* fill out the flow spec entry */
2556
2557 /* set flow type field */
2558 switch (rule->filter.formatted.flow_type) {
2559 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2560 fsp->flow_type = TCP_V4_FLOW;
2561 break;
2562 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2563 fsp->flow_type = UDP_V4_FLOW;
2564 break;
2565 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2566 fsp->flow_type = SCTP_V4_FLOW;
2567 break;
2568 case IXGBE_ATR_FLOW_TYPE_IPV4:
2569 fsp->flow_type = IP_USER_FLOW;
2570 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2571 fsp->h_u.usr_ip4_spec.proto = 0;
2572 fsp->m_u.usr_ip4_spec.proto = 0;
2573 break;
2574 default:
2575 return -EINVAL;
2576 }
2577
2578 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2579 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2580 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2581 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2582 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2583 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2584 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2585 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2586 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2587 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2588 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2589 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2590 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2591 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2592 fsp->flow_type |= FLOW_EXT;
2593
2594 /* record action */
2595 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2596 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2597 else
2598 fsp->ring_cookie = rule->action;
2599
2600 return 0;
2601}
2602
2603static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2604 struct ethtool_rxnfc *cmd,
2605 u32 *rule_locs)
2606{
2607 struct hlist_node *node2;
2608 struct ixgbe_fdir_filter *rule;
2609 int cnt = 0;
2610
2611 /* report total rule count */
2612 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2613
2614 hlist_for_each_entry_safe(rule, node2,
2615 &adapter->fdir_filter_list, fdir_node) {
2616 if (cnt == cmd->rule_cnt)
2617 return -EMSGSIZE;
2618 rule_locs[cnt] = rule->sw_idx;
2619 cnt++;
2620 }
2621
2622 cmd->rule_cnt = cnt;
2623
2624 return 0;
2625}
2626
2627static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2628 struct ethtool_rxnfc *cmd)
2629{
2630 cmd->data = 0;
2631
2632 /* Report default options for RSS on ixgbe */
2633 switch (cmd->flow_type) {
2634 case TCP_V4_FLOW:
2635 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2636 fallthrough;
2637 case UDP_V4_FLOW:
2638 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2639 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2640 fallthrough;
2641 case SCTP_V4_FLOW:
2642 case AH_ESP_V4_FLOW:
2643 case AH_V4_FLOW:
2644 case ESP_V4_FLOW:
2645 case IPV4_FLOW:
2646 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2647 break;
2648 case TCP_V6_FLOW:
2649 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2650 fallthrough;
2651 case UDP_V6_FLOW:
2652 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2653 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2654 fallthrough;
2655 case SCTP_V6_FLOW:
2656 case AH_ESP_V6_FLOW:
2657 case AH_V6_FLOW:
2658 case ESP_V6_FLOW:
2659 case IPV6_FLOW:
2660 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2661 break;
2662 default:
2663 return -EINVAL;
2664 }
2665
2666 return 0;
2667}
2668
2669static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2670{
2671 if (adapter->hw.mac.type < ixgbe_mac_X550)
2672 return 16;
2673 else
2674 return 64;
2675}
2676
2677static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2678 u32 *rule_locs)
2679{
2680 struct ixgbe_adapter *adapter = netdev_priv(dev);
2681 int ret = -EOPNOTSUPP;
2682
2683 switch (cmd->cmd) {
2684 case ETHTOOL_GRXRINGS:
2685 cmd->data = min_t(int, adapter->num_rx_queues,
2686 ixgbe_rss_indir_tbl_max(adapter));
2687 ret = 0;
2688 break;
2689 case ETHTOOL_GRXCLSRLCNT:
2690 cmd->rule_cnt = adapter->fdir_filter_count;
2691 ret = 0;
2692 break;
2693 case ETHTOOL_GRXCLSRULE:
2694 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2695 break;
2696 case ETHTOOL_GRXCLSRLALL:
2697 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2698 break;
2699 case ETHTOOL_GRXFH:
2700 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2701 break;
2702 default:
2703 break;
2704 }
2705
2706 return ret;
2707}
2708
2709int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2710 struct ixgbe_fdir_filter *input,
2711 u16 sw_idx)
2712{
2713 struct ixgbe_hw *hw = &adapter->hw;
2714 struct hlist_node *node2;
2715 struct ixgbe_fdir_filter *rule, *parent;
2716 int err = -EINVAL;
2717
2718 parent = NULL;
2719 rule = NULL;
2720
2721 hlist_for_each_entry_safe(rule, node2,
2722 &adapter->fdir_filter_list, fdir_node) {
2723 /* hash found, or no matching entry */
2724 if (rule->sw_idx >= sw_idx)
2725 break;
2726 parent = rule;
2727 }
2728
2729 /* if there is an old rule occupying our place remove it */
2730 if (rule && (rule->sw_idx == sw_idx)) {
2731 if (!input || (rule->filter.formatted.bkt_hash !=
2732 input->filter.formatted.bkt_hash)) {
2733 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2734 &rule->filter,
2735 sw_idx);
2736 }
2737
2738 hlist_del(&rule->fdir_node);
2739 kfree(rule);
2740 adapter->fdir_filter_count--;
2741 }
2742
2743 /*
2744 * If no input this was a delete, err should be 0 if a rule was
2745 * successfully found and removed from the list else -EINVAL
2746 */
2747 if (!input)
2748 return err;
2749
2750 /* initialize node and set software index */
2751 INIT_HLIST_NODE(&input->fdir_node);
2752
2753 /* add filter to the list */
2754 if (parent)
2755 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2756 else
2757 hlist_add_head(&input->fdir_node,
2758 &adapter->fdir_filter_list);
2759
2760 /* update counts */
2761 adapter->fdir_filter_count++;
2762
2763 return 0;
2764}
2765
2766static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2767 u8 *flow_type)
2768{
2769 switch (fsp->flow_type & ~FLOW_EXT) {
2770 case TCP_V4_FLOW:
2771 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2772 break;
2773 case UDP_V4_FLOW:
2774 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2775 break;
2776 case SCTP_V4_FLOW:
2777 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2778 break;
2779 case IP_USER_FLOW:
2780 switch (fsp->h_u.usr_ip4_spec.proto) {
2781 case IPPROTO_TCP:
2782 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2783 break;
2784 case IPPROTO_UDP:
2785 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2786 break;
2787 case IPPROTO_SCTP:
2788 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2789 break;
2790 case 0:
2791 if (!fsp->m_u.usr_ip4_spec.proto) {
2792 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2793 break;
2794 }
2795 fallthrough;
2796 default:
2797 return 0;
2798 }
2799 break;
2800 default:
2801 return 0;
2802 }
2803
2804 return 1;
2805}
2806
2807static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2808 struct ethtool_rxnfc *cmd)
2809{
2810 struct ethtool_rx_flow_spec *fsp =
2811 (struct ethtool_rx_flow_spec *)&cmd->fs;
2812 struct ixgbe_hw *hw = &adapter->hw;
2813 struct ixgbe_fdir_filter *input;
2814 union ixgbe_atr_input mask;
2815 u8 queue;
2816 int err;
2817
2818 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2819 return -EOPNOTSUPP;
2820
2821 /* ring_cookie is a masked into a set of queues and ixgbe pools or
2822 * we use the drop index.
2823 */
2824 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2825 queue = IXGBE_FDIR_DROP_QUEUE;
2826 } else {
2827 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2828 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2829
2830 if (!vf && (ring >= adapter->num_rx_queues))
2831 return -EINVAL;
2832 else if (vf &&
2833 ((vf > adapter->num_vfs) ||
2834 ring >= adapter->num_rx_queues_per_pool))
2835 return -EINVAL;
2836
2837 /* Map the ring onto the absolute queue index */
2838 if (!vf)
2839 queue = adapter->rx_ring[ring]->reg_idx;
2840 else
2841 queue = ((vf - 1) *
2842 adapter->num_rx_queues_per_pool) + ring;
2843 }
2844
2845 /* Don't allow indexes to exist outside of available space */
2846 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2847 e_err(drv, "Location out of range\n");
2848 return -EINVAL;
2849 }
2850
2851 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2852 if (!input)
2853 return -ENOMEM;
2854
2855 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2856
2857 /* set SW index */
2858 input->sw_idx = fsp->location;
2859
2860 /* record flow type */
2861 if (!ixgbe_flowspec_to_flow_type(fsp,
2862 &input->filter.formatted.flow_type)) {
2863 e_err(drv, "Unrecognized flow type\n");
2864 goto err_out;
2865 }
2866
2867 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2868 IXGBE_ATR_L4TYPE_MASK;
2869
2870 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2871 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2872
2873 /* Copy input into formatted structures */
2874 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2875 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2876 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2877 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2878 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2879 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2880 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2881 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2882
2883 if (fsp->flow_type & FLOW_EXT) {
2884 input->filter.formatted.vm_pool =
2885 (unsigned char)ntohl(fsp->h_ext.data[1]);
2886 mask.formatted.vm_pool =
2887 (unsigned char)ntohl(fsp->m_ext.data[1]);
2888 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2889 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2890 input->filter.formatted.flex_bytes =
2891 fsp->h_ext.vlan_etype;
2892 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2893 }
2894
2895 /* determine if we need to drop or route the packet */
2896 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2897 input->action = IXGBE_FDIR_DROP_QUEUE;
2898 else
2899 input->action = fsp->ring_cookie;
2900
2901 spin_lock(&adapter->fdir_perfect_lock);
2902
2903 if (hlist_empty(&adapter->fdir_filter_list)) {
2904 /* save mask and program input mask into HW */
2905 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2906 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2907 if (err) {
2908 e_err(drv, "Error writing mask\n");
2909 goto err_out_w_lock;
2910 }
2911 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2912 e_err(drv, "Only one mask supported per port\n");
2913 goto err_out_w_lock;
2914 }
2915
2916 /* apply mask and compute/store hash */
2917 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2918
2919 /* program filters to filter memory */
2920 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2921 &input->filter, input->sw_idx, queue);
2922 if (err)
2923 goto err_out_w_lock;
2924
2925 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2926
2927 spin_unlock(&adapter->fdir_perfect_lock);
2928
2929 return err;
2930err_out_w_lock:
2931 spin_unlock(&adapter->fdir_perfect_lock);
2932err_out:
2933 kfree(input);
2934 return -EINVAL;
2935}
2936
2937static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2938 struct ethtool_rxnfc *cmd)
2939{
2940 struct ethtool_rx_flow_spec *fsp =
2941 (struct ethtool_rx_flow_spec *)&cmd->fs;
2942 int err;
2943
2944 spin_lock(&adapter->fdir_perfect_lock);
2945 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2946 spin_unlock(&adapter->fdir_perfect_lock);
2947
2948 return err;
2949}
2950
2951#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2952 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2953static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2954 struct ethtool_rxnfc *nfc)
2955{
2956 u32 flags2 = adapter->flags2;
2957
2958 /*
2959 * RSS does not support anything other than hashing
2960 * to queues on src and dst IPs and ports
2961 */
2962 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2963 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2964 return -EINVAL;
2965
2966 switch (nfc->flow_type) {
2967 case TCP_V4_FLOW:
2968 case TCP_V6_FLOW:
2969 if (!(nfc->data & RXH_IP_SRC) ||
2970 !(nfc->data & RXH_IP_DST) ||
2971 !(nfc->data & RXH_L4_B_0_1) ||
2972 !(nfc->data & RXH_L4_B_2_3))
2973 return -EINVAL;
2974 break;
2975 case UDP_V4_FLOW:
2976 if (!(nfc->data & RXH_IP_SRC) ||
2977 !(nfc->data & RXH_IP_DST))
2978 return -EINVAL;
2979 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2980 case 0:
2981 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2982 break;
2983 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2984 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2985 break;
2986 default:
2987 return -EINVAL;
2988 }
2989 break;
2990 case UDP_V6_FLOW:
2991 if (!(nfc->data & RXH_IP_SRC) ||
2992 !(nfc->data & RXH_IP_DST))
2993 return -EINVAL;
2994 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2995 case 0:
2996 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2997 break;
2998 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2999 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
3000 break;
3001 default:
3002 return -EINVAL;
3003 }
3004 break;
3005 case AH_ESP_V4_FLOW:
3006 case AH_V4_FLOW:
3007 case ESP_V4_FLOW:
3008 case SCTP_V4_FLOW:
3009 case AH_ESP_V6_FLOW:
3010 case AH_V6_FLOW:
3011 case ESP_V6_FLOW:
3012 case SCTP_V6_FLOW:
3013 if (!(nfc->data & RXH_IP_SRC) ||
3014 !(nfc->data & RXH_IP_DST) ||
3015 (nfc->data & RXH_L4_B_0_1) ||
3016 (nfc->data & RXH_L4_B_2_3))
3017 return -EINVAL;
3018 break;
3019 default:
3020 return -EINVAL;
3021 }
3022
3023 /* if we changed something we need to update flags */
3024 if (flags2 != adapter->flags2) {
3025 struct ixgbe_hw *hw = &adapter->hw;
3026 u32 mrqc;
3027 unsigned int pf_pool = adapter->num_vfs;
3028
3029 if ((hw->mac.type >= ixgbe_mac_X550) &&
3030 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3031 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
3032 else
3033 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3034
3035 if ((flags2 & UDP_RSS_FLAGS) &&
3036 !(adapter->flags2 & UDP_RSS_FLAGS))
3037 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
3038
3039 adapter->flags2 = flags2;
3040
3041 /* Perform hash on these packet types */
3042 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
3043 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3044 | IXGBE_MRQC_RSS_FIELD_IPV6
3045 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3046
3047 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3048 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3049
3050 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3051 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3052
3053 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3054 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3055
3056 if ((hw->mac.type >= ixgbe_mac_X550) &&
3057 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3058 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3059 else
3060 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3061 }
3062
3063 return 0;
3064}
3065
3066static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3067{
3068 struct ixgbe_adapter *adapter = netdev_priv(dev);
3069 int ret = -EOPNOTSUPP;
3070
3071 switch (cmd->cmd) {
3072 case ETHTOOL_SRXCLSRLINS:
3073 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3074 break;
3075 case ETHTOOL_SRXCLSRLDEL:
3076 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3077 break;
3078 case ETHTOOL_SRXFH:
3079 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3080 break;
3081 default:
3082 break;
3083 }
3084
3085 return ret;
3086}
3087
3088static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3089{
3090 return IXGBE_RSS_KEY_SIZE;
3091}
3092
3093static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3094{
3095 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3096
3097 return ixgbe_rss_indir_tbl_entries(adapter);
3098}
3099
3100static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3101{
3102 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3103 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3104
3105 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3106 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3107
3108 for (i = 0; i < reta_size; i++)
3109 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3110}
3111
3112static int ixgbe_get_rxfh(struct net_device *netdev,
3113 struct ethtool_rxfh_param *rxfh)
3114{
3115 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3116
3117 rxfh->hfunc = ETH_RSS_HASH_TOP;
3118
3119 if (rxfh->indir)
3120 ixgbe_get_reta(adapter, rxfh->indir);
3121
3122 if (rxfh->key)
3123 memcpy(rxfh->key, adapter->rss_key,
3124 ixgbe_get_rxfh_key_size(netdev));
3125
3126 return 0;
3127}
3128
3129static int ixgbe_set_rxfh(struct net_device *netdev,
3130 struct ethtool_rxfh_param *rxfh,
3131 struct netlink_ext_ack *extack)
3132{
3133 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3134 int i;
3135 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3136
3137 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3138 rxfh->hfunc != ETH_RSS_HASH_TOP)
3139 return -EOPNOTSUPP;
3140
3141 /* Fill out the redirection table */
3142 if (rxfh->indir) {
3143 int max_queues = min_t(int, adapter->num_rx_queues,
3144 ixgbe_rss_indir_tbl_max(adapter));
3145
3146 /*Allow at least 2 queues w/ SR-IOV.*/
3147 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3148 (max_queues < 2))
3149 max_queues = 2;
3150
3151 /* Verify user input. */
3152 for (i = 0; i < reta_entries; i++)
3153 if (rxfh->indir[i] >= max_queues)
3154 return -EINVAL;
3155
3156 for (i = 0; i < reta_entries; i++)
3157 adapter->rss_indir_tbl[i] = rxfh->indir[i];
3158
3159 ixgbe_store_reta(adapter);
3160 }
3161
3162 /* Fill out the rss hash key */
3163 if (rxfh->key) {
3164 memcpy(adapter->rss_key, rxfh->key,
3165 ixgbe_get_rxfh_key_size(netdev));
3166 ixgbe_store_key(adapter);
3167 }
3168
3169 return 0;
3170}
3171
3172static int ixgbe_get_ts_info(struct net_device *dev,
3173 struct kernel_ethtool_ts_info *info)
3174{
3175 struct ixgbe_adapter *adapter = netdev_priv(dev);
3176
3177 /* we always support timestamping disabled */
3178 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3179
3180 switch (adapter->hw.mac.type) {
3181 case ixgbe_mac_X550:
3182 case ixgbe_mac_X550EM_x:
3183 case ixgbe_mac_x550em_a:
3184 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3185 break;
3186 case ixgbe_mac_X540:
3187 case ixgbe_mac_82599EB:
3188 info->rx_filters |=
3189 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3190 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3191 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3192 break;
3193 default:
3194 return ethtool_op_get_ts_info(dev, info);
3195 }
3196
3197 info->so_timestamping =
3198 SOF_TIMESTAMPING_TX_SOFTWARE |
3199 SOF_TIMESTAMPING_TX_HARDWARE |
3200 SOF_TIMESTAMPING_RX_HARDWARE |
3201 SOF_TIMESTAMPING_RAW_HARDWARE;
3202
3203 if (adapter->ptp_clock)
3204 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3205
3206 info->tx_types =
3207 BIT(HWTSTAMP_TX_OFF) |
3208 BIT(HWTSTAMP_TX_ON);
3209
3210 return 0;
3211}
3212
3213static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3214{
3215 unsigned int max_combined;
3216 u8 tcs = adapter->hw_tcs;
3217
3218 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3219 /* We only support one q_vector without MSI-X */
3220 max_combined = 1;
3221 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3222 /* Limit value based on the queue mask */
3223 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3224 } else if (tcs > 1) {
3225 /* For DCB report channels per traffic class */
3226 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3227 /* 8 TC w/ 4 queues per TC */
3228 max_combined = 4;
3229 } else if (tcs > 4) {
3230 /* 8 TC w/ 8 queues per TC */
3231 max_combined = 8;
3232 } else {
3233 /* 4 TC w/ 16 queues per TC */
3234 max_combined = 16;
3235 }
3236 } else if (adapter->atr_sample_rate) {
3237 /* support up to 64 queues with ATR */
3238 max_combined = IXGBE_MAX_FDIR_INDICES;
3239 } else {
3240 /* support up to 16 queues with RSS */
3241 max_combined = ixgbe_max_rss_indices(adapter);
3242 }
3243
3244 return min_t(int, max_combined, num_online_cpus());
3245}
3246
3247static void ixgbe_get_channels(struct net_device *dev,
3248 struct ethtool_channels *ch)
3249{
3250 struct ixgbe_adapter *adapter = netdev_priv(dev);
3251
3252 /* report maximum channels */
3253 ch->max_combined = ixgbe_max_channels(adapter);
3254
3255 /* report info for other vector */
3256 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3257 ch->max_other = NON_Q_VECTORS;
3258 ch->other_count = NON_Q_VECTORS;
3259 }
3260
3261 /* record RSS queues */
3262 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3263
3264 /* nothing else to report if RSS is disabled */
3265 if (ch->combined_count == 1)
3266 return;
3267
3268 /* we do not support ATR queueing if SR-IOV is enabled */
3269 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3270 return;
3271
3272 /* same thing goes for being DCB enabled */
3273 if (adapter->hw_tcs > 1)
3274 return;
3275
3276 /* if ATR is disabled we can exit */
3277 if (!adapter->atr_sample_rate)
3278 return;
3279
3280 /* report flow director queues as maximum channels */
3281 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3282}
3283
3284static int ixgbe_set_channels(struct net_device *dev,
3285 struct ethtool_channels *ch)
3286{
3287 struct ixgbe_adapter *adapter = netdev_priv(dev);
3288 unsigned int count = ch->combined_count;
3289 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3290
3291 /* verify they are not requesting separate vectors */
3292 if (!count || ch->rx_count || ch->tx_count)
3293 return -EINVAL;
3294
3295 /* verify other_count has not changed */
3296 if (ch->other_count != NON_Q_VECTORS)
3297 return -EINVAL;
3298
3299 /* verify the number of channels does not exceed hardware limits */
3300 if (count > ixgbe_max_channels(adapter))
3301 return -EINVAL;
3302
3303 /* update feature limits from largest to smallest supported values */
3304 adapter->ring_feature[RING_F_FDIR].limit = count;
3305
3306 /* cap RSS limit */
3307 if (count > max_rss_indices)
3308 count = max_rss_indices;
3309 adapter->ring_feature[RING_F_RSS].limit = count;
3310
3311#ifdef IXGBE_FCOE
3312 /* cap FCoE limit at 8 */
3313 if (count > IXGBE_FCRETA_SIZE)
3314 count = IXGBE_FCRETA_SIZE;
3315 adapter->ring_feature[RING_F_FCOE].limit = count;
3316
3317#endif
3318 /* use setup TC to update any traffic class queue mapping */
3319 return ixgbe_setup_tc(dev, adapter->hw_tcs);
3320}
3321
3322static int ixgbe_get_module_info(struct net_device *dev,
3323 struct ethtool_modinfo *modinfo)
3324{
3325 struct ixgbe_adapter *adapter = netdev_priv(dev);
3326 struct ixgbe_hw *hw = &adapter->hw;
3327 u8 sff8472_rev, addr_mode;
3328 bool page_swap = false;
3329 int status;
3330
3331 if (hw->phy.type == ixgbe_phy_fw)
3332 return -ENXIO;
3333
3334 /* Check whether we support SFF-8472 or not */
3335 status = hw->phy.ops.read_i2c_eeprom(hw,
3336 IXGBE_SFF_SFF_8472_COMP,
3337 &sff8472_rev);
3338 if (status)
3339 return -EIO;
3340
3341 /* addressing mode is not supported */
3342 status = hw->phy.ops.read_i2c_eeprom(hw,
3343 IXGBE_SFF_SFF_8472_SWAP,
3344 &addr_mode);
3345 if (status)
3346 return -EIO;
3347
3348 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3349 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3350 page_swap = true;
3351 }
3352
3353 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3354 !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3355 /* We have a SFP, but it does not support SFF-8472 */
3356 modinfo->type = ETH_MODULE_SFF_8079;
3357 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3358 } else {
3359 /* We have a SFP which supports a revision of SFF-8472. */
3360 modinfo->type = ETH_MODULE_SFF_8472;
3361 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3362 }
3363
3364 return 0;
3365}
3366
3367static int ixgbe_get_module_eeprom(struct net_device *dev,
3368 struct ethtool_eeprom *ee,
3369 u8 *data)
3370{
3371 struct ixgbe_adapter *adapter = netdev_priv(dev);
3372 struct ixgbe_hw *hw = &adapter->hw;
3373 int status = -EFAULT;
3374 u8 databyte = 0xFF;
3375 int i = 0;
3376
3377 if (ee->len == 0)
3378 return -EINVAL;
3379
3380 if (hw->phy.type == ixgbe_phy_fw)
3381 return -ENXIO;
3382
3383 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3384 /* I2C reads can take long time */
3385 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3386 return -EBUSY;
3387
3388 if (i < ETH_MODULE_SFF_8079_LEN)
3389 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3390 else
3391 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3392
3393 if (status)
3394 return -EIO;
3395
3396 data[i - ee->offset] = databyte;
3397 }
3398
3399 return 0;
3400}
3401
3402static const struct {
3403 ixgbe_link_speed mac_speed;
3404 u32 link_mode;
3405} ixgbe_ls_map[] = {
3406 { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
3407 { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
3408 { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
3409 { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
3410 { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
3411};
3412
3413static const struct {
3414 u32 lp_advertised;
3415 u32 link_mode;
3416} ixgbe_lp_map[] = {
3417 { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
3418 { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
3419 { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
3420 { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
3421 { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
3422 { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
3423};
3424
3425static int
3426ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
3427{
3428 __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
3429 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3430 struct ixgbe_hw *hw = &adapter->hw;
3431 int rc;
3432 u16 i;
3433
3434 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3435 if (rc)
3436 return rc;
3437
3438 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3439 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3440 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3441 edata->lp_advertised);
3442 }
3443
3444 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3445 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3446 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3447 edata->supported);
3448 }
3449
3450 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3451 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3452 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3453 edata->advertised);
3454 }
3455
3456 edata->eee_enabled = !linkmode_empty(edata->advertised);
3457 edata->tx_lpi_enabled = edata->eee_enabled;
3458
3459 linkmode_and(common, edata->advertised, edata->lp_advertised);
3460 edata->eee_active = !linkmode_empty(common);
3461
3462 return 0;
3463}
3464
3465static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
3466{
3467 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3468 struct ixgbe_hw *hw = &adapter->hw;
3469
3470 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3471 return -EOPNOTSUPP;
3472
3473 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3474 return ixgbe_get_eee_fw(adapter, edata);
3475
3476 return -EOPNOTSUPP;
3477}
3478
3479static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
3480{
3481 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3482 struct ixgbe_hw *hw = &adapter->hw;
3483 struct ethtool_keee eee_data;
3484 int ret_val;
3485
3486 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3487 return -EOPNOTSUPP;
3488
3489 memset(&eee_data, 0, sizeof(struct ethtool_keee));
3490
3491 ret_val = ixgbe_get_eee(netdev, &eee_data);
3492 if (ret_val)
3493 return ret_val;
3494
3495 if (eee_data.eee_enabled && !edata->eee_enabled) {
3496 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3497 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3498 return -EINVAL;
3499 }
3500
3501 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3502 e_err(drv,
3503 "Setting EEE Tx LPI timer is not supported\n");
3504 return -EINVAL;
3505 }
3506
3507 if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
3508 e_err(drv,
3509 "Setting EEE advertised speeds is not supported\n");
3510 return -EINVAL;
3511 }
3512 }
3513
3514 if (eee_data.eee_enabled != edata->eee_enabled) {
3515 if (edata->eee_enabled) {
3516 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3517 hw->phy.eee_speeds_advertised =
3518 hw->phy.eee_speeds_supported;
3519 } else {
3520 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3521 hw->phy.eee_speeds_advertised = 0;
3522 }
3523
3524 /* reset link */
3525 if (netif_running(netdev))
3526 ixgbe_reinit_locked(adapter);
3527 else
3528 ixgbe_reset(adapter);
3529 }
3530
3531 return 0;
3532}
3533
3534static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3535{
3536 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3537 u32 priv_flags = 0;
3538
3539 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3540 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3541
3542 if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3543 priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
3544
3545 if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
3546 priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
3547
3548 return priv_flags;
3549}
3550
3551static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3552{
3553 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3554 unsigned int flags2 = adapter->flags2;
3555 unsigned int i;
3556
3557 flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3558 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3559 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3560
3561 flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
3562 if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
3563 flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
3564
3565 flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
3566 if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
3567 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3568 /* Reset primary abort counter */
3569 for (i = 0; i < adapter->num_vfs; i++)
3570 adapter->vfinfo[i].primary_abort_count = 0;
3571
3572 flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
3573 } else {
3574 e_info(probe,
3575 "Cannot set private flags: Operation not supported\n");
3576 return -EOPNOTSUPP;
3577 }
3578 }
3579
3580 if (flags2 != adapter->flags2) {
3581 adapter->flags2 = flags2;
3582
3583 /* reset interface to repopulate queues */
3584 if (netif_running(netdev))
3585 ixgbe_reinit_locked(adapter);
3586 }
3587
3588 return 0;
3589}
3590
3591static const struct ethtool_ops ixgbe_ethtool_ops = {
3592 .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
3593 .get_drvinfo = ixgbe_get_drvinfo,
3594 .get_regs_len = ixgbe_get_regs_len,
3595 .get_regs = ixgbe_get_regs,
3596 .get_wol = ixgbe_get_wol,
3597 .set_wol = ixgbe_set_wol,
3598 .nway_reset = ixgbe_nway_reset,
3599 .get_link = ethtool_op_get_link,
3600 .get_eeprom_len = ixgbe_get_eeprom_len,
3601 .get_eeprom = ixgbe_get_eeprom,
3602 .set_eeprom = ixgbe_set_eeprom,
3603 .get_ringparam = ixgbe_get_ringparam,
3604 .set_ringparam = ixgbe_set_ringparam,
3605 .get_pause_stats = ixgbe_get_pause_stats,
3606 .get_pauseparam = ixgbe_get_pauseparam,
3607 .set_pauseparam = ixgbe_set_pauseparam,
3608 .get_msglevel = ixgbe_get_msglevel,
3609 .set_msglevel = ixgbe_set_msglevel,
3610 .self_test = ixgbe_diag_test,
3611 .get_strings = ixgbe_get_strings,
3612 .set_phys_id = ixgbe_set_phys_id,
3613 .get_sset_count = ixgbe_get_sset_count,
3614 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3615 .get_coalesce = ixgbe_get_coalesce,
3616 .set_coalesce = ixgbe_set_coalesce,
3617 .get_rxnfc = ixgbe_get_rxnfc,
3618 .set_rxnfc = ixgbe_set_rxnfc,
3619 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3620 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3621 .get_rxfh = ixgbe_get_rxfh,
3622 .set_rxfh = ixgbe_set_rxfh,
3623 .get_eee = ixgbe_get_eee,
3624 .set_eee = ixgbe_set_eee,
3625 .get_channels = ixgbe_get_channels,
3626 .set_channels = ixgbe_set_channels,
3627 .get_priv_flags = ixgbe_get_priv_flags,
3628 .set_priv_flags = ixgbe_set_priv_flags,
3629 .get_ts_info = ixgbe_get_ts_info,
3630 .get_module_info = ixgbe_get_module_info,
3631 .get_module_eeprom = ixgbe_get_module_eeprom,
3632 .get_link_ksettings = ixgbe_get_link_ksettings,
3633 .set_link_ksettings = ixgbe_set_link_ksettings,
3634};
3635
3636void ixgbe_set_ethtool_ops(struct net_device *netdev)
3637{
3638 netdev->ethtool_ops = &ixgbe_ethtool_ops;
3639}
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2016 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/* ethtool support for ixgbe */
30
31#include <linux/interrupt.h>
32#include <linux/types.h>
33#include <linux/module.h>
34#include <linux/slab.h>
35#include <linux/pci.h>
36#include <linux/netdevice.h>
37#include <linux/ethtool.h>
38#include <linux/vmalloc.h>
39#include <linux/highmem.h>
40#include <linux/uaccess.h>
41
42#include "ixgbe.h"
43#include "ixgbe_phy.h"
44
45
46#define IXGBE_ALL_RAR_ENTRIES 16
47
48enum {NETDEV_STATS, IXGBE_STATS};
49
50struct ixgbe_stats {
51 char stat_string[ETH_GSTRING_LEN];
52 int type;
53 int sizeof_stat;
54 int stat_offset;
55};
56
57#define IXGBE_STAT(m) IXGBE_STATS, \
58 sizeof(((struct ixgbe_adapter *)0)->m), \
59 offsetof(struct ixgbe_adapter, m)
60#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
61 sizeof(((struct rtnl_link_stats64 *)0)->m), \
62 offsetof(struct rtnl_link_stats64, m)
63
64static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
65 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
66 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
67 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
68 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
69 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
70 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
71 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
72 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
73 {"lsc_int", IXGBE_STAT(lsc_int)},
74 {"tx_busy", IXGBE_STAT(tx_busy)},
75 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
76 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
77 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
78 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
79 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
80 {"multicast", IXGBE_NETDEV_STAT(multicast)},
81 {"broadcast", IXGBE_STAT(stats.bprc)},
82 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
83 {"collisions", IXGBE_NETDEV_STAT(collisions)},
84 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
85 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
86 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
87 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
88 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
89 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
90 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
91 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
92 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
93 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
94 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
95 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
96 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
97 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
98 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
99 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
100 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
101 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
102 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
103 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
104 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
105 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
106 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
107 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
108 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
109 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
110 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
111 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
112 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
113 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
114#ifdef IXGBE_FCOE
115 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
116 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
117 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
118 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
119 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
120 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
121 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
122 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
123#endif /* IXGBE_FCOE */
124};
125
126/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
127 * we set the num_rx_queues to evaluate to num_tx_queues. This is
128 * used because we do not have a good way to get the max number of
129 * rx queues with CONFIG_RPS disabled.
130 */
131#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
132
133#define IXGBE_QUEUE_STATS_LEN ( \
134 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
135 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
136#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
137#define IXGBE_PB_STATS_LEN ( \
138 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
139 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
140 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
141 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
142 / sizeof(u64))
143#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
144 IXGBE_PB_STATS_LEN + \
145 IXGBE_QUEUE_STATS_LEN)
146
147static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
148 "Register test (offline)", "Eeprom test (offline)",
149 "Interrupt test (offline)", "Loopback test (offline)",
150 "Link test (on/offline)"
151};
152#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
153
154/* currently supported speeds for 10G */
155#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
156 SUPPORTED_10000baseKX4_Full | \
157 SUPPORTED_10000baseKR_Full)
158
159#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
160
161static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
162{
163 if (!ixgbe_isbackplane(hw->phy.media_type))
164 return SUPPORTED_10000baseT_Full;
165
166 switch (hw->device_id) {
167 case IXGBE_DEV_ID_82598:
168 case IXGBE_DEV_ID_82599_KX4:
169 case IXGBE_DEV_ID_82599_KX4_MEZZ:
170 case IXGBE_DEV_ID_X550EM_X_KX4:
171 return SUPPORTED_10000baseKX4_Full;
172 case IXGBE_DEV_ID_82598_BX:
173 case IXGBE_DEV_ID_82599_KR:
174 case IXGBE_DEV_ID_X550EM_X_KR:
175 return SUPPORTED_10000baseKR_Full;
176 default:
177 return SUPPORTED_10000baseKX4_Full |
178 SUPPORTED_10000baseKR_Full;
179 }
180}
181
182static int ixgbe_get_settings(struct net_device *netdev,
183 struct ethtool_cmd *ecmd)
184{
185 struct ixgbe_adapter *adapter = netdev_priv(netdev);
186 struct ixgbe_hw *hw = &adapter->hw;
187 ixgbe_link_speed supported_link;
188 bool autoneg = false;
189
190 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
191
192 /* set the supported link speeds */
193 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
194 ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
195 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
196 ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
197 SUPPORTED_1000baseKX_Full :
198 SUPPORTED_1000baseT_Full;
199 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
200 ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
201 SUPPORTED_1000baseKX_Full :
202 SUPPORTED_1000baseT_Full;
203
204 /* default advertised speed if phy.autoneg_advertised isn't set */
205 ecmd->advertising = ecmd->supported;
206 /* set the advertised speeds */
207 if (hw->phy.autoneg_advertised) {
208 ecmd->advertising = 0;
209 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
210 ecmd->advertising |= ADVERTISED_100baseT_Full;
211 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
212 ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
213 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
214 if (ecmd->supported & SUPPORTED_1000baseKX_Full)
215 ecmd->advertising |= ADVERTISED_1000baseKX_Full;
216 else
217 ecmd->advertising |= ADVERTISED_1000baseT_Full;
218 }
219 } else {
220 if (hw->phy.multispeed_fiber && !autoneg) {
221 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
222 ecmd->advertising = ADVERTISED_10000baseT_Full;
223 }
224 }
225
226 if (autoneg) {
227 ecmd->supported |= SUPPORTED_Autoneg;
228 ecmd->advertising |= ADVERTISED_Autoneg;
229 ecmd->autoneg = AUTONEG_ENABLE;
230 } else
231 ecmd->autoneg = AUTONEG_DISABLE;
232
233 ecmd->transceiver = XCVR_EXTERNAL;
234
235 /* Determine the remaining settings based on the PHY type. */
236 switch (adapter->hw.phy.type) {
237 case ixgbe_phy_tn:
238 case ixgbe_phy_aq:
239 case ixgbe_phy_x550em_ext_t:
240 case ixgbe_phy_cu_unknown:
241 ecmd->supported |= SUPPORTED_TP;
242 ecmd->advertising |= ADVERTISED_TP;
243 ecmd->port = PORT_TP;
244 break;
245 case ixgbe_phy_qt:
246 ecmd->supported |= SUPPORTED_FIBRE;
247 ecmd->advertising |= ADVERTISED_FIBRE;
248 ecmd->port = PORT_FIBRE;
249 break;
250 case ixgbe_phy_nl:
251 case ixgbe_phy_sfp_passive_tyco:
252 case ixgbe_phy_sfp_passive_unknown:
253 case ixgbe_phy_sfp_ftl:
254 case ixgbe_phy_sfp_avago:
255 case ixgbe_phy_sfp_intel:
256 case ixgbe_phy_sfp_unknown:
257 case ixgbe_phy_qsfp_passive_unknown:
258 case ixgbe_phy_qsfp_active_unknown:
259 case ixgbe_phy_qsfp_intel:
260 case ixgbe_phy_qsfp_unknown:
261 /* SFP+ devices, further checking needed */
262 switch (adapter->hw.phy.sfp_type) {
263 case ixgbe_sfp_type_da_cu:
264 case ixgbe_sfp_type_da_cu_core0:
265 case ixgbe_sfp_type_da_cu_core1:
266 ecmd->supported |= SUPPORTED_FIBRE;
267 ecmd->advertising |= ADVERTISED_FIBRE;
268 ecmd->port = PORT_DA;
269 break;
270 case ixgbe_sfp_type_sr:
271 case ixgbe_sfp_type_lr:
272 case ixgbe_sfp_type_srlr_core0:
273 case ixgbe_sfp_type_srlr_core1:
274 case ixgbe_sfp_type_1g_sx_core0:
275 case ixgbe_sfp_type_1g_sx_core1:
276 case ixgbe_sfp_type_1g_lx_core0:
277 case ixgbe_sfp_type_1g_lx_core1:
278 ecmd->supported |= SUPPORTED_FIBRE;
279 ecmd->advertising |= ADVERTISED_FIBRE;
280 ecmd->port = PORT_FIBRE;
281 break;
282 case ixgbe_sfp_type_not_present:
283 ecmd->supported |= SUPPORTED_FIBRE;
284 ecmd->advertising |= ADVERTISED_FIBRE;
285 ecmd->port = PORT_NONE;
286 break;
287 case ixgbe_sfp_type_1g_cu_core0:
288 case ixgbe_sfp_type_1g_cu_core1:
289 ecmd->supported |= SUPPORTED_TP;
290 ecmd->advertising |= ADVERTISED_TP;
291 ecmd->port = PORT_TP;
292 break;
293 case ixgbe_sfp_type_unknown:
294 default:
295 ecmd->supported |= SUPPORTED_FIBRE;
296 ecmd->advertising |= ADVERTISED_FIBRE;
297 ecmd->port = PORT_OTHER;
298 break;
299 }
300 break;
301 case ixgbe_phy_xaui:
302 ecmd->supported |= SUPPORTED_FIBRE;
303 ecmd->advertising |= ADVERTISED_FIBRE;
304 ecmd->port = PORT_NONE;
305 break;
306 case ixgbe_phy_unknown:
307 case ixgbe_phy_generic:
308 case ixgbe_phy_sfp_unsupported:
309 default:
310 ecmd->supported |= SUPPORTED_FIBRE;
311 ecmd->advertising |= ADVERTISED_FIBRE;
312 ecmd->port = PORT_OTHER;
313 break;
314 }
315
316 /* Indicate pause support */
317 ecmd->supported |= SUPPORTED_Pause;
318
319 switch (hw->fc.requested_mode) {
320 case ixgbe_fc_full:
321 ecmd->advertising |= ADVERTISED_Pause;
322 break;
323 case ixgbe_fc_rx_pause:
324 ecmd->advertising |= ADVERTISED_Pause |
325 ADVERTISED_Asym_Pause;
326 break;
327 case ixgbe_fc_tx_pause:
328 ecmd->advertising |= ADVERTISED_Asym_Pause;
329 break;
330 default:
331 ecmd->advertising &= ~(ADVERTISED_Pause |
332 ADVERTISED_Asym_Pause);
333 }
334
335 if (netif_carrier_ok(netdev)) {
336 switch (adapter->link_speed) {
337 case IXGBE_LINK_SPEED_10GB_FULL:
338 ethtool_cmd_speed_set(ecmd, SPEED_10000);
339 break;
340 case IXGBE_LINK_SPEED_2_5GB_FULL:
341 ethtool_cmd_speed_set(ecmd, SPEED_2500);
342 break;
343 case IXGBE_LINK_SPEED_1GB_FULL:
344 ethtool_cmd_speed_set(ecmd, SPEED_1000);
345 break;
346 case IXGBE_LINK_SPEED_100_FULL:
347 ethtool_cmd_speed_set(ecmd, SPEED_100);
348 break;
349 default:
350 break;
351 }
352 ecmd->duplex = DUPLEX_FULL;
353 } else {
354 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
355 ecmd->duplex = DUPLEX_UNKNOWN;
356 }
357
358 return 0;
359}
360
361static int ixgbe_set_settings(struct net_device *netdev,
362 struct ethtool_cmd *ecmd)
363{
364 struct ixgbe_adapter *adapter = netdev_priv(netdev);
365 struct ixgbe_hw *hw = &adapter->hw;
366 u32 advertised, old;
367 s32 err = 0;
368
369 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
370 (hw->phy.multispeed_fiber)) {
371 /*
372 * this function does not support duplex forcing, but can
373 * limit the advertising of the adapter to the specified speed
374 */
375 if (ecmd->advertising & ~ecmd->supported)
376 return -EINVAL;
377
378 /* only allow one speed at a time if no autoneg */
379 if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
380 if (ecmd->advertising ==
381 (ADVERTISED_10000baseT_Full |
382 ADVERTISED_1000baseT_Full))
383 return -EINVAL;
384 }
385
386 old = hw->phy.autoneg_advertised;
387 advertised = 0;
388 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
389 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
390
391 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
392 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
393
394 if (ecmd->advertising & ADVERTISED_100baseT_Full)
395 advertised |= IXGBE_LINK_SPEED_100_FULL;
396
397 if (old == advertised)
398 return err;
399 /* this sets the link speed and restarts auto-neg */
400 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
401 usleep_range(1000, 2000);
402
403 hw->mac.autotry_restart = true;
404 err = hw->mac.ops.setup_link(hw, advertised, true);
405 if (err) {
406 e_info(probe, "setup link failed with code %d\n", err);
407 hw->mac.ops.setup_link(hw, old, true);
408 }
409 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
410 } else {
411 /* in this case we currently only support 10Gb/FULL */
412 u32 speed = ethtool_cmd_speed(ecmd);
413 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
414 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
415 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
416 return -EINVAL;
417 }
418
419 return err;
420}
421
422static void ixgbe_get_pauseparam(struct net_device *netdev,
423 struct ethtool_pauseparam *pause)
424{
425 struct ixgbe_adapter *adapter = netdev_priv(netdev);
426 struct ixgbe_hw *hw = &adapter->hw;
427
428 if (ixgbe_device_supports_autoneg_fc(hw) &&
429 !hw->fc.disable_fc_autoneg)
430 pause->autoneg = 1;
431 else
432 pause->autoneg = 0;
433
434 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
435 pause->rx_pause = 1;
436 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
437 pause->tx_pause = 1;
438 } else if (hw->fc.current_mode == ixgbe_fc_full) {
439 pause->rx_pause = 1;
440 pause->tx_pause = 1;
441 }
442}
443
444static int ixgbe_set_pauseparam(struct net_device *netdev,
445 struct ethtool_pauseparam *pause)
446{
447 struct ixgbe_adapter *adapter = netdev_priv(netdev);
448 struct ixgbe_hw *hw = &adapter->hw;
449 struct ixgbe_fc_info fc = hw->fc;
450
451 /* 82598 does no support link flow control with DCB enabled */
452 if ((hw->mac.type == ixgbe_mac_82598EB) &&
453 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
454 return -EINVAL;
455
456 /* some devices do not support autoneg of link flow control */
457 if ((pause->autoneg == AUTONEG_ENABLE) &&
458 !ixgbe_device_supports_autoneg_fc(hw))
459 return -EINVAL;
460
461 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
462
463 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
464 fc.requested_mode = ixgbe_fc_full;
465 else if (pause->rx_pause && !pause->tx_pause)
466 fc.requested_mode = ixgbe_fc_rx_pause;
467 else if (!pause->rx_pause && pause->tx_pause)
468 fc.requested_mode = ixgbe_fc_tx_pause;
469 else
470 fc.requested_mode = ixgbe_fc_none;
471
472 /* if the thing changed then we'll update and use new autoneg */
473 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
474 hw->fc = fc;
475 if (netif_running(netdev))
476 ixgbe_reinit_locked(adapter);
477 else
478 ixgbe_reset(adapter);
479 }
480
481 return 0;
482}
483
484static u32 ixgbe_get_msglevel(struct net_device *netdev)
485{
486 struct ixgbe_adapter *adapter = netdev_priv(netdev);
487 return adapter->msg_enable;
488}
489
490static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
491{
492 struct ixgbe_adapter *adapter = netdev_priv(netdev);
493 adapter->msg_enable = data;
494}
495
496static int ixgbe_get_regs_len(struct net_device *netdev)
497{
498#define IXGBE_REGS_LEN 1139
499 return IXGBE_REGS_LEN * sizeof(u32);
500}
501
502#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
503
504static void ixgbe_get_regs(struct net_device *netdev,
505 struct ethtool_regs *regs, void *p)
506{
507 struct ixgbe_adapter *adapter = netdev_priv(netdev);
508 struct ixgbe_hw *hw = &adapter->hw;
509 u32 *regs_buff = p;
510 u8 i;
511
512 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
513
514 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
515 hw->device_id;
516
517 /* General Registers */
518 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
519 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
520 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
521 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
522 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
523 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
524 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
525 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
526
527 /* NVM Register */
528 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
529 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
530 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
531 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
532 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
533 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
534 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
535 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
536 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
537 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
538
539 /* Interrupt */
540 /* don't read EICR because it can clear interrupt causes, instead
541 * read EICS which is a shadow but doesn't clear EICR */
542 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
543 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
544 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
545 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
546 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
547 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
548 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
549 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
550 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
551 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
552 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
553 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
554
555 /* Flow Control */
556 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
557 for (i = 0; i < 4; i++)
558 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
559 for (i = 0; i < 8; i++) {
560 switch (hw->mac.type) {
561 case ixgbe_mac_82598EB:
562 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
563 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
564 break;
565 case ixgbe_mac_82599EB:
566 case ixgbe_mac_X540:
567 case ixgbe_mac_X550:
568 case ixgbe_mac_X550EM_x:
569 case ixgbe_mac_x550em_a:
570 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
571 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
572 break;
573 default:
574 break;
575 }
576 }
577 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
578 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
579
580 /* Receive DMA */
581 for (i = 0; i < 64; i++)
582 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
583 for (i = 0; i < 64; i++)
584 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
585 for (i = 0; i < 64; i++)
586 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
587 for (i = 0; i < 64; i++)
588 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
589 for (i = 0; i < 64; i++)
590 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
591 for (i = 0; i < 64; i++)
592 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
593 for (i = 0; i < 16; i++)
594 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
595 for (i = 0; i < 16; i++)
596 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
597 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
598 for (i = 0; i < 8; i++)
599 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
600 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
601 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
602
603 /* Receive */
604 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
605 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
606 for (i = 0; i < 16; i++)
607 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
608 for (i = 0; i < 16; i++)
609 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
610 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
611 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
612 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
613 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
614 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
615 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
616 for (i = 0; i < 8; i++)
617 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
618 for (i = 0; i < 8; i++)
619 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
620 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
621
622 /* Transmit */
623 for (i = 0; i < 32; i++)
624 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
625 for (i = 0; i < 32; i++)
626 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
627 for (i = 0; i < 32; i++)
628 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
629 for (i = 0; i < 32; i++)
630 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
631 for (i = 0; i < 32; i++)
632 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
633 for (i = 0; i < 32; i++)
634 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
635 for (i = 0; i < 32; i++)
636 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
637 for (i = 0; i < 32; i++)
638 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
639 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
640 for (i = 0; i < 16; i++)
641 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
642 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
643 for (i = 0; i < 8; i++)
644 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
645 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
646
647 /* Wake Up */
648 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
649 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
650 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
651 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
652 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
653 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
654 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
655 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
656 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
657
658 /* DCB */
659 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
660 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
661
662 switch (hw->mac.type) {
663 case ixgbe_mac_82598EB:
664 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
665 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
666 for (i = 0; i < 8; i++)
667 regs_buff[833 + i] =
668 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
669 for (i = 0; i < 8; i++)
670 regs_buff[841 + i] =
671 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
672 for (i = 0; i < 8; i++)
673 regs_buff[849 + i] =
674 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
675 for (i = 0; i < 8; i++)
676 regs_buff[857 + i] =
677 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
678 break;
679 case ixgbe_mac_82599EB:
680 case ixgbe_mac_X540:
681 case ixgbe_mac_X550:
682 case ixgbe_mac_X550EM_x:
683 case ixgbe_mac_x550em_a:
684 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
685 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
686 for (i = 0; i < 8; i++)
687 regs_buff[833 + i] =
688 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
689 for (i = 0; i < 8; i++)
690 regs_buff[841 + i] =
691 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
692 for (i = 0; i < 8; i++)
693 regs_buff[849 + i] =
694 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
695 for (i = 0; i < 8; i++)
696 regs_buff[857 + i] =
697 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
698 break;
699 default:
700 break;
701 }
702
703 for (i = 0; i < 8; i++)
704 regs_buff[865 + i] =
705 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
706 for (i = 0; i < 8; i++)
707 regs_buff[873 + i] =
708 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
709
710 /* Statistics */
711 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
712 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
713 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
714 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
715 for (i = 0; i < 8; i++)
716 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
717 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
718 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
719 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
720 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
721 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
722 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
723 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
724 for (i = 0; i < 8; i++)
725 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
726 for (i = 0; i < 8; i++)
727 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
728 for (i = 0; i < 8; i++)
729 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
730 for (i = 0; i < 8; i++)
731 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
732 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
733 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
734 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
735 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
736 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
737 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
738 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
739 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
740 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
741 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
742 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
743 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
744 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
745 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
746 for (i = 0; i < 8; i++)
747 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
748 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
749 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
750 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
751 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
752 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
753 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
754 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
755 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
756 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
757 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
758 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
759 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
760 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
761 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
762 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
763 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
764 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
765 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
766 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
767 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
768 for (i = 0; i < 16; i++)
769 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
770 for (i = 0; i < 16; i++)
771 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
772 for (i = 0; i < 16; i++)
773 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
774 for (i = 0; i < 16; i++)
775 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
776
777 /* MAC */
778 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
779 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
780 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
781 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
782 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
783 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
784 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
785 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
786 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
787 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
788 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
789 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
790 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
791 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
792 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
793 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
794 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
795 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
796 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
797 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
798 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
799 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
800 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
801 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
802 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
803 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
804 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
805 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
806 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
807 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
808 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
809 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
810 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
811
812 /* Diagnostic */
813 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
814 for (i = 0; i < 8; i++)
815 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
816 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
817 for (i = 0; i < 4; i++)
818 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
819 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
820 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
821 for (i = 0; i < 8; i++)
822 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
823 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
824 for (i = 0; i < 4; i++)
825 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
826 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
827 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
828 for (i = 0; i < 4; i++)
829 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
830 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
831 for (i = 0; i < 4; i++)
832 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
833 for (i = 0; i < 8; i++)
834 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
835 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
836 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
837 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
838 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
839 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
840 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
841 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
842 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
843 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
844
845 /* 82599 X540 specific registers */
846 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
847
848 /* 82599 X540 specific DCB registers */
849 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
850 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
851 for (i = 0; i < 4; i++)
852 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
853 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
854 /* same as RTTQCNRM */
855 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
856 /* same as RTTQCNRR */
857
858 /* X540 specific DCB registers */
859 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
860 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
861}
862
863static int ixgbe_get_eeprom_len(struct net_device *netdev)
864{
865 struct ixgbe_adapter *adapter = netdev_priv(netdev);
866 return adapter->hw.eeprom.word_size * 2;
867}
868
869static int ixgbe_get_eeprom(struct net_device *netdev,
870 struct ethtool_eeprom *eeprom, u8 *bytes)
871{
872 struct ixgbe_adapter *adapter = netdev_priv(netdev);
873 struct ixgbe_hw *hw = &adapter->hw;
874 u16 *eeprom_buff;
875 int first_word, last_word, eeprom_len;
876 int ret_val = 0;
877 u16 i;
878
879 if (eeprom->len == 0)
880 return -EINVAL;
881
882 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
883
884 first_word = eeprom->offset >> 1;
885 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
886 eeprom_len = last_word - first_word + 1;
887
888 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
889 if (!eeprom_buff)
890 return -ENOMEM;
891
892 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
893 eeprom_buff);
894
895 /* Device's eeprom is always little-endian, word addressable */
896 for (i = 0; i < eeprom_len; i++)
897 le16_to_cpus(&eeprom_buff[i]);
898
899 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
900 kfree(eeprom_buff);
901
902 return ret_val;
903}
904
905static int ixgbe_set_eeprom(struct net_device *netdev,
906 struct ethtool_eeprom *eeprom, u8 *bytes)
907{
908 struct ixgbe_adapter *adapter = netdev_priv(netdev);
909 struct ixgbe_hw *hw = &adapter->hw;
910 u16 *eeprom_buff;
911 void *ptr;
912 int max_len, first_word, last_word, ret_val = 0;
913 u16 i;
914
915 if (eeprom->len == 0)
916 return -EINVAL;
917
918 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
919 return -EINVAL;
920
921 max_len = hw->eeprom.word_size * 2;
922
923 first_word = eeprom->offset >> 1;
924 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
925 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
926 if (!eeprom_buff)
927 return -ENOMEM;
928
929 ptr = eeprom_buff;
930
931 if (eeprom->offset & 1) {
932 /*
933 * need read/modify/write of first changed EEPROM word
934 * only the second byte of the word is being modified
935 */
936 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
937 if (ret_val)
938 goto err;
939
940 ptr++;
941 }
942 if ((eeprom->offset + eeprom->len) & 1) {
943 /*
944 * need read/modify/write of last changed EEPROM word
945 * only the first byte of the word is being modified
946 */
947 ret_val = hw->eeprom.ops.read(hw, last_word,
948 &eeprom_buff[last_word - first_word]);
949 if (ret_val)
950 goto err;
951 }
952
953 /* Device's eeprom is always little-endian, word addressable */
954 for (i = 0; i < last_word - first_word + 1; i++)
955 le16_to_cpus(&eeprom_buff[i]);
956
957 memcpy(ptr, bytes, eeprom->len);
958
959 for (i = 0; i < last_word - first_word + 1; i++)
960 cpu_to_le16s(&eeprom_buff[i]);
961
962 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
963 last_word - first_word + 1,
964 eeprom_buff);
965
966 /* Update the checksum */
967 if (ret_val == 0)
968 hw->eeprom.ops.update_checksum(hw);
969
970err:
971 kfree(eeprom_buff);
972 return ret_val;
973}
974
975static void ixgbe_get_drvinfo(struct net_device *netdev,
976 struct ethtool_drvinfo *drvinfo)
977{
978 struct ixgbe_adapter *adapter = netdev_priv(netdev);
979 u32 nvm_track_id;
980
981 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
982 strlcpy(drvinfo->version, ixgbe_driver_version,
983 sizeof(drvinfo->version));
984
985 nvm_track_id = (adapter->eeprom_verh << 16) |
986 adapter->eeprom_verl;
987 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
988 nvm_track_id);
989
990 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
991 sizeof(drvinfo->bus_info));
992}
993
994static void ixgbe_get_ringparam(struct net_device *netdev,
995 struct ethtool_ringparam *ring)
996{
997 struct ixgbe_adapter *adapter = netdev_priv(netdev);
998 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
999 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1000
1001 ring->rx_max_pending = IXGBE_MAX_RXD;
1002 ring->tx_max_pending = IXGBE_MAX_TXD;
1003 ring->rx_pending = rx_ring->count;
1004 ring->tx_pending = tx_ring->count;
1005}
1006
1007static int ixgbe_set_ringparam(struct net_device *netdev,
1008 struct ethtool_ringparam *ring)
1009{
1010 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1011 struct ixgbe_ring *temp_ring;
1012 int i, err = 0;
1013 u32 new_rx_count, new_tx_count;
1014
1015 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1016 return -EINVAL;
1017
1018 new_tx_count = clamp_t(u32, ring->tx_pending,
1019 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1020 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1021
1022 new_rx_count = clamp_t(u32, ring->rx_pending,
1023 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1024 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1025
1026 if ((new_tx_count == adapter->tx_ring_count) &&
1027 (new_rx_count == adapter->rx_ring_count)) {
1028 /* nothing to do */
1029 return 0;
1030 }
1031
1032 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1033 usleep_range(1000, 2000);
1034
1035 if (!netif_running(adapter->netdev)) {
1036 for (i = 0; i < adapter->num_tx_queues; i++)
1037 adapter->tx_ring[i]->count = new_tx_count;
1038 for (i = 0; i < adapter->num_rx_queues; i++)
1039 adapter->rx_ring[i]->count = new_rx_count;
1040 adapter->tx_ring_count = new_tx_count;
1041 adapter->rx_ring_count = new_rx_count;
1042 goto clear_reset;
1043 }
1044
1045 /* allocate temporary buffer to store rings in */
1046 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
1047 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
1048
1049 if (!temp_ring) {
1050 err = -ENOMEM;
1051 goto clear_reset;
1052 }
1053
1054 ixgbe_down(adapter);
1055
1056 /*
1057 * Setup new Tx resources and free the old Tx resources in that order.
1058 * We can then assign the new resources to the rings via a memcpy.
1059 * The advantage to this approach is that we are guaranteed to still
1060 * have resources even in the case of an allocation failure.
1061 */
1062 if (new_tx_count != adapter->tx_ring_count) {
1063 for (i = 0; i < adapter->num_tx_queues; i++) {
1064 memcpy(&temp_ring[i], adapter->tx_ring[i],
1065 sizeof(struct ixgbe_ring));
1066
1067 temp_ring[i].count = new_tx_count;
1068 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1069 if (err) {
1070 while (i) {
1071 i--;
1072 ixgbe_free_tx_resources(&temp_ring[i]);
1073 }
1074 goto err_setup;
1075 }
1076 }
1077
1078 for (i = 0; i < adapter->num_tx_queues; i++) {
1079 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1080
1081 memcpy(adapter->tx_ring[i], &temp_ring[i],
1082 sizeof(struct ixgbe_ring));
1083 }
1084
1085 adapter->tx_ring_count = new_tx_count;
1086 }
1087
1088 /* Repeat the process for the Rx rings if needed */
1089 if (new_rx_count != adapter->rx_ring_count) {
1090 for (i = 0; i < adapter->num_rx_queues; i++) {
1091 memcpy(&temp_ring[i], adapter->rx_ring[i],
1092 sizeof(struct ixgbe_ring));
1093
1094 temp_ring[i].count = new_rx_count;
1095 err = ixgbe_setup_rx_resources(&temp_ring[i]);
1096 if (err) {
1097 while (i) {
1098 i--;
1099 ixgbe_free_rx_resources(&temp_ring[i]);
1100 }
1101 goto err_setup;
1102 }
1103
1104 }
1105
1106 for (i = 0; i < adapter->num_rx_queues; i++) {
1107 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1108
1109 memcpy(adapter->rx_ring[i], &temp_ring[i],
1110 sizeof(struct ixgbe_ring));
1111 }
1112
1113 adapter->rx_ring_count = new_rx_count;
1114 }
1115
1116err_setup:
1117 ixgbe_up(adapter);
1118 vfree(temp_ring);
1119clear_reset:
1120 clear_bit(__IXGBE_RESETTING, &adapter->state);
1121 return err;
1122}
1123
1124static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1125{
1126 switch (sset) {
1127 case ETH_SS_TEST:
1128 return IXGBE_TEST_LEN;
1129 case ETH_SS_STATS:
1130 return IXGBE_STATS_LEN;
1131 default:
1132 return -EOPNOTSUPP;
1133 }
1134}
1135
1136static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1137 struct ethtool_stats *stats, u64 *data)
1138{
1139 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1140 struct rtnl_link_stats64 temp;
1141 const struct rtnl_link_stats64 *net_stats;
1142 unsigned int start;
1143 struct ixgbe_ring *ring;
1144 int i, j;
1145 char *p = NULL;
1146
1147 ixgbe_update_stats(adapter);
1148 net_stats = dev_get_stats(netdev, &temp);
1149 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1150 switch (ixgbe_gstrings_stats[i].type) {
1151 case NETDEV_STATS:
1152 p = (char *) net_stats +
1153 ixgbe_gstrings_stats[i].stat_offset;
1154 break;
1155 case IXGBE_STATS:
1156 p = (char *) adapter +
1157 ixgbe_gstrings_stats[i].stat_offset;
1158 break;
1159 default:
1160 data[i] = 0;
1161 continue;
1162 }
1163
1164 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1165 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1166 }
1167 for (j = 0; j < netdev->num_tx_queues; j++) {
1168 ring = adapter->tx_ring[j];
1169 if (!ring) {
1170 data[i] = 0;
1171 data[i+1] = 0;
1172 i += 2;
1173#ifdef BP_EXTENDED_STATS
1174 data[i] = 0;
1175 data[i+1] = 0;
1176 data[i+2] = 0;
1177 i += 3;
1178#endif
1179 continue;
1180 }
1181
1182 do {
1183 start = u64_stats_fetch_begin_irq(&ring->syncp);
1184 data[i] = ring->stats.packets;
1185 data[i+1] = ring->stats.bytes;
1186 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1187 i += 2;
1188#ifdef BP_EXTENDED_STATS
1189 data[i] = ring->stats.yields;
1190 data[i+1] = ring->stats.misses;
1191 data[i+2] = ring->stats.cleaned;
1192 i += 3;
1193#endif
1194 }
1195 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1196 ring = adapter->rx_ring[j];
1197 if (!ring) {
1198 data[i] = 0;
1199 data[i+1] = 0;
1200 i += 2;
1201#ifdef BP_EXTENDED_STATS
1202 data[i] = 0;
1203 data[i+1] = 0;
1204 data[i+2] = 0;
1205 i += 3;
1206#endif
1207 continue;
1208 }
1209
1210 do {
1211 start = u64_stats_fetch_begin_irq(&ring->syncp);
1212 data[i] = ring->stats.packets;
1213 data[i+1] = ring->stats.bytes;
1214 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1215 i += 2;
1216#ifdef BP_EXTENDED_STATS
1217 data[i] = ring->stats.yields;
1218 data[i+1] = ring->stats.misses;
1219 data[i+2] = ring->stats.cleaned;
1220 i += 3;
1221#endif
1222 }
1223
1224 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1225 data[i++] = adapter->stats.pxontxc[j];
1226 data[i++] = adapter->stats.pxofftxc[j];
1227 }
1228 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1229 data[i++] = adapter->stats.pxonrxc[j];
1230 data[i++] = adapter->stats.pxoffrxc[j];
1231 }
1232}
1233
1234static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1235 u8 *data)
1236{
1237 char *p = (char *)data;
1238 int i;
1239
1240 switch (stringset) {
1241 case ETH_SS_TEST:
1242 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1243 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1244 data += ETH_GSTRING_LEN;
1245 }
1246 break;
1247 case ETH_SS_STATS:
1248 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1249 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1250 ETH_GSTRING_LEN);
1251 p += ETH_GSTRING_LEN;
1252 }
1253 for (i = 0; i < netdev->num_tx_queues; i++) {
1254 sprintf(p, "tx_queue_%u_packets", i);
1255 p += ETH_GSTRING_LEN;
1256 sprintf(p, "tx_queue_%u_bytes", i);
1257 p += ETH_GSTRING_LEN;
1258#ifdef BP_EXTENDED_STATS
1259 sprintf(p, "tx_queue_%u_bp_napi_yield", i);
1260 p += ETH_GSTRING_LEN;
1261 sprintf(p, "tx_queue_%u_bp_misses", i);
1262 p += ETH_GSTRING_LEN;
1263 sprintf(p, "tx_queue_%u_bp_cleaned", i);
1264 p += ETH_GSTRING_LEN;
1265#endif /* BP_EXTENDED_STATS */
1266 }
1267 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1268 sprintf(p, "rx_queue_%u_packets", i);
1269 p += ETH_GSTRING_LEN;
1270 sprintf(p, "rx_queue_%u_bytes", i);
1271 p += ETH_GSTRING_LEN;
1272#ifdef BP_EXTENDED_STATS
1273 sprintf(p, "rx_queue_%u_bp_poll_yield", i);
1274 p += ETH_GSTRING_LEN;
1275 sprintf(p, "rx_queue_%u_bp_misses", i);
1276 p += ETH_GSTRING_LEN;
1277 sprintf(p, "rx_queue_%u_bp_cleaned", i);
1278 p += ETH_GSTRING_LEN;
1279#endif /* BP_EXTENDED_STATS */
1280 }
1281 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1282 sprintf(p, "tx_pb_%u_pxon", i);
1283 p += ETH_GSTRING_LEN;
1284 sprintf(p, "tx_pb_%u_pxoff", i);
1285 p += ETH_GSTRING_LEN;
1286 }
1287 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1288 sprintf(p, "rx_pb_%u_pxon", i);
1289 p += ETH_GSTRING_LEN;
1290 sprintf(p, "rx_pb_%u_pxoff", i);
1291 p += ETH_GSTRING_LEN;
1292 }
1293 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1294 break;
1295 }
1296}
1297
1298static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1299{
1300 struct ixgbe_hw *hw = &adapter->hw;
1301 bool link_up;
1302 u32 link_speed = 0;
1303
1304 if (ixgbe_removed(hw->hw_addr)) {
1305 *data = 1;
1306 return 1;
1307 }
1308 *data = 0;
1309
1310 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1311 if (link_up)
1312 return *data;
1313 else
1314 *data = 1;
1315 return *data;
1316}
1317
1318/* ethtool register test data */
1319struct ixgbe_reg_test {
1320 u16 reg;
1321 u8 array_len;
1322 u8 test_type;
1323 u32 mask;
1324 u32 write;
1325};
1326
1327/* In the hardware, registers are laid out either singly, in arrays
1328 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1329 * most tests take place on arrays or single registers (handled
1330 * as a single-element array) and special-case the tables.
1331 * Table tests are always pattern tests.
1332 *
1333 * We also make provision for some required setup steps by specifying
1334 * registers to be written without any read-back testing.
1335 */
1336
1337#define PATTERN_TEST 1
1338#define SET_READ_TEST 2
1339#define WRITE_NO_TEST 3
1340#define TABLE32_TEST 4
1341#define TABLE64_TEST_LO 5
1342#define TABLE64_TEST_HI 6
1343
1344/* default 82599 register test */
1345static const struct ixgbe_reg_test reg_test_82599[] = {
1346 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1347 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1348 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1349 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1350 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1351 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1352 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1353 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1354 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1355 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1356 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1357 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1358 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1359 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1360 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1361 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1362 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1363 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1364 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1365 { .reg = 0 }
1366};
1367
1368/* default 82598 register test */
1369static const struct ixgbe_reg_test reg_test_82598[] = {
1370 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1371 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1372 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1373 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1374 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1375 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1376 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1377 /* Enable all four RX queues before testing. */
1378 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1379 /* RDH is read-only for 82598, only test RDT. */
1380 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1381 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1382 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1383 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1384 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1385 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1386 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1387 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1388 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1389 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1390 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1391 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1392 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1393 { .reg = 0 }
1394};
1395
1396static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1397 u32 mask, u32 write)
1398{
1399 u32 pat, val, before;
1400 static const u32 test_pattern[] = {
1401 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1402
1403 if (ixgbe_removed(adapter->hw.hw_addr)) {
1404 *data = 1;
1405 return true;
1406 }
1407 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1408 before = ixgbe_read_reg(&adapter->hw, reg);
1409 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1410 val = ixgbe_read_reg(&adapter->hw, reg);
1411 if (val != (test_pattern[pat] & write & mask)) {
1412 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1413 reg, val, (test_pattern[pat] & write & mask));
1414 *data = reg;
1415 ixgbe_write_reg(&adapter->hw, reg, before);
1416 return true;
1417 }
1418 ixgbe_write_reg(&adapter->hw, reg, before);
1419 }
1420 return false;
1421}
1422
1423static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1424 u32 mask, u32 write)
1425{
1426 u32 val, before;
1427
1428 if (ixgbe_removed(adapter->hw.hw_addr)) {
1429 *data = 1;
1430 return true;
1431 }
1432 before = ixgbe_read_reg(&adapter->hw, reg);
1433 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1434 val = ixgbe_read_reg(&adapter->hw, reg);
1435 if ((write & mask) != (val & mask)) {
1436 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1437 reg, (val & mask), (write & mask));
1438 *data = reg;
1439 ixgbe_write_reg(&adapter->hw, reg, before);
1440 return true;
1441 }
1442 ixgbe_write_reg(&adapter->hw, reg, before);
1443 return false;
1444}
1445
1446static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1447{
1448 const struct ixgbe_reg_test *test;
1449 u32 value, before, after;
1450 u32 i, toggle;
1451
1452 if (ixgbe_removed(adapter->hw.hw_addr)) {
1453 e_err(drv, "Adapter removed - register test blocked\n");
1454 *data = 1;
1455 return 1;
1456 }
1457 switch (adapter->hw.mac.type) {
1458 case ixgbe_mac_82598EB:
1459 toggle = 0x7FFFF3FF;
1460 test = reg_test_82598;
1461 break;
1462 case ixgbe_mac_82599EB:
1463 case ixgbe_mac_X540:
1464 case ixgbe_mac_X550:
1465 case ixgbe_mac_X550EM_x:
1466 case ixgbe_mac_x550em_a:
1467 toggle = 0x7FFFF30F;
1468 test = reg_test_82599;
1469 break;
1470 default:
1471 *data = 1;
1472 return 1;
1473 }
1474
1475 /*
1476 * Because the status register is such a special case,
1477 * we handle it separately from the rest of the register
1478 * tests. Some bits are read-only, some toggle, and some
1479 * are writeable on newer MACs.
1480 */
1481 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1482 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1483 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1484 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1485 if (value != after) {
1486 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1487 after, value);
1488 *data = 1;
1489 return 1;
1490 }
1491 /* restore previous status */
1492 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1493
1494 /*
1495 * Perform the remainder of the register test, looping through
1496 * the test table until we either fail or reach the null entry.
1497 */
1498 while (test->reg) {
1499 for (i = 0; i < test->array_len; i++) {
1500 bool b = false;
1501
1502 switch (test->test_type) {
1503 case PATTERN_TEST:
1504 b = reg_pattern_test(adapter, data,
1505 test->reg + (i * 0x40),
1506 test->mask,
1507 test->write);
1508 break;
1509 case SET_READ_TEST:
1510 b = reg_set_and_check(adapter, data,
1511 test->reg + (i * 0x40),
1512 test->mask,
1513 test->write);
1514 break;
1515 case WRITE_NO_TEST:
1516 ixgbe_write_reg(&adapter->hw,
1517 test->reg + (i * 0x40),
1518 test->write);
1519 break;
1520 case TABLE32_TEST:
1521 b = reg_pattern_test(adapter, data,
1522 test->reg + (i * 4),
1523 test->mask,
1524 test->write);
1525 break;
1526 case TABLE64_TEST_LO:
1527 b = reg_pattern_test(adapter, data,
1528 test->reg + (i * 8),
1529 test->mask,
1530 test->write);
1531 break;
1532 case TABLE64_TEST_HI:
1533 b = reg_pattern_test(adapter, data,
1534 (test->reg + 4) + (i * 8),
1535 test->mask,
1536 test->write);
1537 break;
1538 }
1539 if (b)
1540 return 1;
1541 }
1542 test++;
1543 }
1544
1545 *data = 0;
1546 return 0;
1547}
1548
1549static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1550{
1551 struct ixgbe_hw *hw = &adapter->hw;
1552 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1553 *data = 1;
1554 else
1555 *data = 0;
1556 return *data;
1557}
1558
1559static irqreturn_t ixgbe_test_intr(int irq, void *data)
1560{
1561 struct net_device *netdev = (struct net_device *) data;
1562 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1563
1564 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1565
1566 return IRQ_HANDLED;
1567}
1568
1569static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1570{
1571 struct net_device *netdev = adapter->netdev;
1572 u32 mask, i = 0, shared_int = true;
1573 u32 irq = adapter->pdev->irq;
1574
1575 *data = 0;
1576
1577 /* Hook up test interrupt handler just for this test */
1578 if (adapter->msix_entries) {
1579 /* NOTE: we don't test MSI-X interrupts here, yet */
1580 return 0;
1581 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1582 shared_int = false;
1583 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1584 netdev)) {
1585 *data = 1;
1586 return -1;
1587 }
1588 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1589 netdev->name, netdev)) {
1590 shared_int = false;
1591 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1592 netdev->name, netdev)) {
1593 *data = 1;
1594 return -1;
1595 }
1596 e_info(hw, "testing %s interrupt\n", shared_int ?
1597 "shared" : "unshared");
1598
1599 /* Disable all the interrupts */
1600 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1601 IXGBE_WRITE_FLUSH(&adapter->hw);
1602 usleep_range(10000, 20000);
1603
1604 /* Test each interrupt */
1605 for (; i < 10; i++) {
1606 /* Interrupt to test */
1607 mask = BIT(i);
1608
1609 if (!shared_int) {
1610 /*
1611 * Disable the interrupts to be reported in
1612 * the cause register and then force the same
1613 * interrupt and see if one gets posted. If
1614 * an interrupt was posted to the bus, the
1615 * test failed.
1616 */
1617 adapter->test_icr = 0;
1618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1619 ~mask & 0x00007FFF);
1620 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1621 ~mask & 0x00007FFF);
1622 IXGBE_WRITE_FLUSH(&adapter->hw);
1623 usleep_range(10000, 20000);
1624
1625 if (adapter->test_icr & mask) {
1626 *data = 3;
1627 break;
1628 }
1629 }
1630
1631 /*
1632 * Enable the interrupt to be reported in the cause
1633 * register and then force the same interrupt and see
1634 * if one gets posted. If an interrupt was not posted
1635 * to the bus, the test failed.
1636 */
1637 adapter->test_icr = 0;
1638 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1639 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1640 IXGBE_WRITE_FLUSH(&adapter->hw);
1641 usleep_range(10000, 20000);
1642
1643 if (!(adapter->test_icr & mask)) {
1644 *data = 4;
1645 break;
1646 }
1647
1648 if (!shared_int) {
1649 /*
1650 * Disable the other interrupts to be reported in
1651 * the cause register and then force the other
1652 * interrupts and see if any get posted. If
1653 * an interrupt was posted to the bus, the
1654 * test failed.
1655 */
1656 adapter->test_icr = 0;
1657 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1658 ~mask & 0x00007FFF);
1659 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1660 ~mask & 0x00007FFF);
1661 IXGBE_WRITE_FLUSH(&adapter->hw);
1662 usleep_range(10000, 20000);
1663
1664 if (adapter->test_icr) {
1665 *data = 5;
1666 break;
1667 }
1668 }
1669 }
1670
1671 /* Disable all the interrupts */
1672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1673 IXGBE_WRITE_FLUSH(&adapter->hw);
1674 usleep_range(10000, 20000);
1675
1676 /* Unhook test interrupt handler */
1677 free_irq(irq, netdev);
1678
1679 return *data;
1680}
1681
1682static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1683{
1684 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1685 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1686 struct ixgbe_hw *hw = &adapter->hw;
1687 u32 reg_ctl;
1688
1689 /* shut down the DMA engines now so they can be reinitialized later */
1690
1691 /* first Rx */
1692 hw->mac.ops.disable_rx(hw);
1693 ixgbe_disable_rx_queue(adapter, rx_ring);
1694
1695 /* now Tx */
1696 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1697 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1698 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1699
1700 switch (hw->mac.type) {
1701 case ixgbe_mac_82599EB:
1702 case ixgbe_mac_X540:
1703 case ixgbe_mac_X550:
1704 case ixgbe_mac_X550EM_x:
1705 case ixgbe_mac_x550em_a:
1706 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1707 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1708 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1709 break;
1710 default:
1711 break;
1712 }
1713
1714 ixgbe_reset(adapter);
1715
1716 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1717 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1718}
1719
1720static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1721{
1722 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1723 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1724 struct ixgbe_hw *hw = &adapter->hw;
1725 u32 rctl, reg_data;
1726 int ret_val;
1727 int err;
1728
1729 /* Setup Tx descriptor ring and Tx buffers */
1730 tx_ring->count = IXGBE_DEFAULT_TXD;
1731 tx_ring->queue_index = 0;
1732 tx_ring->dev = &adapter->pdev->dev;
1733 tx_ring->netdev = adapter->netdev;
1734 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1735
1736 err = ixgbe_setup_tx_resources(tx_ring);
1737 if (err)
1738 return 1;
1739
1740 switch (adapter->hw.mac.type) {
1741 case ixgbe_mac_82599EB:
1742 case ixgbe_mac_X540:
1743 case ixgbe_mac_X550:
1744 case ixgbe_mac_X550EM_x:
1745 case ixgbe_mac_x550em_a:
1746 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1747 reg_data |= IXGBE_DMATXCTL_TE;
1748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1749 break;
1750 default:
1751 break;
1752 }
1753
1754 ixgbe_configure_tx_ring(adapter, tx_ring);
1755
1756 /* Setup Rx Descriptor ring and Rx buffers */
1757 rx_ring->count = IXGBE_DEFAULT_RXD;
1758 rx_ring->queue_index = 0;
1759 rx_ring->dev = &adapter->pdev->dev;
1760 rx_ring->netdev = adapter->netdev;
1761 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1762
1763 err = ixgbe_setup_rx_resources(rx_ring);
1764 if (err) {
1765 ret_val = 4;
1766 goto err_nomem;
1767 }
1768
1769 hw->mac.ops.disable_rx(hw);
1770
1771 ixgbe_configure_rx_ring(adapter, rx_ring);
1772
1773 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1774 rctl |= IXGBE_RXCTRL_DMBYPS;
1775 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1776
1777 hw->mac.ops.enable_rx(hw);
1778
1779 return 0;
1780
1781err_nomem:
1782 ixgbe_free_desc_rings(adapter);
1783 return ret_val;
1784}
1785
1786static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1787{
1788 struct ixgbe_hw *hw = &adapter->hw;
1789 u32 reg_data;
1790
1791
1792 /* Setup MAC loopback */
1793 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1794 reg_data |= IXGBE_HLREG0_LPBK;
1795 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1796
1797 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1798 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1799 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1800
1801 /* X540 and X550 needs to set the MACC.FLU bit to force link up */
1802 switch (adapter->hw.mac.type) {
1803 case ixgbe_mac_X540:
1804 case ixgbe_mac_X550:
1805 case ixgbe_mac_X550EM_x:
1806 case ixgbe_mac_x550em_a:
1807 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1808 reg_data |= IXGBE_MACC_FLU;
1809 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1810 break;
1811 default:
1812 if (hw->mac.orig_autoc) {
1813 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1814 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1815 } else {
1816 return 10;
1817 }
1818 }
1819 IXGBE_WRITE_FLUSH(hw);
1820 usleep_range(10000, 20000);
1821
1822 /* Disable Atlas Tx lanes; re-enabled in reset path */
1823 if (hw->mac.type == ixgbe_mac_82598EB) {
1824 u8 atlas;
1825
1826 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1827 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1828 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1829
1830 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1831 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1832 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1833
1834 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1835 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1836 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1837
1838 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1839 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1840 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1841 }
1842
1843 return 0;
1844}
1845
1846static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1847{
1848 u32 reg_data;
1849
1850 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1851 reg_data &= ~IXGBE_HLREG0_LPBK;
1852 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1853}
1854
1855static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1856 unsigned int frame_size)
1857{
1858 memset(skb->data, 0xFF, frame_size);
1859 frame_size >>= 1;
1860 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1861 memset(&skb->data[frame_size + 10], 0xBE, 1);
1862 memset(&skb->data[frame_size + 12], 0xAF, 1);
1863}
1864
1865static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1866 unsigned int frame_size)
1867{
1868 unsigned char *data;
1869 bool match = true;
1870
1871 frame_size >>= 1;
1872
1873 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1874
1875 if (data[3] != 0xFF ||
1876 data[frame_size + 10] != 0xBE ||
1877 data[frame_size + 12] != 0xAF)
1878 match = false;
1879
1880 kunmap(rx_buffer->page);
1881
1882 return match;
1883}
1884
1885static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1886 struct ixgbe_ring *tx_ring,
1887 unsigned int size)
1888{
1889 union ixgbe_adv_rx_desc *rx_desc;
1890 struct ixgbe_rx_buffer *rx_buffer;
1891 struct ixgbe_tx_buffer *tx_buffer;
1892 u16 rx_ntc, tx_ntc, count = 0;
1893
1894 /* initialize next to clean and descriptor values */
1895 rx_ntc = rx_ring->next_to_clean;
1896 tx_ntc = tx_ring->next_to_clean;
1897 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1898
1899 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1900 /* check Rx buffer */
1901 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1902
1903 /* sync Rx buffer for CPU read */
1904 dma_sync_single_for_cpu(rx_ring->dev,
1905 rx_buffer->dma,
1906 ixgbe_rx_bufsz(rx_ring),
1907 DMA_FROM_DEVICE);
1908
1909 /* verify contents of skb */
1910 if (ixgbe_check_lbtest_frame(rx_buffer, size))
1911 count++;
1912
1913 /* sync Rx buffer for device write */
1914 dma_sync_single_for_device(rx_ring->dev,
1915 rx_buffer->dma,
1916 ixgbe_rx_bufsz(rx_ring),
1917 DMA_FROM_DEVICE);
1918
1919 /* unmap buffer on Tx side */
1920 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1921 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1922
1923 /* increment Rx/Tx next to clean counters */
1924 rx_ntc++;
1925 if (rx_ntc == rx_ring->count)
1926 rx_ntc = 0;
1927 tx_ntc++;
1928 if (tx_ntc == tx_ring->count)
1929 tx_ntc = 0;
1930
1931 /* fetch next descriptor */
1932 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1933 }
1934
1935 netdev_tx_reset_queue(txring_txq(tx_ring));
1936
1937 /* re-map buffers to ring, store next to clean values */
1938 ixgbe_alloc_rx_buffers(rx_ring, count);
1939 rx_ring->next_to_clean = rx_ntc;
1940 tx_ring->next_to_clean = tx_ntc;
1941
1942 return count;
1943}
1944
1945static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1946{
1947 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1948 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1949 int i, j, lc, good_cnt, ret_val = 0;
1950 unsigned int size = 1024;
1951 netdev_tx_t tx_ret_val;
1952 struct sk_buff *skb;
1953 u32 flags_orig = adapter->flags;
1954
1955 /* DCB can modify the frames on Tx */
1956 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
1957
1958 /* allocate test skb */
1959 skb = alloc_skb(size, GFP_KERNEL);
1960 if (!skb)
1961 return 11;
1962
1963 /* place data into test skb */
1964 ixgbe_create_lbtest_frame(skb, size);
1965 skb_put(skb, size);
1966
1967 /*
1968 * Calculate the loop count based on the largest descriptor ring
1969 * The idea is to wrap the largest ring a number of times using 64
1970 * send/receive pairs during each loop
1971 */
1972
1973 if (rx_ring->count <= tx_ring->count)
1974 lc = ((tx_ring->count / 64) * 2) + 1;
1975 else
1976 lc = ((rx_ring->count / 64) * 2) + 1;
1977
1978 for (j = 0; j <= lc; j++) {
1979 /* reset count of good packets */
1980 good_cnt = 0;
1981
1982 /* place 64 packets on the transmit queue*/
1983 for (i = 0; i < 64; i++) {
1984 skb_get(skb);
1985 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1986 adapter,
1987 tx_ring);
1988 if (tx_ret_val == NETDEV_TX_OK)
1989 good_cnt++;
1990 }
1991
1992 if (good_cnt != 64) {
1993 ret_val = 12;
1994 break;
1995 }
1996
1997 /* allow 200 milliseconds for packets to go from Tx to Rx */
1998 msleep(200);
1999
2000 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2001 if (good_cnt != 64) {
2002 ret_val = 13;
2003 break;
2004 }
2005 }
2006
2007 /* free the original skb */
2008 kfree_skb(skb);
2009 adapter->flags = flags_orig;
2010
2011 return ret_val;
2012}
2013
2014static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2015{
2016 *data = ixgbe_setup_desc_rings(adapter);
2017 if (*data)
2018 goto out;
2019 *data = ixgbe_setup_loopback_test(adapter);
2020 if (*data)
2021 goto err_loopback;
2022 *data = ixgbe_run_loopback_test(adapter);
2023 ixgbe_loopback_cleanup(adapter);
2024
2025err_loopback:
2026 ixgbe_free_desc_rings(adapter);
2027out:
2028 return *data;
2029}
2030
2031static void ixgbe_diag_test(struct net_device *netdev,
2032 struct ethtool_test *eth_test, u64 *data)
2033{
2034 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2035 bool if_running = netif_running(netdev);
2036
2037 if (ixgbe_removed(adapter->hw.hw_addr)) {
2038 e_err(hw, "Adapter removed - test blocked\n");
2039 data[0] = 1;
2040 data[1] = 1;
2041 data[2] = 1;
2042 data[3] = 1;
2043 data[4] = 1;
2044 eth_test->flags |= ETH_TEST_FL_FAILED;
2045 return;
2046 }
2047 set_bit(__IXGBE_TESTING, &adapter->state);
2048 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2049 struct ixgbe_hw *hw = &adapter->hw;
2050
2051 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2052 int i;
2053 for (i = 0; i < adapter->num_vfs; i++) {
2054 if (adapter->vfinfo[i].clear_to_send) {
2055 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2056 data[0] = 1;
2057 data[1] = 1;
2058 data[2] = 1;
2059 data[3] = 1;
2060 data[4] = 1;
2061 eth_test->flags |= ETH_TEST_FL_FAILED;
2062 clear_bit(__IXGBE_TESTING,
2063 &adapter->state);
2064 goto skip_ol_tests;
2065 }
2066 }
2067 }
2068
2069 /* Offline tests */
2070 e_info(hw, "offline testing starting\n");
2071
2072 /* Link test performed before hardware reset so autoneg doesn't
2073 * interfere with test result
2074 */
2075 if (ixgbe_link_test(adapter, &data[4]))
2076 eth_test->flags |= ETH_TEST_FL_FAILED;
2077
2078 if (if_running)
2079 /* indicate we're in test mode */
2080 ixgbe_close(netdev);
2081 else
2082 ixgbe_reset(adapter);
2083
2084 e_info(hw, "register testing starting\n");
2085 if (ixgbe_reg_test(adapter, &data[0]))
2086 eth_test->flags |= ETH_TEST_FL_FAILED;
2087
2088 ixgbe_reset(adapter);
2089 e_info(hw, "eeprom testing starting\n");
2090 if (ixgbe_eeprom_test(adapter, &data[1]))
2091 eth_test->flags |= ETH_TEST_FL_FAILED;
2092
2093 ixgbe_reset(adapter);
2094 e_info(hw, "interrupt testing starting\n");
2095 if (ixgbe_intr_test(adapter, &data[2]))
2096 eth_test->flags |= ETH_TEST_FL_FAILED;
2097
2098 /* If SRIOV or VMDq is enabled then skip MAC
2099 * loopback diagnostic. */
2100 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2101 IXGBE_FLAG_VMDQ_ENABLED)) {
2102 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2103 data[3] = 0;
2104 goto skip_loopback;
2105 }
2106
2107 ixgbe_reset(adapter);
2108 e_info(hw, "loopback testing starting\n");
2109 if (ixgbe_loopback_test(adapter, &data[3]))
2110 eth_test->flags |= ETH_TEST_FL_FAILED;
2111
2112skip_loopback:
2113 ixgbe_reset(adapter);
2114
2115 /* clear testing bit and return adapter to previous state */
2116 clear_bit(__IXGBE_TESTING, &adapter->state);
2117 if (if_running)
2118 ixgbe_open(netdev);
2119 else if (hw->mac.ops.disable_tx_laser)
2120 hw->mac.ops.disable_tx_laser(hw);
2121 } else {
2122 e_info(hw, "online testing starting\n");
2123
2124 /* Online tests */
2125 if (ixgbe_link_test(adapter, &data[4]))
2126 eth_test->flags |= ETH_TEST_FL_FAILED;
2127
2128 /* Offline tests aren't run; pass by default */
2129 data[0] = 0;
2130 data[1] = 0;
2131 data[2] = 0;
2132 data[3] = 0;
2133
2134 clear_bit(__IXGBE_TESTING, &adapter->state);
2135 }
2136
2137skip_ol_tests:
2138 msleep_interruptible(4 * 1000);
2139}
2140
2141static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2142 struct ethtool_wolinfo *wol)
2143{
2144 struct ixgbe_hw *hw = &adapter->hw;
2145 int retval = 0;
2146
2147 /* WOL not supported for all devices */
2148 if (!ixgbe_wol_supported(adapter, hw->device_id,
2149 hw->subsystem_device_id)) {
2150 retval = 1;
2151 wol->supported = 0;
2152 }
2153
2154 return retval;
2155}
2156
2157static void ixgbe_get_wol(struct net_device *netdev,
2158 struct ethtool_wolinfo *wol)
2159{
2160 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2161
2162 wol->supported = WAKE_UCAST | WAKE_MCAST |
2163 WAKE_BCAST | WAKE_MAGIC;
2164 wol->wolopts = 0;
2165
2166 if (ixgbe_wol_exclusion(adapter, wol) ||
2167 !device_can_wakeup(&adapter->pdev->dev))
2168 return;
2169
2170 if (adapter->wol & IXGBE_WUFC_EX)
2171 wol->wolopts |= WAKE_UCAST;
2172 if (adapter->wol & IXGBE_WUFC_MC)
2173 wol->wolopts |= WAKE_MCAST;
2174 if (adapter->wol & IXGBE_WUFC_BC)
2175 wol->wolopts |= WAKE_BCAST;
2176 if (adapter->wol & IXGBE_WUFC_MAG)
2177 wol->wolopts |= WAKE_MAGIC;
2178}
2179
2180static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2181{
2182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2183
2184 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2185 return -EOPNOTSUPP;
2186
2187 if (ixgbe_wol_exclusion(adapter, wol))
2188 return wol->wolopts ? -EOPNOTSUPP : 0;
2189
2190 adapter->wol = 0;
2191
2192 if (wol->wolopts & WAKE_UCAST)
2193 adapter->wol |= IXGBE_WUFC_EX;
2194 if (wol->wolopts & WAKE_MCAST)
2195 adapter->wol |= IXGBE_WUFC_MC;
2196 if (wol->wolopts & WAKE_BCAST)
2197 adapter->wol |= IXGBE_WUFC_BC;
2198 if (wol->wolopts & WAKE_MAGIC)
2199 adapter->wol |= IXGBE_WUFC_MAG;
2200
2201 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2202
2203 return 0;
2204}
2205
2206static int ixgbe_nway_reset(struct net_device *netdev)
2207{
2208 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2209
2210 if (netif_running(netdev))
2211 ixgbe_reinit_locked(adapter);
2212
2213 return 0;
2214}
2215
2216static int ixgbe_set_phys_id(struct net_device *netdev,
2217 enum ethtool_phys_id_state state)
2218{
2219 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2220 struct ixgbe_hw *hw = &adapter->hw;
2221
2222 switch (state) {
2223 case ETHTOOL_ID_ACTIVE:
2224 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2225 return 2;
2226
2227 case ETHTOOL_ID_ON:
2228 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2229 break;
2230
2231 case ETHTOOL_ID_OFF:
2232 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2233 break;
2234
2235 case ETHTOOL_ID_INACTIVE:
2236 /* Restore LED settings */
2237 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2238 break;
2239 }
2240
2241 return 0;
2242}
2243
2244static int ixgbe_get_coalesce(struct net_device *netdev,
2245 struct ethtool_coalesce *ec)
2246{
2247 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2248
2249 /* only valid if in constant ITR mode */
2250 if (adapter->rx_itr_setting <= 1)
2251 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2252 else
2253 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2254
2255 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2256 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2257 return 0;
2258
2259 /* only valid if in constant ITR mode */
2260 if (adapter->tx_itr_setting <= 1)
2261 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2262 else
2263 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2264
2265 return 0;
2266}
2267
2268/*
2269 * this function must be called before setting the new value of
2270 * rx_itr_setting
2271 */
2272static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2273{
2274 struct net_device *netdev = adapter->netdev;
2275
2276 /* nothing to do if LRO or RSC are not enabled */
2277 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2278 !(netdev->features & NETIF_F_LRO))
2279 return false;
2280
2281 /* check the feature flag value and enable RSC if necessary */
2282 if (adapter->rx_itr_setting == 1 ||
2283 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2284 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2285 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2286 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2287 return true;
2288 }
2289 /* if interrupt rate is too high then disable RSC */
2290 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2291 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2292 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2293 return true;
2294 }
2295 return false;
2296}
2297
2298static int ixgbe_set_coalesce(struct net_device *netdev,
2299 struct ethtool_coalesce *ec)
2300{
2301 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2302 struct ixgbe_q_vector *q_vector;
2303 int i;
2304 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2305 bool need_reset = false;
2306
2307 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2308 /* reject Tx specific changes in case of mixed RxTx vectors */
2309 if (ec->tx_coalesce_usecs)
2310 return -EINVAL;
2311 tx_itr_prev = adapter->rx_itr_setting;
2312 } else {
2313 tx_itr_prev = adapter->tx_itr_setting;
2314 }
2315
2316 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2317 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2318 return -EINVAL;
2319
2320 if (ec->rx_coalesce_usecs > 1)
2321 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2322 else
2323 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2324
2325 if (adapter->rx_itr_setting == 1)
2326 rx_itr_param = IXGBE_20K_ITR;
2327 else
2328 rx_itr_param = adapter->rx_itr_setting;
2329
2330 if (ec->tx_coalesce_usecs > 1)
2331 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2332 else
2333 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2334
2335 if (adapter->tx_itr_setting == 1)
2336 tx_itr_param = IXGBE_12K_ITR;
2337 else
2338 tx_itr_param = adapter->tx_itr_setting;
2339
2340 /* mixed Rx/Tx */
2341 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2342 adapter->tx_itr_setting = adapter->rx_itr_setting;
2343
2344 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2345 if ((adapter->tx_itr_setting != 1) &&
2346 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2347 if ((tx_itr_prev == 1) ||
2348 (tx_itr_prev >= IXGBE_100K_ITR))
2349 need_reset = true;
2350 } else {
2351 if ((tx_itr_prev != 1) &&
2352 (tx_itr_prev < IXGBE_100K_ITR))
2353 need_reset = true;
2354 }
2355
2356 /* check the old value and enable RSC if necessary */
2357 need_reset |= ixgbe_update_rsc(adapter);
2358
2359 for (i = 0; i < adapter->num_q_vectors; i++) {
2360 q_vector = adapter->q_vector[i];
2361 if (q_vector->tx.count && !q_vector->rx.count)
2362 /* tx only */
2363 q_vector->itr = tx_itr_param;
2364 else
2365 /* rx only or mixed */
2366 q_vector->itr = rx_itr_param;
2367 ixgbe_write_eitr(q_vector);
2368 }
2369
2370 /*
2371 * do reset here at the end to make sure EITR==0 case is handled
2372 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2373 * also locks in RSC enable/disable which requires reset
2374 */
2375 if (need_reset)
2376 ixgbe_do_reset(netdev);
2377
2378 return 0;
2379}
2380
2381static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2382 struct ethtool_rxnfc *cmd)
2383{
2384 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2385 struct ethtool_rx_flow_spec *fsp =
2386 (struct ethtool_rx_flow_spec *)&cmd->fs;
2387 struct hlist_node *node2;
2388 struct ixgbe_fdir_filter *rule = NULL;
2389
2390 /* report total rule count */
2391 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2392
2393 hlist_for_each_entry_safe(rule, node2,
2394 &adapter->fdir_filter_list, fdir_node) {
2395 if (fsp->location <= rule->sw_idx)
2396 break;
2397 }
2398
2399 if (!rule || fsp->location != rule->sw_idx)
2400 return -EINVAL;
2401
2402 /* fill out the flow spec entry */
2403
2404 /* set flow type field */
2405 switch (rule->filter.formatted.flow_type) {
2406 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2407 fsp->flow_type = TCP_V4_FLOW;
2408 break;
2409 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2410 fsp->flow_type = UDP_V4_FLOW;
2411 break;
2412 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2413 fsp->flow_type = SCTP_V4_FLOW;
2414 break;
2415 case IXGBE_ATR_FLOW_TYPE_IPV4:
2416 fsp->flow_type = IP_USER_FLOW;
2417 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2418 fsp->h_u.usr_ip4_spec.proto = 0;
2419 fsp->m_u.usr_ip4_spec.proto = 0;
2420 break;
2421 default:
2422 return -EINVAL;
2423 }
2424
2425 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2426 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2427 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2428 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2429 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2430 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2431 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2432 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2433 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2434 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2435 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2436 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2437 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2438 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2439 fsp->flow_type |= FLOW_EXT;
2440
2441 /* record action */
2442 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2443 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2444 else
2445 fsp->ring_cookie = rule->action;
2446
2447 return 0;
2448}
2449
2450static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2451 struct ethtool_rxnfc *cmd,
2452 u32 *rule_locs)
2453{
2454 struct hlist_node *node2;
2455 struct ixgbe_fdir_filter *rule;
2456 int cnt = 0;
2457
2458 /* report total rule count */
2459 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2460
2461 hlist_for_each_entry_safe(rule, node2,
2462 &adapter->fdir_filter_list, fdir_node) {
2463 if (cnt == cmd->rule_cnt)
2464 return -EMSGSIZE;
2465 rule_locs[cnt] = rule->sw_idx;
2466 cnt++;
2467 }
2468
2469 cmd->rule_cnt = cnt;
2470
2471 return 0;
2472}
2473
2474static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2475 struct ethtool_rxnfc *cmd)
2476{
2477 cmd->data = 0;
2478
2479 /* Report default options for RSS on ixgbe */
2480 switch (cmd->flow_type) {
2481 case TCP_V4_FLOW:
2482 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2483 /* fallthrough */
2484 case UDP_V4_FLOW:
2485 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2486 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2487 /* fallthrough */
2488 case SCTP_V4_FLOW:
2489 case AH_ESP_V4_FLOW:
2490 case AH_V4_FLOW:
2491 case ESP_V4_FLOW:
2492 case IPV4_FLOW:
2493 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2494 break;
2495 case TCP_V6_FLOW:
2496 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2497 /* fallthrough */
2498 case UDP_V6_FLOW:
2499 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2500 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2501 /* fallthrough */
2502 case SCTP_V6_FLOW:
2503 case AH_ESP_V6_FLOW:
2504 case AH_V6_FLOW:
2505 case ESP_V6_FLOW:
2506 case IPV6_FLOW:
2507 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2508 break;
2509 default:
2510 return -EINVAL;
2511 }
2512
2513 return 0;
2514}
2515
2516static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2517 u32 *rule_locs)
2518{
2519 struct ixgbe_adapter *adapter = netdev_priv(dev);
2520 int ret = -EOPNOTSUPP;
2521
2522 switch (cmd->cmd) {
2523 case ETHTOOL_GRXRINGS:
2524 cmd->data = adapter->num_rx_queues;
2525 ret = 0;
2526 break;
2527 case ETHTOOL_GRXCLSRLCNT:
2528 cmd->rule_cnt = adapter->fdir_filter_count;
2529 ret = 0;
2530 break;
2531 case ETHTOOL_GRXCLSRULE:
2532 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2533 break;
2534 case ETHTOOL_GRXCLSRLALL:
2535 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2536 break;
2537 case ETHTOOL_GRXFH:
2538 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2539 break;
2540 default:
2541 break;
2542 }
2543
2544 return ret;
2545}
2546
2547int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2548 struct ixgbe_fdir_filter *input,
2549 u16 sw_idx)
2550{
2551 struct ixgbe_hw *hw = &adapter->hw;
2552 struct hlist_node *node2;
2553 struct ixgbe_fdir_filter *rule, *parent;
2554 int err = -EINVAL;
2555
2556 parent = NULL;
2557 rule = NULL;
2558
2559 hlist_for_each_entry_safe(rule, node2,
2560 &adapter->fdir_filter_list, fdir_node) {
2561 /* hash found, or no matching entry */
2562 if (rule->sw_idx >= sw_idx)
2563 break;
2564 parent = rule;
2565 }
2566
2567 /* if there is an old rule occupying our place remove it */
2568 if (rule && (rule->sw_idx == sw_idx)) {
2569 if (!input || (rule->filter.formatted.bkt_hash !=
2570 input->filter.formatted.bkt_hash)) {
2571 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2572 &rule->filter,
2573 sw_idx);
2574 }
2575
2576 hlist_del(&rule->fdir_node);
2577 kfree(rule);
2578 adapter->fdir_filter_count--;
2579 }
2580
2581 /*
2582 * If no input this was a delete, err should be 0 if a rule was
2583 * successfully found and removed from the list else -EINVAL
2584 */
2585 if (!input)
2586 return err;
2587
2588 /* initialize node and set software index */
2589 INIT_HLIST_NODE(&input->fdir_node);
2590
2591 /* add filter to the list */
2592 if (parent)
2593 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2594 else
2595 hlist_add_head(&input->fdir_node,
2596 &adapter->fdir_filter_list);
2597
2598 /* update counts */
2599 adapter->fdir_filter_count++;
2600
2601 return 0;
2602}
2603
2604static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2605 u8 *flow_type)
2606{
2607 switch (fsp->flow_type & ~FLOW_EXT) {
2608 case TCP_V4_FLOW:
2609 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2610 break;
2611 case UDP_V4_FLOW:
2612 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2613 break;
2614 case SCTP_V4_FLOW:
2615 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2616 break;
2617 case IP_USER_FLOW:
2618 switch (fsp->h_u.usr_ip4_spec.proto) {
2619 case IPPROTO_TCP:
2620 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2621 break;
2622 case IPPROTO_UDP:
2623 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2624 break;
2625 case IPPROTO_SCTP:
2626 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2627 break;
2628 case 0:
2629 if (!fsp->m_u.usr_ip4_spec.proto) {
2630 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2631 break;
2632 }
2633 default:
2634 return 0;
2635 }
2636 break;
2637 default:
2638 return 0;
2639 }
2640
2641 return 1;
2642}
2643
2644static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2645 struct ethtool_rxnfc *cmd)
2646{
2647 struct ethtool_rx_flow_spec *fsp =
2648 (struct ethtool_rx_flow_spec *)&cmd->fs;
2649 struct ixgbe_hw *hw = &adapter->hw;
2650 struct ixgbe_fdir_filter *input;
2651 union ixgbe_atr_input mask;
2652 u8 queue;
2653 int err;
2654
2655 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2656 return -EOPNOTSUPP;
2657
2658 /* ring_cookie is a masked into a set of queues and ixgbe pools or
2659 * we use the drop index.
2660 */
2661 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2662 queue = IXGBE_FDIR_DROP_QUEUE;
2663 } else {
2664 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2665 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2666
2667 if (!vf && (ring >= adapter->num_rx_queues))
2668 return -EINVAL;
2669 else if (vf &&
2670 ((vf > adapter->num_vfs) ||
2671 ring >= adapter->num_rx_queues_per_pool))
2672 return -EINVAL;
2673
2674 /* Map the ring onto the absolute queue index */
2675 if (!vf)
2676 queue = adapter->rx_ring[ring]->reg_idx;
2677 else
2678 queue = ((vf - 1) *
2679 adapter->num_rx_queues_per_pool) + ring;
2680 }
2681
2682 /* Don't allow indexes to exist outside of available space */
2683 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2684 e_err(drv, "Location out of range\n");
2685 return -EINVAL;
2686 }
2687
2688 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2689 if (!input)
2690 return -ENOMEM;
2691
2692 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2693
2694 /* set SW index */
2695 input->sw_idx = fsp->location;
2696
2697 /* record flow type */
2698 if (!ixgbe_flowspec_to_flow_type(fsp,
2699 &input->filter.formatted.flow_type)) {
2700 e_err(drv, "Unrecognized flow type\n");
2701 goto err_out;
2702 }
2703
2704 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2705 IXGBE_ATR_L4TYPE_MASK;
2706
2707 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2708 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2709
2710 /* Copy input into formatted structures */
2711 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2712 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2713 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2714 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2715 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2716 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2717 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2718 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2719
2720 if (fsp->flow_type & FLOW_EXT) {
2721 input->filter.formatted.vm_pool =
2722 (unsigned char)ntohl(fsp->h_ext.data[1]);
2723 mask.formatted.vm_pool =
2724 (unsigned char)ntohl(fsp->m_ext.data[1]);
2725 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2726 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2727 input->filter.formatted.flex_bytes =
2728 fsp->h_ext.vlan_etype;
2729 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2730 }
2731
2732 /* determine if we need to drop or route the packet */
2733 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2734 input->action = IXGBE_FDIR_DROP_QUEUE;
2735 else
2736 input->action = fsp->ring_cookie;
2737
2738 spin_lock(&adapter->fdir_perfect_lock);
2739
2740 if (hlist_empty(&adapter->fdir_filter_list)) {
2741 /* save mask and program input mask into HW */
2742 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2743 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2744 if (err) {
2745 e_err(drv, "Error writing mask\n");
2746 goto err_out_w_lock;
2747 }
2748 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2749 e_err(drv, "Only one mask supported per port\n");
2750 goto err_out_w_lock;
2751 }
2752
2753 /* apply mask and compute/store hash */
2754 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2755
2756 /* program filters to filter memory */
2757 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2758 &input->filter, input->sw_idx, queue);
2759 if (err)
2760 goto err_out_w_lock;
2761
2762 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2763
2764 spin_unlock(&adapter->fdir_perfect_lock);
2765
2766 return err;
2767err_out_w_lock:
2768 spin_unlock(&adapter->fdir_perfect_lock);
2769err_out:
2770 kfree(input);
2771 return -EINVAL;
2772}
2773
2774static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2775 struct ethtool_rxnfc *cmd)
2776{
2777 struct ethtool_rx_flow_spec *fsp =
2778 (struct ethtool_rx_flow_spec *)&cmd->fs;
2779 int err;
2780
2781 spin_lock(&adapter->fdir_perfect_lock);
2782 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2783 spin_unlock(&adapter->fdir_perfect_lock);
2784
2785 return err;
2786}
2787
2788#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2789 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2790static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2791 struct ethtool_rxnfc *nfc)
2792{
2793 u32 flags2 = adapter->flags2;
2794
2795 /*
2796 * RSS does not support anything other than hashing
2797 * to queues on src and dst IPs and ports
2798 */
2799 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2800 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2801 return -EINVAL;
2802
2803 switch (nfc->flow_type) {
2804 case TCP_V4_FLOW:
2805 case TCP_V6_FLOW:
2806 if (!(nfc->data & RXH_IP_SRC) ||
2807 !(nfc->data & RXH_IP_DST) ||
2808 !(nfc->data & RXH_L4_B_0_1) ||
2809 !(nfc->data & RXH_L4_B_2_3))
2810 return -EINVAL;
2811 break;
2812 case UDP_V4_FLOW:
2813 if (!(nfc->data & RXH_IP_SRC) ||
2814 !(nfc->data & RXH_IP_DST))
2815 return -EINVAL;
2816 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2817 case 0:
2818 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2819 break;
2820 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2821 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2822 break;
2823 default:
2824 return -EINVAL;
2825 }
2826 break;
2827 case UDP_V6_FLOW:
2828 if (!(nfc->data & RXH_IP_SRC) ||
2829 !(nfc->data & RXH_IP_DST))
2830 return -EINVAL;
2831 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2832 case 0:
2833 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2834 break;
2835 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2836 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2837 break;
2838 default:
2839 return -EINVAL;
2840 }
2841 break;
2842 case AH_ESP_V4_FLOW:
2843 case AH_V4_FLOW:
2844 case ESP_V4_FLOW:
2845 case SCTP_V4_FLOW:
2846 case AH_ESP_V6_FLOW:
2847 case AH_V6_FLOW:
2848 case ESP_V6_FLOW:
2849 case SCTP_V6_FLOW:
2850 if (!(nfc->data & RXH_IP_SRC) ||
2851 !(nfc->data & RXH_IP_DST) ||
2852 (nfc->data & RXH_L4_B_0_1) ||
2853 (nfc->data & RXH_L4_B_2_3))
2854 return -EINVAL;
2855 break;
2856 default:
2857 return -EINVAL;
2858 }
2859
2860 /* if we changed something we need to update flags */
2861 if (flags2 != adapter->flags2) {
2862 struct ixgbe_hw *hw = &adapter->hw;
2863 u32 mrqc;
2864 unsigned int pf_pool = adapter->num_vfs;
2865
2866 if ((hw->mac.type >= ixgbe_mac_X550) &&
2867 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2868 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2869 else
2870 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2871
2872 if ((flags2 & UDP_RSS_FLAGS) &&
2873 !(adapter->flags2 & UDP_RSS_FLAGS))
2874 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2875
2876 adapter->flags2 = flags2;
2877
2878 /* Perform hash on these packet types */
2879 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2880 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2881 | IXGBE_MRQC_RSS_FIELD_IPV6
2882 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2883
2884 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2885 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2886
2887 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2888 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2889
2890 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2891 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2892
2893 if ((hw->mac.type >= ixgbe_mac_X550) &&
2894 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2895 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
2896 else
2897 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2898 }
2899
2900 return 0;
2901}
2902
2903static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2904{
2905 struct ixgbe_adapter *adapter = netdev_priv(dev);
2906 int ret = -EOPNOTSUPP;
2907
2908 switch (cmd->cmd) {
2909 case ETHTOOL_SRXCLSRLINS:
2910 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2911 break;
2912 case ETHTOOL_SRXCLSRLDEL:
2913 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2914 break;
2915 case ETHTOOL_SRXFH:
2916 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2917 break;
2918 default:
2919 break;
2920 }
2921
2922 return ret;
2923}
2924
2925static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2926{
2927 if (adapter->hw.mac.type < ixgbe_mac_X550)
2928 return 16;
2929 else
2930 return 64;
2931}
2932
2933static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
2934{
2935 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2936
2937 return sizeof(adapter->rss_key);
2938}
2939
2940static u32 ixgbe_rss_indir_size(struct net_device *netdev)
2941{
2942 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2943
2944 return ixgbe_rss_indir_tbl_entries(adapter);
2945}
2946
2947static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
2948{
2949 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
2950 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
2951
2952 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
2953 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
2954
2955 for (i = 0; i < reta_size; i++)
2956 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
2957}
2958
2959static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2960 u8 *hfunc)
2961{
2962 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2963
2964 if (hfunc)
2965 *hfunc = ETH_RSS_HASH_TOP;
2966
2967 if (indir)
2968 ixgbe_get_reta(adapter, indir);
2969
2970 if (key)
2971 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
2972
2973 return 0;
2974}
2975
2976static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
2977 const u8 *key, const u8 hfunc)
2978{
2979 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2980 int i;
2981 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
2982
2983 if (hfunc)
2984 return -EINVAL;
2985
2986 /* Fill out the redirection table */
2987 if (indir) {
2988 int max_queues = min_t(int, adapter->num_rx_queues,
2989 ixgbe_rss_indir_tbl_max(adapter));
2990
2991 /*Allow at least 2 queues w/ SR-IOV.*/
2992 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
2993 (max_queues < 2))
2994 max_queues = 2;
2995
2996 /* Verify user input. */
2997 for (i = 0; i < reta_entries; i++)
2998 if (indir[i] >= max_queues)
2999 return -EINVAL;
3000
3001 for (i = 0; i < reta_entries; i++)
3002 adapter->rss_indir_tbl[i] = indir[i];
3003 }
3004
3005 /* Fill out the rss hash key */
3006 if (key)
3007 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3008
3009 ixgbe_store_reta(adapter);
3010
3011 return 0;
3012}
3013
3014static int ixgbe_get_ts_info(struct net_device *dev,
3015 struct ethtool_ts_info *info)
3016{
3017 struct ixgbe_adapter *adapter = netdev_priv(dev);
3018
3019 /* we always support timestamping disabled */
3020 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3021
3022 switch (adapter->hw.mac.type) {
3023 case ixgbe_mac_X550:
3024 case ixgbe_mac_X550EM_x:
3025 case ixgbe_mac_x550em_a:
3026 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3027 /* fallthrough */
3028 case ixgbe_mac_X540:
3029 case ixgbe_mac_82599EB:
3030 info->so_timestamping =
3031 SOF_TIMESTAMPING_TX_SOFTWARE |
3032 SOF_TIMESTAMPING_RX_SOFTWARE |
3033 SOF_TIMESTAMPING_SOFTWARE |
3034 SOF_TIMESTAMPING_TX_HARDWARE |
3035 SOF_TIMESTAMPING_RX_HARDWARE |
3036 SOF_TIMESTAMPING_RAW_HARDWARE;
3037
3038 if (adapter->ptp_clock)
3039 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3040 else
3041 info->phc_index = -1;
3042
3043 info->tx_types =
3044 BIT(HWTSTAMP_TX_OFF) |
3045 BIT(HWTSTAMP_TX_ON);
3046
3047 info->rx_filters |=
3048 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3049 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3050 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3051 break;
3052 default:
3053 return ethtool_op_get_ts_info(dev, info);
3054 }
3055 return 0;
3056}
3057
3058static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3059{
3060 unsigned int max_combined;
3061 u8 tcs = netdev_get_num_tc(adapter->netdev);
3062
3063 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3064 /* We only support one q_vector without MSI-X */
3065 max_combined = 1;
3066 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3067 /* Limit value based on the queue mask */
3068 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3069 } else if (tcs > 1) {
3070 /* For DCB report channels per traffic class */
3071 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3072 /* 8 TC w/ 4 queues per TC */
3073 max_combined = 4;
3074 } else if (tcs > 4) {
3075 /* 8 TC w/ 8 queues per TC */
3076 max_combined = 8;
3077 } else {
3078 /* 4 TC w/ 16 queues per TC */
3079 max_combined = 16;
3080 }
3081 } else if (adapter->atr_sample_rate) {
3082 /* support up to 64 queues with ATR */
3083 max_combined = IXGBE_MAX_FDIR_INDICES;
3084 } else {
3085 /* support up to 16 queues with RSS */
3086 max_combined = ixgbe_max_rss_indices(adapter);
3087 }
3088
3089 return max_combined;
3090}
3091
3092static void ixgbe_get_channels(struct net_device *dev,
3093 struct ethtool_channels *ch)
3094{
3095 struct ixgbe_adapter *adapter = netdev_priv(dev);
3096
3097 /* report maximum channels */
3098 ch->max_combined = ixgbe_max_channels(adapter);
3099
3100 /* report info for other vector */
3101 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3102 ch->max_other = NON_Q_VECTORS;
3103 ch->other_count = NON_Q_VECTORS;
3104 }
3105
3106 /* record RSS queues */
3107 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3108
3109 /* nothing else to report if RSS is disabled */
3110 if (ch->combined_count == 1)
3111 return;
3112
3113 /* we do not support ATR queueing if SR-IOV is enabled */
3114 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3115 return;
3116
3117 /* same thing goes for being DCB enabled */
3118 if (netdev_get_num_tc(dev) > 1)
3119 return;
3120
3121 /* if ATR is disabled we can exit */
3122 if (!adapter->atr_sample_rate)
3123 return;
3124
3125 /* report flow director queues as maximum channels */
3126 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3127}
3128
3129static int ixgbe_set_channels(struct net_device *dev,
3130 struct ethtool_channels *ch)
3131{
3132 struct ixgbe_adapter *adapter = netdev_priv(dev);
3133 unsigned int count = ch->combined_count;
3134 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3135
3136 /* verify they are not requesting separate vectors */
3137 if (!count || ch->rx_count || ch->tx_count)
3138 return -EINVAL;
3139
3140 /* verify other_count has not changed */
3141 if (ch->other_count != NON_Q_VECTORS)
3142 return -EINVAL;
3143
3144 /* verify the number of channels does not exceed hardware limits */
3145 if (count > ixgbe_max_channels(adapter))
3146 return -EINVAL;
3147
3148 /* update feature limits from largest to smallest supported values */
3149 adapter->ring_feature[RING_F_FDIR].limit = count;
3150
3151 /* cap RSS limit */
3152 if (count > max_rss_indices)
3153 count = max_rss_indices;
3154 adapter->ring_feature[RING_F_RSS].limit = count;
3155
3156#ifdef IXGBE_FCOE
3157 /* cap FCoE limit at 8 */
3158 if (count > IXGBE_FCRETA_SIZE)
3159 count = IXGBE_FCRETA_SIZE;
3160 adapter->ring_feature[RING_F_FCOE].limit = count;
3161
3162#endif
3163 /* use setup TC to update any traffic class queue mapping */
3164 return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
3165}
3166
3167static int ixgbe_get_module_info(struct net_device *dev,
3168 struct ethtool_modinfo *modinfo)
3169{
3170 struct ixgbe_adapter *adapter = netdev_priv(dev);
3171 struct ixgbe_hw *hw = &adapter->hw;
3172 s32 status;
3173 u8 sff8472_rev, addr_mode;
3174 bool page_swap = false;
3175
3176 /* Check whether we support SFF-8472 or not */
3177 status = hw->phy.ops.read_i2c_eeprom(hw,
3178 IXGBE_SFF_SFF_8472_COMP,
3179 &sff8472_rev);
3180 if (status)
3181 return -EIO;
3182
3183 /* addressing mode is not supported */
3184 status = hw->phy.ops.read_i2c_eeprom(hw,
3185 IXGBE_SFF_SFF_8472_SWAP,
3186 &addr_mode);
3187 if (status)
3188 return -EIO;
3189
3190 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3191 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3192 page_swap = true;
3193 }
3194
3195 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3196 /* We have a SFP, but it does not support SFF-8472 */
3197 modinfo->type = ETH_MODULE_SFF_8079;
3198 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3199 } else {
3200 /* We have a SFP which supports a revision of SFF-8472. */
3201 modinfo->type = ETH_MODULE_SFF_8472;
3202 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3203 }
3204
3205 return 0;
3206}
3207
3208static int ixgbe_get_module_eeprom(struct net_device *dev,
3209 struct ethtool_eeprom *ee,
3210 u8 *data)
3211{
3212 struct ixgbe_adapter *adapter = netdev_priv(dev);
3213 struct ixgbe_hw *hw = &adapter->hw;
3214 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3215 u8 databyte = 0xFF;
3216 int i = 0;
3217
3218 if (ee->len == 0)
3219 return -EINVAL;
3220
3221 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3222 /* I2C reads can take long time */
3223 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3224 return -EBUSY;
3225
3226 if (i < ETH_MODULE_SFF_8079_LEN)
3227 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3228 else
3229 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3230
3231 if (status)
3232 return -EIO;
3233
3234 data[i - ee->offset] = databyte;
3235 }
3236
3237 return 0;
3238}
3239
3240static const struct ethtool_ops ixgbe_ethtool_ops = {
3241 .get_settings = ixgbe_get_settings,
3242 .set_settings = ixgbe_set_settings,
3243 .get_drvinfo = ixgbe_get_drvinfo,
3244 .get_regs_len = ixgbe_get_regs_len,
3245 .get_regs = ixgbe_get_regs,
3246 .get_wol = ixgbe_get_wol,
3247 .set_wol = ixgbe_set_wol,
3248 .nway_reset = ixgbe_nway_reset,
3249 .get_link = ethtool_op_get_link,
3250 .get_eeprom_len = ixgbe_get_eeprom_len,
3251 .get_eeprom = ixgbe_get_eeprom,
3252 .set_eeprom = ixgbe_set_eeprom,
3253 .get_ringparam = ixgbe_get_ringparam,
3254 .set_ringparam = ixgbe_set_ringparam,
3255 .get_pauseparam = ixgbe_get_pauseparam,
3256 .set_pauseparam = ixgbe_set_pauseparam,
3257 .get_msglevel = ixgbe_get_msglevel,
3258 .set_msglevel = ixgbe_set_msglevel,
3259 .self_test = ixgbe_diag_test,
3260 .get_strings = ixgbe_get_strings,
3261 .set_phys_id = ixgbe_set_phys_id,
3262 .get_sset_count = ixgbe_get_sset_count,
3263 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3264 .get_coalesce = ixgbe_get_coalesce,
3265 .set_coalesce = ixgbe_set_coalesce,
3266 .get_rxnfc = ixgbe_get_rxnfc,
3267 .set_rxnfc = ixgbe_set_rxnfc,
3268 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3269 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3270 .get_rxfh = ixgbe_get_rxfh,
3271 .set_rxfh = ixgbe_set_rxfh,
3272 .get_channels = ixgbe_get_channels,
3273 .set_channels = ixgbe_set_channels,
3274 .get_ts_info = ixgbe_get_ts_info,
3275 .get_module_info = ixgbe_get_module_info,
3276 .get_module_eeprom = ixgbe_get_module_eeprom,
3277};
3278
3279void ixgbe_set_ethtool_ops(struct net_device *netdev)
3280{
3281 netdev->ethtool_ops = &ixgbe_ethtool_ops;
3282}