1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4/* ethtool support for ixgbe */
5
6#include <linux/interrupt.h>
7#include <linux/types.h>
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/vmalloc.h>
14#include <linux/highmem.h>
15#include <linux/uaccess.h>
16
17#include "ixgbe.h"
18#include "ixgbe_phy.h"
19
20
21enum {NETDEV_STATS, IXGBE_STATS};
22
23struct ixgbe_stats {
24 char stat_string[ETH_GSTRING_LEN];
25 int type;
26 int sizeof_stat;
27 int stat_offset;
28};
29
30#define IXGBE_STAT(m) IXGBE_STATS, \
31 sizeof(((struct ixgbe_adapter *)0)->m), \
32 offsetof(struct ixgbe_adapter, m)
33#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
34 sizeof(((struct rtnl_link_stats64 *)0)->m), \
35 offsetof(struct rtnl_link_stats64, m)
36
37static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
38 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
39 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
40 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
41 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
42 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
43 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
44 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
45 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
46 {"lsc_int", IXGBE_STAT(lsc_int)},
47 {"tx_busy", IXGBE_STAT(tx_busy)},
48 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
49 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
50 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
51 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
52 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
53 {"multicast", IXGBE_NETDEV_STAT(multicast)},
54 {"broadcast", IXGBE_STAT(stats.bprc)},
55 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
56 {"collisions", IXGBE_NETDEV_STAT(collisions)},
57 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
58 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
59 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
60 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
61 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
62 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
63 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
64 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
65 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
66 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
67 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
68 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
69 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
70 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
71 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
72 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
73 {"rx_length_errors", IXGBE_STAT(stats.rlec)},
74 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
75 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
76 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
77 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
78 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
79 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
80 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
81 {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
82 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
83 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
84 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
85 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
86 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
87 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
88 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
89 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
90 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
91 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
92 {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
93 {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
94#ifdef IXGBE_FCOE
95 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
96 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
97 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
98 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
99 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
100 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
101 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
102 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
103#endif /* IXGBE_FCOE */
104};
105
106/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
107 * we define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is
108 * done because we do not have a good way to get the max number of
109 * Rx queues with CONFIG_RPS disabled.
110 */
111#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
112
113#define IXGBE_QUEUE_STATS_LEN ( \
114 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
115 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
116#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
117#define IXGBE_PB_STATS_LEN ( \
118 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
119 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
120 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
121 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
122 / sizeof(u64))
123#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
124 IXGBE_PB_STATS_LEN + \
125 IXGBE_QUEUE_STATS_LEN)
126
127static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
128 "Register test (offline)", "Eeprom test (offline)",
129 "Interrupt test (offline)", "Loopback test (offline)",
130 "Link test (on/offline)"
131};
132#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
133
134static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
135#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
136 "legacy-rx",
137#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
138 "vf-ipsec",
139#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2)
140 "mdd-disable-vf",
141};
142
143#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
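/* These flag names are exposed via "ethtool --show-priv-flags <dev>" and can
 * be toggled with e.g. "ethtool --set-priv-flags <dev> legacy-rx on".
 */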
144
145#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
146
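/* Advertise 10GbaseT on non-backplane media; for backplane parts pick
 * the KX4 and/or KR link modes based on the device ID.
 */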
147static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
148 struct ethtool_link_ksettings *cmd)
149{
150 if (!ixgbe_isbackplane(hw->phy.media_type)) {
151 ethtool_link_ksettings_add_link_mode(cmd, supported,
152 10000baseT_Full);
153 return;
154 }
155
156 switch (hw->device_id) {
157 case IXGBE_DEV_ID_82598:
158 case IXGBE_DEV_ID_82599_KX4:
159 case IXGBE_DEV_ID_82599_KX4_MEZZ:
160 case IXGBE_DEV_ID_X550EM_X_KX4:
161 ethtool_link_ksettings_add_link_mode
162 (cmd, supported, 10000baseKX4_Full);
163 break;
164 case IXGBE_DEV_ID_82598_BX:
165 case IXGBE_DEV_ID_82599_KR:
166 case IXGBE_DEV_ID_X550EM_X_KR:
167 case IXGBE_DEV_ID_X550EM_X_XFI:
168 ethtool_link_ksettings_add_link_mode
169 (cmd, supported, 10000baseKR_Full);
170 break;
171 default:
172 ethtool_link_ksettings_add_link_mode
173 (cmd, supported, 10000baseKX4_Full);
174 ethtool_link_ksettings_add_link_mode
175 (cmd, supported, 10000baseKR_Full);
176 break;
177 }
178}
179
180static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
181 struct ethtool_link_ksettings *cmd)
182{
183 if (!ixgbe_isbackplane(hw->phy.media_type)) {
184 ethtool_link_ksettings_add_link_mode(cmd, advertising,
185 10000baseT_Full);
186 return;
187 }
188
189 switch (hw->device_id) {
190 case IXGBE_DEV_ID_82598:
191 case IXGBE_DEV_ID_82599_KX4:
192 case IXGBE_DEV_ID_82599_KX4_MEZZ:
193 case IXGBE_DEV_ID_X550EM_X_KX4:
194 ethtool_link_ksettings_add_link_mode
195 (cmd, advertising, 10000baseKX4_Full);
196 break;
197 case IXGBE_DEV_ID_82598_BX:
198 case IXGBE_DEV_ID_82599_KR:
199 case IXGBE_DEV_ID_X550EM_X_KR:
200 case IXGBE_DEV_ID_X550EM_X_XFI:
201 ethtool_link_ksettings_add_link_mode
202 (cmd, advertising, 10000baseKR_Full);
203 break;
204 default:
205 ethtool_link_ksettings_add_link_mode
206 (cmd, advertising, 10000baseKX4_Full);
207 ethtool_link_ksettings_add_link_mode
208 (cmd, advertising, 10000baseKR_Full);
209 break;
210 }
211}
212
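/* Report link settings for ethtool: build the supported and advertised
 * link-mode masks from the MAC's capabilities and the PHY/SFP type,
 * then fill in autoneg, port, pause and the current speed/duplex.
 */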
213static int ixgbe_get_link_ksettings(struct net_device *netdev,
214 struct ethtool_link_ksettings *cmd)
215{
216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
217 struct ixgbe_hw *hw = &adapter->hw;
218 ixgbe_link_speed supported_link;
219 bool autoneg = false;
220
221 ethtool_link_ksettings_zero_link_mode(cmd, supported);
222 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
223
224 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
225
226 /* set the supported link speeds */
227 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
228 ixgbe_set_supported_10gtypes(hw, cmd);
229 ixgbe_set_advertising_10gtypes(hw, cmd);
230 }
231 if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
232 ethtool_link_ksettings_add_link_mode(cmd, supported,
233 5000baseT_Full);
234
235 if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
236 ethtool_link_ksettings_add_link_mode(cmd, supported,
237 2500baseT_Full);
238
239 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
240 if (ixgbe_isbackplane(hw->phy.media_type)) {
241 ethtool_link_ksettings_add_link_mode(cmd, supported,
242 1000baseKX_Full);
243 ethtool_link_ksettings_add_link_mode(cmd, advertising,
244 1000baseKX_Full);
245 } else {
246 ethtool_link_ksettings_add_link_mode(cmd, supported,
247 1000baseT_Full);
248 ethtool_link_ksettings_add_link_mode(cmd, advertising,
249 1000baseT_Full);
250 }
251 }
252 if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
253 ethtool_link_ksettings_add_link_mode(cmd, supported,
254 100baseT_Full);
255 ethtool_link_ksettings_add_link_mode(cmd, advertising,
256 100baseT_Full);
257 }
258 if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
259 ethtool_link_ksettings_add_link_mode(cmd, supported,
260 10baseT_Full);
261 ethtool_link_ksettings_add_link_mode(cmd, advertising,
262 10baseT_Full);
263 }
264
265 /* set the advertised speeds */
266 if (hw->phy.autoneg_advertised) {
267 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
268 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
269 ethtool_link_ksettings_add_link_mode(cmd, advertising,
270 10baseT_Full);
271 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
272 ethtool_link_ksettings_add_link_mode(cmd, advertising,
273 100baseT_Full);
274 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
275 ixgbe_set_advertising_10gtypes(hw, cmd);
276 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
277 if (ethtool_link_ksettings_test_link_mode
278 (cmd, supported, 1000baseKX_Full))
279 ethtool_link_ksettings_add_link_mode
280 (cmd, advertising, 1000baseKX_Full);
281 else
282 ethtool_link_ksettings_add_link_mode
283 (cmd, advertising, 1000baseT_Full);
284 }
285 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
286 ethtool_link_ksettings_add_link_mode(cmd, advertising,
287 5000baseT_Full);
288 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
289 ethtool_link_ksettings_add_link_mode(cmd, advertising,
290 2500baseT_Full);
291 } else {
292 if (hw->phy.multispeed_fiber && !autoneg) {
293 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
294 ethtool_link_ksettings_add_link_mode
295 (cmd, advertising, 10000baseT_Full);
296 }
297 }
298
299 if (autoneg) {
300 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
301 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
302 cmd->base.autoneg = AUTONEG_ENABLE;
303 } else
304 cmd->base.autoneg = AUTONEG_DISABLE;
305
306 /* Determine the remaining settings based on the PHY type. */
307 switch (adapter->hw.phy.type) {
308 case ixgbe_phy_tn:
309 case ixgbe_phy_aq:
310 case ixgbe_phy_x550em_ext_t:
311 case ixgbe_phy_fw:
312 case ixgbe_phy_cu_unknown:
313 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
314 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
315 cmd->base.port = PORT_TP;
316 break;
317 case ixgbe_phy_qt:
318 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
319 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
320 cmd->base.port = PORT_FIBRE;
321 break;
322 case ixgbe_phy_nl:
323 case ixgbe_phy_sfp_passive_tyco:
324 case ixgbe_phy_sfp_passive_unknown:
325 case ixgbe_phy_sfp_ftl:
326 case ixgbe_phy_sfp_avago:
327 case ixgbe_phy_sfp_intel:
328 case ixgbe_phy_sfp_unknown:
329 case ixgbe_phy_qsfp_passive_unknown:
330 case ixgbe_phy_qsfp_active_unknown:
331 case ixgbe_phy_qsfp_intel:
332 case ixgbe_phy_qsfp_unknown:
333 /* SFP+ devices, further checking needed */
334 switch (adapter->hw.phy.sfp_type) {
335 case ixgbe_sfp_type_da_cu:
336 case ixgbe_sfp_type_da_cu_core0:
337 case ixgbe_sfp_type_da_cu_core1:
338 ethtool_link_ksettings_add_link_mode(cmd, supported,
339 FIBRE);
340 ethtool_link_ksettings_add_link_mode(cmd, advertising,
341 FIBRE);
342 cmd->base.port = PORT_DA;
343 break;
344 case ixgbe_sfp_type_sr:
345 case ixgbe_sfp_type_lr:
346 case ixgbe_sfp_type_srlr_core0:
347 case ixgbe_sfp_type_srlr_core1:
348 case ixgbe_sfp_type_1g_sx_core0:
349 case ixgbe_sfp_type_1g_sx_core1:
350 case ixgbe_sfp_type_1g_lx_core0:
351 case ixgbe_sfp_type_1g_lx_core1:
352 case ixgbe_sfp_type_1g_bx_core0:
353 case ixgbe_sfp_type_1g_bx_core1:
354 ethtool_link_ksettings_add_link_mode(cmd, supported,
355 FIBRE);
356 ethtool_link_ksettings_add_link_mode(cmd, advertising,
357 FIBRE);
358 cmd->base.port = PORT_FIBRE;
359 break;
360 case ixgbe_sfp_type_not_present:
361 ethtool_link_ksettings_add_link_mode(cmd, supported,
362 FIBRE);
363 ethtool_link_ksettings_add_link_mode(cmd, advertising,
364 FIBRE);
365 cmd->base.port = PORT_NONE;
366 break;
367 case ixgbe_sfp_type_1g_cu_core0:
368 case ixgbe_sfp_type_1g_cu_core1:
369 ethtool_link_ksettings_add_link_mode(cmd, supported,
370 TP);
371 ethtool_link_ksettings_add_link_mode(cmd, advertising,
372 TP);
373 cmd->base.port = PORT_TP;
374 break;
375 case ixgbe_sfp_type_unknown:
376 default:
377 ethtool_link_ksettings_add_link_mode(cmd, supported,
378 FIBRE);
379 ethtool_link_ksettings_add_link_mode(cmd, advertising,
380 FIBRE);
381 cmd->base.port = PORT_OTHER;
382 break;
383 }
384 break;
385 case ixgbe_phy_xaui:
386 ethtool_link_ksettings_add_link_mode(cmd, supported,
387 FIBRE);
388 ethtool_link_ksettings_add_link_mode(cmd, advertising,
389 FIBRE);
390 cmd->base.port = PORT_NONE;
391 break;
392 case ixgbe_phy_unknown:
393 case ixgbe_phy_generic:
394 case ixgbe_phy_sfp_unsupported:
395 default:
396 ethtool_link_ksettings_add_link_mode(cmd, supported,
397 FIBRE);
398 ethtool_link_ksettings_add_link_mode(cmd, advertising,
399 FIBRE);
400 cmd->base.port = PORT_OTHER;
401 break;
402 }
403
404 /* Indicate pause support */
405 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
406
407 switch (hw->fc.requested_mode) {
408 case ixgbe_fc_full:
409 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
410 break;
411 case ixgbe_fc_rx_pause:
412 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
413 ethtool_link_ksettings_add_link_mode(cmd, advertising,
414 Asym_Pause);
415 break;
416 case ixgbe_fc_tx_pause:
417 ethtool_link_ksettings_add_link_mode(cmd, advertising,
418 Asym_Pause);
419 break;
420 default:
421 ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
422 ethtool_link_ksettings_del_link_mode(cmd, advertising,
423 Asym_Pause);
424 }
425
426 if (netif_carrier_ok(netdev)) {
427 switch (adapter->link_speed) {
428 case IXGBE_LINK_SPEED_10GB_FULL:
429 cmd->base.speed = SPEED_10000;
430 break;
431 case IXGBE_LINK_SPEED_5GB_FULL:
432 cmd->base.speed = SPEED_5000;
433 break;
434 case IXGBE_LINK_SPEED_2_5GB_FULL:
435 cmd->base.speed = SPEED_2500;
436 break;
437 case IXGBE_LINK_SPEED_1GB_FULL:
438 cmd->base.speed = SPEED_1000;
439 break;
440 case IXGBE_LINK_SPEED_100_FULL:
441 cmd->base.speed = SPEED_100;
442 break;
443 case IXGBE_LINK_SPEED_10_FULL:
444 cmd->base.speed = SPEED_10;
445 break;
446 default:
447 break;
448 }
449 cmd->base.duplex = DUPLEX_FULL;
450 } else {
451 cmd->base.speed = SPEED_UNKNOWN;
452 cmd->base.duplex = DUPLEX_UNKNOWN;
453 }
454
455 return 0;
456}
457
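/* Apply link settings from ethtool. For copper and multispeed-fiber
 * devices this limits the advertised speeds and restarts autoneg;
 * duplex forcing is not supported. All other media only accept a
 * forced 10Gb/Full configuration.
 */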
458static int ixgbe_set_link_ksettings(struct net_device *netdev,
459 const struct ethtool_link_ksettings *cmd)
460{
461 struct ixgbe_adapter *adapter = netdev_priv(netdev);
462 struct ixgbe_hw *hw = &adapter->hw;
463 u32 advertised, old;
464 int err = 0;
465
466 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
467 (hw->phy.multispeed_fiber)) {
468 /*
469 * this function does not support duplex forcing, but can
470 * limit the advertising of the adapter to the specified speed
471 */
472 if (!linkmode_subset(cmd->link_modes.advertising,
473 cmd->link_modes.supported))
474 return -EINVAL;
475
476 /* only allow one speed at a time if no autoneg */
477 if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
478 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
479 10000baseT_Full) &&
480 ethtool_link_ksettings_test_link_mode(cmd, advertising,
481 1000baseT_Full))
482 return -EINVAL;
483 }
484
485 old = hw->phy.autoneg_advertised;
486 advertised = 0;
487 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
488 10000baseT_Full))
489 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
490 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
491 5000baseT_Full))
492 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
493 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
494 2500baseT_Full))
495 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
496 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
497 1000baseT_Full))
498 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
499
500 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
501 100baseT_Full))
502 advertised |= IXGBE_LINK_SPEED_100_FULL;
503
504 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
505 10baseT_Full))
506 advertised |= IXGBE_LINK_SPEED_10_FULL;
507
508 if (old == advertised)
509 return err;
510 /* this sets the link speed and restarts auto-neg */
511 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
512 usleep_range(1000, 2000);
513
514 hw->mac.autotry_restart = true;
515 err = hw->mac.ops.setup_link(hw, advertised, true);
516 if (err) {
517 e_info(probe, "setup link failed with code %d\n", err);
518 hw->mac.ops.setup_link(hw, old, true);
519 }
520 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
521 } else {
522 /* in this case we currently only support 10Gb/FULL */
523 u32 speed = cmd->base.speed;
524
525 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
526 (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
527 10000baseT_Full)) ||
528 (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
529 return -EINVAL;
530 }
531
532 return err;
533}
534
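/* Report the MAC's link-level XON/XOFF pause frame counters
 * (standard ethtool pause statistics).
 */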
535static void ixgbe_get_pause_stats(struct net_device *netdev,
536 struct ethtool_pause_stats *stats)
537{
538 struct ixgbe_adapter *adapter = netdev_priv(netdev);
539 struct ixgbe_hw_stats *hwstats = &adapter->stats;
540
541 stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
542 stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
543}
544
545static void ixgbe_get_pauseparam(struct net_device *netdev,
546 struct ethtool_pauseparam *pause)
547{
548 struct ixgbe_adapter *adapter = netdev_priv(netdev);
549 struct ixgbe_hw *hw = &adapter->hw;
550
551 if (ixgbe_device_supports_autoneg_fc(hw) &&
552 !hw->fc.disable_fc_autoneg)
553 pause->autoneg = 1;
554 else
555 pause->autoneg = 0;
556
557 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
558 pause->rx_pause = 1;
559 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
560 pause->tx_pause = 1;
561 } else if (hw->fc.current_mode == ixgbe_fc_full) {
562 pause->rx_pause = 1;
563 pause->tx_pause = 1;
564 }
565}
566
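/* Update the requested flow control mode from ethtool -A. If anything
 * changed, the new settings are applied by reinitializing (or
 * resetting) the device.
 */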
567static int ixgbe_set_pauseparam(struct net_device *netdev,
568 struct ethtool_pauseparam *pause)
569{
570 struct ixgbe_adapter *adapter = netdev_priv(netdev);
571 struct ixgbe_hw *hw = &adapter->hw;
572 struct ixgbe_fc_info fc = hw->fc;
573
574 /* 82598 does not support link flow control with DCB enabled */
575 if ((hw->mac.type == ixgbe_mac_82598EB) &&
576 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
577 return -EINVAL;
578
579 /* some devices do not support autoneg of link flow control */
580 if ((pause->autoneg == AUTONEG_ENABLE) &&
581 !ixgbe_device_supports_autoneg_fc(hw))
582 return -EINVAL;
583
584 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
585
586 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
587 fc.requested_mode = ixgbe_fc_full;
588 else if (pause->rx_pause && !pause->tx_pause)
589 fc.requested_mode = ixgbe_fc_rx_pause;
590 else if (!pause->rx_pause && pause->tx_pause)
591 fc.requested_mode = ixgbe_fc_tx_pause;
592 else
593 fc.requested_mode = ixgbe_fc_none;
594
595 /* if the requested settings changed, update hw->fc and reconfigure */
596 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
597 hw->fc = fc;
598 if (netif_running(netdev))
599 ixgbe_reinit_locked(adapter);
600 else
601 ixgbe_reset(adapter);
602 }
603
604 return 0;
605}
606
607static u32 ixgbe_get_msglevel(struct net_device *netdev)
608{
609 struct ixgbe_adapter *adapter = netdev_priv(netdev);
610 return adapter->msg_enable;
611}
612
613static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
614{
615 struct ixgbe_adapter *adapter = netdev_priv(netdev);
616 adapter->msg_enable = data;
617}
618
619static int ixgbe_get_regs_len(struct net_device *netdev)
620{
621#define IXGBE_REGS_LEN 1145
622 return IXGBE_REGS_LEN * sizeof(u32);
623}
624
625#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
626
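/* Dump a snapshot of device registers for ethtool -d. The buffer
 * layout is fixed and must stay in sync with IXGBE_REGS_LEN above.
 */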
627static void ixgbe_get_regs(struct net_device *netdev,
628 struct ethtool_regs *regs, void *p)
629{
630 struct ixgbe_adapter *adapter = netdev_priv(netdev);
631 struct ixgbe_hw *hw = &adapter->hw;
632 u32 *regs_buff = p;
633 u8 i;
634
635 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
636
637 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
638 hw->device_id;
639
640 /* General Registers */
641 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
642 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
643 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
644 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
645 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
646 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
647 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
648 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
649
650 /* NVM Register */
651 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
652 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
653 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
654 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
655 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
656 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
657 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
658 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
659 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
660 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
661
662 /* Interrupt */
663 /* don't read EICR because it can clear interrupt causes; instead,
664 * read EICS, which is a shadow of EICR but doesn't clear it */
665 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
666 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
667 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
668 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
669 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
670 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
671 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
672 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
673 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
674 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
675 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
676 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
677
678 /* Flow Control */
679 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
680 for (i = 0; i < 4; i++)
681 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
682 for (i = 0; i < 8; i++) {
683 switch (hw->mac.type) {
684 case ixgbe_mac_82598EB:
685 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
686 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
687 break;
688 case ixgbe_mac_82599EB:
689 case ixgbe_mac_X540:
690 case ixgbe_mac_X550:
691 case ixgbe_mac_X550EM_x:
692 case ixgbe_mac_x550em_a:
693 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
694 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
695 break;
696 default:
697 break;
698 }
699 }
700 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
701 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
702
703 /* Receive DMA */
704 for (i = 0; i < 64; i++)
705 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
706 for (i = 0; i < 64; i++)
707 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
708 for (i = 0; i < 64; i++)
709 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
710 for (i = 0; i < 64; i++)
711 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
712 for (i = 0; i < 64; i++)
713 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
714 for (i = 0; i < 64; i++)
715 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
716 for (i = 0; i < 16; i++)
717 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
718 for (i = 0; i < 16; i++)
719 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
720 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
721 for (i = 0; i < 8; i++)
722 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
723 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
724 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
725
726 /* Receive */
727 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
728 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
729 for (i = 0; i < 16; i++)
730 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
731 for (i = 0; i < 16; i++)
732 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
733 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
734 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
735 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
736 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
737 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
738 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
739 for (i = 0; i < 8; i++)
740 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
741 for (i = 0; i < 8; i++)
742 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
743 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
744
745 /* Transmit */
746 for (i = 0; i < 32; i++)
747 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
748 for (i = 0; i < 32; i++)
749 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
750 for (i = 0; i < 32; i++)
751 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
752 for (i = 0; i < 32; i++)
753 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
754 for (i = 0; i < 32; i++)
755 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
756 for (i = 0; i < 32; i++)
757 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
758 for (i = 0; i < 32; i++)
759 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
760 for (i = 0; i < 32; i++)
761 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
762 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
763 for (i = 0; i < 16; i++)
764 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
765 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
766 for (i = 0; i < 8; i++)
767 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
768 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
769
770 /* Wake Up */
771 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
772 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
773 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
774 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
775 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
776 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
777 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
778 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
779 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
780
781 /* DCB */
782 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
783 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
784
785 switch (hw->mac.type) {
786 case ixgbe_mac_82598EB:
787 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
788 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
789 for (i = 0; i < 8; i++)
790 regs_buff[833 + i] =
791 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
792 for (i = 0; i < 8; i++)
793 regs_buff[841 + i] =
794 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
795 for (i = 0; i < 8; i++)
796 regs_buff[849 + i] =
797 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
798 for (i = 0; i < 8; i++)
799 regs_buff[857 + i] =
800 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
801 break;
802 case ixgbe_mac_82599EB:
803 case ixgbe_mac_X540:
804 case ixgbe_mac_X550:
805 case ixgbe_mac_X550EM_x:
806 case ixgbe_mac_x550em_a:
807 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
808 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
809 for (i = 0; i < 8; i++)
810 regs_buff[833 + i] =
811 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
812 for (i = 0; i < 8; i++)
813 regs_buff[841 + i] =
814 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
815 for (i = 0; i < 8; i++)
816 regs_buff[849 + i] =
817 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
818 for (i = 0; i < 8; i++)
819 regs_buff[857 + i] =
820 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
821 break;
822 default:
823 break;
824 }
825
826 for (i = 0; i < 8; i++)
827 regs_buff[865 + i] =
828 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
829 for (i = 0; i < 8; i++)
830 regs_buff[873 + i] =
831 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
832
833 /* Statistics */
834 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
835 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
836 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
837 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
838 for (i = 0; i < 8; i++)
839 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
840 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
841 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
842 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
843 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
844 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
845 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
846 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
847 for (i = 0; i < 8; i++)
848 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
849 for (i = 0; i < 8; i++)
850 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
851 for (i = 0; i < 8; i++)
852 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
853 for (i = 0; i < 8; i++)
854 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
855 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
856 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
857 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
858 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
859 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
860 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
861 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
862 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
863 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
864 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
865 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
866 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
867 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
868 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
869 for (i = 0; i < 8; i++)
870 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
871 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
872 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
873 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
874 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
875 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
876 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
877 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
878 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
879 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
880 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
881 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
882 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
883 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
884 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
885 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
886 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
887 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
888 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
889 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
890 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
891 for (i = 0; i < 16; i++)
892 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
893 for (i = 0; i < 16; i++)
894 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
895 for (i = 0; i < 16; i++)
896 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
897 for (i = 0; i < 16; i++)
898 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
899
900 /* MAC */
901 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
902 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
903 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
904 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
905 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
906 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
907 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
908 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
909 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
910 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
911 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
912 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
913 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
914 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
915 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
916 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
917 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
918 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
919 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
920 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
921 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
922 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
923 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
924 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
925 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
926 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
927 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
928 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
929 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
930 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
931 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
932 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
933 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
934
935 /* Diagnostic */
936 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
937 for (i = 0; i < 8; i++)
938 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
939 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
940 for (i = 0; i < 4; i++)
941 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
942 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
943 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
944 for (i = 0; i < 8; i++)
945 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
946 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
947 for (i = 0; i < 4; i++)
948 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
949 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
950 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
951 for (i = 0; i < 4; i++)
952 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
953 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
954 for (i = 0; i < 4; i++)
955 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
956 for (i = 0; i < 8; i++)
957 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
958 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
959 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
960 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
961 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
962 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
963 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
964 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
965 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
966 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
967
968 /* 82599 X540 specific registers */
969 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
970
971 /* 82599 X540 specific DCB registers */
972 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
973 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
974 for (i = 0; i < 4; i++)
975 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
976 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
977 /* same as RTTQCNRM */
978 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
979 /* same as RTTQCNRR */
980
981 /* X540 specific DCB registers */
982 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
983 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
984
985 /* Security config registers */
986 regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
987 regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
988 regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
989 regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
990 regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
991 regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
992}
993
994static int ixgbe_get_eeprom_len(struct net_device *netdev)
995{
996 struct ixgbe_adapter *adapter = netdev_priv(netdev);
997 return adapter->hw.eeprom.word_size * 2;
998}
999
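/* Read an arbitrary byte range from the EEPROM by fetching the
 * covering word-aligned range and copying out the requested bytes.
 */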
1000static int ixgbe_get_eeprom(struct net_device *netdev,
1001 struct ethtool_eeprom *eeprom, u8 *bytes)
1002{
1003 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1004 struct ixgbe_hw *hw = &adapter->hw;
1005 u16 *eeprom_buff;
1006 int first_word, last_word, eeprom_len;
1007 int ret_val = 0;
1008 u16 i;
1009
1010 if (eeprom->len == 0)
1011 return -EINVAL;
1012
1013 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1014
1015 first_word = eeprom->offset >> 1;
1016 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1017 eeprom_len = last_word - first_word + 1;
1018
1019 eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
1020 if (!eeprom_buff)
1021 return -ENOMEM;
1022
1023 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
1024 eeprom_buff);
1025
1026 /* Device's eeprom is always little-endian, word addressable */
1027 for (i = 0; i < eeprom_len; i++)
1028 le16_to_cpus(&eeprom_buff[i]);
1029
1030 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1031 kfree(eeprom_buff);
1032
1033 return ret_val;
1034}
1035
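/* Write an arbitrary byte range to the EEPROM. Partial words at the
 * start and end are handled with read-modify-write, and the EEPROM
 * checksum is updated on success.
 */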
1036static int ixgbe_set_eeprom(struct net_device *netdev,
1037 struct ethtool_eeprom *eeprom, u8 *bytes)
1038{
1039 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1040 struct ixgbe_hw *hw = &adapter->hw;
1041 u16 *eeprom_buff;
1042 void *ptr;
1043 int max_len, first_word, last_word, ret_val = 0;
1044 u16 i;
1045
1046 if (eeprom->len == 0)
1047 return -EINVAL;
1048
1049 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1050 return -EINVAL;
1051
1052 max_len = hw->eeprom.word_size * 2;
1053
1054 first_word = eeprom->offset >> 1;
1055 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1056 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1057 if (!eeprom_buff)
1058 return -ENOMEM;
1059
1060 ptr = eeprom_buff;
1061
1062 if (eeprom->offset & 1) {
1063 /*
1064 * need read/modify/write of the first changed EEPROM word;
1065 * only the second byte of the word is being modified
1066 */
1067 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
1068 if (ret_val)
1069 goto err;
1070
1071 ptr++;
1072 }
1073 if ((eeprom->offset + eeprom->len) & 1) {
1074 /*
1075 * need read/modify/write of the last changed EEPROM word;
1076 * only the first byte of the word is being modified
1077 */
1078 ret_val = hw->eeprom.ops.read(hw, last_word,
1079 &eeprom_buff[last_word - first_word]);
1080 if (ret_val)
1081 goto err;
1082 }
1083
1084 /* Device's eeprom is always little-endian, word addressable */
1085 for (i = 0; i < last_word - first_word + 1; i++)
1086 le16_to_cpus(&eeprom_buff[i]);
1087
1088 memcpy(ptr, bytes, eeprom->len);
1089
1090 for (i = 0; i < last_word - first_word + 1; i++)
1091 cpu_to_le16s(&eeprom_buff[i]);
1092
1093 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1094 last_word - first_word + 1,
1095 eeprom_buff);
1096
1097 /* Update the checksum */
1098 if (ret_val == 0)
1099 hw->eeprom.ops.update_checksum(hw);
1100
1101err:
1102 kfree(eeprom_buff);
1103 return ret_val;
1104}
1105
1106static void ixgbe_get_drvinfo(struct net_device *netdev,
1107 struct ethtool_drvinfo *drvinfo)
1108{
1109 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1110
1111 strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1112
1113 strscpy(drvinfo->fw_version, adapter->eeprom_id,
1114 sizeof(drvinfo->fw_version));
1115
1116 strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
1117 sizeof(drvinfo->bus_info));
1118
1119 drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1120}
1121
1122static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter)
1123{
1124 switch (adapter->hw.mac.type) {
1125 case ixgbe_mac_82598EB:
1126 return IXGBE_MAX_RXD_82598;
1127 case ixgbe_mac_82599EB:
1128 return IXGBE_MAX_RXD_82599;
1129 case ixgbe_mac_X540:
1130 return IXGBE_MAX_RXD_X540;
1131 case ixgbe_mac_X550:
1132 case ixgbe_mac_X550EM_x:
1133 case ixgbe_mac_x550em_a:
1134 return IXGBE_MAX_RXD_X550;
1135 default:
1136 return IXGBE_MAX_RXD_82598;
1137 }
1138}
1139
1140static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter)
1141{
1142 switch (adapter->hw.mac.type) {
1143 case ixgbe_mac_82598EB:
1144 return IXGBE_MAX_TXD_82598;
1145 case ixgbe_mac_82599EB:
1146 return IXGBE_MAX_TXD_82599;
1147 case ixgbe_mac_X540:
1148 return IXGBE_MAX_TXD_X540;
1149 case ixgbe_mac_X550:
1150 case ixgbe_mac_X550EM_x:
1151 case ixgbe_mac_x550em_a:
1152 return IXGBE_MAX_TXD_X550;
1153 default:
1154 return IXGBE_MAX_TXD_82598;
1155 }
1156}
1157
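/* Report current and maximum descriptor ring sizes for ethtool -g. */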
1158static void ixgbe_get_ringparam(struct net_device *netdev,
1159 struct ethtool_ringparam *ring,
1160 struct kernel_ethtool_ringparam *kernel_ring,
1161 struct netlink_ext_ack *extack)
1162{
1163 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1164 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1165 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1166
1167 ring->rx_max_pending = ixgbe_get_max_rxd(adapter);
1168 ring->tx_max_pending = ixgbe_get_max_txd(adapter);
1169 ring->rx_pending = rx_ring->count;
1170 ring->tx_pending = tx_ring->count;
1171}
1172
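/* Resize the Tx/Rx descriptor rings. New resources are allocated into
 * a temporary ring array before the old ones are freed, so an
 * allocation failure leaves the current rings intact.
 */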
1173static int ixgbe_set_ringparam(struct net_device *netdev,
1174 struct ethtool_ringparam *ring,
1175 struct kernel_ethtool_ringparam *kernel_ring,
1176 struct netlink_ext_ack *extack)
1177{
1178 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1179 struct ixgbe_ring *temp_ring;
1180 int i, j, err = 0;
1181 u32 new_rx_count, new_tx_count;
1182
1183 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1184 return -EINVAL;
1185
1186 new_tx_count = clamp_t(u32, ring->tx_pending,
1187 IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));
1188 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1189
1190 new_rx_count = clamp_t(u32, ring->rx_pending,
1191 IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));
1192 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1193
1194 if ((new_tx_count == adapter->tx_ring_count) &&
1195 (new_rx_count == adapter->rx_ring_count)) {
1196 /* nothing to do */
1197 return 0;
1198 }
1199
1200 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1201 usleep_range(1000, 2000);
1202
1203 if (!netif_running(adapter->netdev)) {
1204 for (i = 0; i < adapter->num_tx_queues; i++)
1205 adapter->tx_ring[i]->count = new_tx_count;
1206 for (i = 0; i < adapter->num_xdp_queues; i++)
1207 adapter->xdp_ring[i]->count = new_tx_count;
1208 for (i = 0; i < adapter->num_rx_queues; i++)
1209 adapter->rx_ring[i]->count = new_rx_count;
1210 adapter->tx_ring_count = new_tx_count;
1211 adapter->xdp_ring_count = new_tx_count;
1212 adapter->rx_ring_count = new_rx_count;
1213 goto clear_reset;
1214 }
1215
1216 /* allocate temporary buffer to store rings in */
1217 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1218 adapter->num_rx_queues);
1219 temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
1220
1221 if (!temp_ring) {
1222 err = -ENOMEM;
1223 goto clear_reset;
1224 }
1225
1226 ixgbe_down(adapter);
1227
1228 /*
1229 * Set up new Tx resources and free the old Tx resources in that order.
1230 * We can then assign the new resources to the rings via a memcpy.
1231 * The advantage to this approach is that we are guaranteed to still
1232 * have resources even in the case of an allocation failure.
1233 */
1234 if (new_tx_count != adapter->tx_ring_count) {
1235 for (i = 0; i < adapter->num_tx_queues; i++) {
1236 memcpy(&temp_ring[i], adapter->tx_ring[i],
1237 sizeof(struct ixgbe_ring));
1238
1239 temp_ring[i].count = new_tx_count;
1240 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1241 if (err) {
1242 while (i) {
1243 i--;
1244 ixgbe_free_tx_resources(&temp_ring[i]);
1245 }
1246 goto err_setup;
1247 }
1248 }
1249
1250 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1251 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1252 sizeof(struct ixgbe_ring));
1253
1254 temp_ring[i].count = new_tx_count;
1255 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1256 if (err) {
1257 while (i) {
1258 i--;
1259 ixgbe_free_tx_resources(&temp_ring[i]);
1260 }
1261 goto err_setup;
1262 }
1263 }
1264
1265 for (i = 0; i < adapter->num_tx_queues; i++) {
1266 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1267
1268 memcpy(adapter->tx_ring[i], &temp_ring[i],
1269 sizeof(struct ixgbe_ring));
1270 }
1271 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1272 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1273
1274 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1275 sizeof(struct ixgbe_ring));
1276 }
1277
1278 adapter->tx_ring_count = new_tx_count;
1279 }
1280
1281 /* Repeat the process for the Rx rings if needed */
1282 if (new_rx_count != adapter->rx_ring_count) {
1283 for (i = 0; i < adapter->num_rx_queues; i++) {
1284 memcpy(&temp_ring[i], adapter->rx_ring[i],
1285 sizeof(struct ixgbe_ring));
1286
1287 /* Clear copied XDP RX-queue info */
1288 memset(&temp_ring[i].xdp_rxq, 0,
1289 sizeof(temp_ring[i].xdp_rxq));
1290
1291 temp_ring[i].count = new_rx_count;
1292 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1293 if (err) {
1294 while (i) {
1295 i--;
1296 ixgbe_free_rx_resources(&temp_ring[i]);
1297 }
1298 goto err_setup;
1299 }
1300
1301 }
1302
1303 for (i = 0; i < adapter->num_rx_queues; i++) {
1304 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1305
1306 memcpy(adapter->rx_ring[i], &temp_ring[i],
1307 sizeof(struct ixgbe_ring));
1308 }
1309
1310 adapter->rx_ring_count = new_rx_count;
1311 }
1312
1313err_setup:
1314 ixgbe_up(adapter);
1315 vfree(temp_ring);
1316clear_reset:
1317 clear_bit(__IXGBE_RESETTING, &adapter->state);
1318 return err;
1319}
1320
1321static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1322{
1323 switch (sset) {
1324 case ETH_SS_TEST:
1325 return IXGBE_TEST_LEN;
1326 case ETH_SS_STATS:
1327 return IXGBE_STATS_LEN;
1328 case ETH_SS_PRIV_FLAGS:
1329 return IXGBE_PRIV_FLAGS_STR_LEN;
1330 default:
1331 return -EOPNOTSUPP;
1332 }
1333}
1334
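/* Fill the ethtool -S statistics array. The ordering must match the
 * strings emitted by ixgbe_get_strings() for ETH_SS_STATS.
 */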
1335static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1336 struct ethtool_stats *stats, u64 *data)
1337{
1338 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1339 struct rtnl_link_stats64 temp;
1340 const struct rtnl_link_stats64 *net_stats;
1341 unsigned int start;
1342 struct ixgbe_ring *ring;
1343 int i, j;
1344 char *p = NULL;
1345
1346 ixgbe_update_stats(adapter);
1347 net_stats = dev_get_stats(netdev, &temp);
1348 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1349 switch (ixgbe_gstrings_stats[i].type) {
1350 case NETDEV_STATS:
1351 p = (char *) net_stats +
1352 ixgbe_gstrings_stats[i].stat_offset;
1353 break;
1354 case IXGBE_STATS:
1355 p = (char *) adapter +
1356 ixgbe_gstrings_stats[i].stat_offset;
1357 break;
1358 default:
1359 data[i] = 0;
1360 continue;
1361 }
1362
1363 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1364 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1365 }
1366 for (j = 0; j < netdev->num_tx_queues; j++) {
1367 ring = adapter->tx_ring[j];
1368 if (!ring) {
1369 data[i] = 0;
1370 data[i+1] = 0;
1371 i += 2;
1372 continue;
1373 }
1374
1375 do {
1376 start = u64_stats_fetch_begin(&ring->syncp);
1377 data[i] = ring->stats.packets;
1378 data[i+1] = ring->stats.bytes;
1379 } while (u64_stats_fetch_retry(&ring->syncp, start));
1380 i += 2;
1381 }
1382 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1383 ring = adapter->rx_ring[j];
1384 if (!ring) {
1385 data[i] = 0;
1386 data[i+1] = 0;
1387 i += 2;
1388 continue;
1389 }
1390
1391 do {
1392 start = u64_stats_fetch_begin(&ring->syncp);
1393 data[i] = ring->stats.packets;
1394 data[i+1] = ring->stats.bytes;
1395 } while (u64_stats_fetch_retry(&ring->syncp, start));
1396 i += 2;
1397 }
1398
1399 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1400 data[i++] = adapter->stats.pxontxc[j];
1401 data[i++] = adapter->stats.pxofftxc[j];
1402 }
1403 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1404 data[i++] = adapter->stats.pxonrxc[j];
1405 data[i++] = adapter->stats.pxoffrxc[j];
1406 }
1407}
1408
1409static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1410 u8 *data)
1411{
1412 unsigned int i;
1413 u8 *p = data;
1414
1415 switch (stringset) {
1416 case ETH_SS_TEST:
1417 for (i = 0; i < IXGBE_TEST_LEN; i++)
1418 ethtool_puts(&p, ixgbe_gstrings_test[i]);
1419 break;
1420 case ETH_SS_STATS:
1421 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
1422 ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
1423 for (i = 0; i < netdev->num_tx_queues; i++) {
1424 ethtool_sprintf(&p, "tx_queue_%u_packets", i);
1425 ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
1426 }
1427 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1428 ethtool_sprintf(&p, "rx_queue_%u_packets", i);
1429 ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
1430 }
1431 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1432 ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
1433 ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
1434 }
1435 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1436 ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
1437 ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
1438 }
1439 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1440 break;
1441 case ETH_SS_PRIV_FLAGS:
1442 memcpy(data, ixgbe_priv_flags_strings,
1443 IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1444 }
1445}
1446
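/* Self-test helper: report 0 if link is up, 1 otherwise (or if the
 * adapter has been removed).
 */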
1447static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1448{
1449 struct ixgbe_hw *hw = &adapter->hw;
1450 bool link_up;
1451 u32 link_speed = 0;
1452
1453 if (ixgbe_removed(hw->hw_addr)) {
1454 *data = 1;
1455 return 1;
1456 }
1457 *data = 0;
1458
1459 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1460 if (link_up)
1461 return *data;
1462 else
1463 *data = 1;
1464 return *data;
1465}
1466
1467/* ethtool register test data */
1468struct ixgbe_reg_test {
1469 u16 reg;
1470 u8 array_len;
1471 u8 test_type;
1472 u32 mask;
1473 u32 write;
1474};
1475
1476/* In the hardware, registers are laid out either singly, in arrays
1477 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1478 * most tests take place on arrays or single registers (handled
1479 * as a single-element array) and special-case the tables.
1480 * Table tests are always pattern tests.
1481 *
1482 * We also make provision for some required setup steps by specifying
1483 * registers to be written without any read-back testing.
1484 */
1485
1486#define PATTERN_TEST 1
1487#define SET_READ_TEST 2
1488#define WRITE_NO_TEST 3
1489#define TABLE32_TEST 4
1490#define TABLE64_TEST_LO 5
1491#define TABLE64_TEST_HI 6
1492
1493/* default 82599 register test */
1494static const struct ixgbe_reg_test reg_test_82599[] = {
1495 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1496 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1497 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1498 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1499 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1500 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1501 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1502 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1503 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1504 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1505 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1506 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1507 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1508 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1509 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1510 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1511 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1512 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1513 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1514 { .reg = 0 }
1515};
1516
1517/* default 82598 register test */
1518static const struct ixgbe_reg_test reg_test_82598[] = {
1519 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1520 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1521 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1522 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1523 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1524 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1525 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1526 /* Enable all four RX queues before testing. */
1527 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1528 /* RDH is read-only for 82598, only test RDT. */
1529 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1530 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1531 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1532 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1533 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1534 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1535 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1536 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1537 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1538 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1539 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1540 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1541 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1542 { .reg = 0 }
1543};
1544
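/* Write each test pattern (masked by 'write') to the register, verify
 * the readback against 'mask', and always restore the original value.
 * Returns true and records the failing register in *data on error.
 */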
1545static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1546 u32 mask, u32 write)
1547{
1548 u32 pat, val, before;
1549 static const u32 test_pattern[] = {
1550 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1551
1552 if (ixgbe_removed(adapter->hw.hw_addr)) {
1553 *data = 1;
1554 return true;
1555 }
1556 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1557 before = ixgbe_read_reg(&adapter->hw, reg);
1558 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1559 val = ixgbe_read_reg(&adapter->hw, reg);
1560 if (val != (test_pattern[pat] & write & mask)) {
1561 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1562 reg, val, (test_pattern[pat] & write & mask));
1563 *data = reg;
1564 ixgbe_write_reg(&adapter->hw, reg, before);
1565 return true;
1566 }
1567 ixgbe_write_reg(&adapter->hw, reg, before);
1568 }
1569 return false;
1570}
1571
1572static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1573 u32 mask, u32 write)
1574{
1575 u32 val, before;
1576
1577 if (ixgbe_removed(adapter->hw.hw_addr)) {
1578 *data = 1;
1579 return true;
1580 }
1581 before = ixgbe_read_reg(&adapter->hw, reg);
1582 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1583 val = ixgbe_read_reg(&adapter->hw, reg);
1584 if ((write & mask) != (val & mask)) {
1585 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1586 reg, (val & mask), (write & mask));
1587 *data = reg;
1588 ixgbe_write_reg(&adapter->hw, reg, before);
1589 return true;
1590 }
1591 ixgbe_write_reg(&adapter->hw, reg, before);
1592 return false;
1593}
1594
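/* Register self-test: exercise the STATUS register separately, then
 * walk the MAC-specific test table applying pattern, set/read and
 * write-only tests to each register or register array.
 */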
1595static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1596{
1597 const struct ixgbe_reg_test *test;
1598 u32 value, before, after;
1599 u32 i, toggle;
1600
1601 if (ixgbe_removed(adapter->hw.hw_addr)) {
1602 e_err(drv, "Adapter removed - register test blocked\n");
1603 *data = 1;
1604 return 1;
1605 }
1606 switch (adapter->hw.mac.type) {
1607 case ixgbe_mac_82598EB:
1608 toggle = 0x7FFFF3FF;
1609 test = reg_test_82598;
1610 break;
1611 case ixgbe_mac_82599EB:
1612 case ixgbe_mac_X540:
1613 case ixgbe_mac_X550:
1614 case ixgbe_mac_X550EM_x:
1615 case ixgbe_mac_x550em_a:
1616 toggle = 0x7FFFF30F;
1617 test = reg_test_82599;
1618 break;
1619 default:
1620 *data = 1;
1621 return 1;
1622 }
1623
1624 /*
1625 * Because the status register is such a special case,
1626 * we handle it separately from the rest of the register
1627 * tests. Some bits are read-only, some toggle, and some
1628 * are writeable on newer MACs.
1629 */
1630 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1631 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1632 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1633 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1634 if (value != after) {
1635 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1636 after, value);
1637 *data = 1;
1638 return 1;
1639 }
1640 /* restore previous status */
1641 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1642
1643 /*
1644 * Perform the remainder of the register test, looping through
1645 * the test table until we either fail or reach the null entry.
1646 */
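/* per-queue registers are spaced 0x40 bytes apart; 32-bit table entries
 * are 4 bytes apart and 64-bit table entries 8 bytes apart, with the
 * high dword of a 64-bit entry at reg + 4
 */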
1647 while (test->reg) {
1648 for (i = 0; i < test->array_len; i++) {
1649 bool b = false;
1650
1651 switch (test->test_type) {
1652 case PATTERN_TEST:
1653 b = reg_pattern_test(adapter, data,
1654 test->reg + (i * 0x40),
1655 test->mask,
1656 test->write);
1657 break;
1658 case SET_READ_TEST:
1659 b = reg_set_and_check(adapter, data,
1660 test->reg + (i * 0x40),
1661 test->mask,
1662 test->write);
1663 break;
1664 case WRITE_NO_TEST:
1665 ixgbe_write_reg(&adapter->hw,
1666 test->reg + (i * 0x40),
1667 test->write);
1668 break;
1669 case TABLE32_TEST:
1670 b = reg_pattern_test(adapter, data,
1671 test->reg + (i * 4),
1672 test->mask,
1673 test->write);
1674 break;
1675 case TABLE64_TEST_LO:
1676 b = reg_pattern_test(adapter, data,
1677 test->reg + (i * 8),
1678 test->mask,
1679 test->write);
1680 break;
1681 case TABLE64_TEST_HI:
1682 b = reg_pattern_test(adapter, data,
1683 (test->reg + 4) + (i * 8),
1684 test->mask,
1685 test->write);
1686 break;
1687 }
1688 if (b)
1689 return 1;
1690 }
1691 test++;
1692 }
1693
1694 *data = 0;
1695 return 0;
1696}
1697
1698static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1699{
1700 struct ixgbe_hw *hw = &adapter->hw;
1701 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1702 *data = 1;
1703 else
1704 *data = 0;
1705 return *data;
1706}
1707
1708static irqreturn_t ixgbe_test_intr(int irq, void *data)
1709{
1710 struct net_device *netdev = (struct net_device *) data;
1711 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1712
1713 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1714
1715 return IRQ_HANDLED;
1716}
1717
1718static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1719{
1720 struct net_device *netdev = adapter->netdev;
1721 u32 mask, i = 0, shared_int = true;
1722 u32 irq = adapter->pdev->irq;
1723
1724 *data = 0;
1725
1726 /* Hook up test interrupt handler just for this test */
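/* MSI-X is not exercised here; with MSI the line is requested
 * exclusively, otherwise we first probe for exclusive use of the legacy
 * line (IRQF_PROBE_SHARED succeeding means nobody else owns it) and
 * only then fall back to a shared handler
 */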
1727 if (adapter->msix_entries) {
1728 /* NOTE: we don't test MSI-X interrupts here, yet */
1729 return 0;
1730 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1731 shared_int = false;
1732 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1733 netdev)) {
1734 *data = 1;
1735 return -1;
1736 }
1737 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1738 netdev->name, netdev)) {
1739 shared_int = false;
1740 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1741 netdev->name, netdev)) {
1742 *data = 1;
1743 return -1;
1744 }
1745 e_info(hw, "testing %s interrupt\n", shared_int ?
1746 "shared" : "unshared");
1747
1748 /* Disable all the interrupts */
1749 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1750 IXGBE_WRITE_FLUSH(&adapter->hw);
1751 usleep_range(10000, 20000);
1752
1753 /* Test each interrupt */
1754 for (; i < 10; i++) {
1755 /* Interrupt to test */
1756 mask = BIT(i);
1757
1758 if (!shared_int) {
1759 /*
1760 * Disable the interrupts to be reported in
1761 * the cause register and then force the same
1762 * interrupt and see if one gets posted. If
1763 * an interrupt was posted to the bus, the
1764 * test failed.
1765 */
1766 adapter->test_icr = 0;
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1768 ~mask & 0x00007FFF);
1769 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1770 ~mask & 0x00007FFF);
1771 IXGBE_WRITE_FLUSH(&adapter->hw);
1772 usleep_range(10000, 20000);
1773
1774 if (adapter->test_icr & mask) {
1775 *data = 3;
1776 break;
1777 }
1778 }
1779
1780 /*
1781 * Enable the interrupt to be reported in the cause
1782 * register and then force the same interrupt and see
1783 * if one gets posted. If an interrupt was not posted
1784 * to the bus, the test failed.
1785 */
1786 adapter->test_icr = 0;
1787 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1789 IXGBE_WRITE_FLUSH(&adapter->hw);
1790 usleep_range(10000, 20000);
1791
1792 if (!(adapter->test_icr & mask)) {
1793 *data = 4;
1794 break;
1795 }
1796
1797 if (!shared_int) {
1798 /*
1799 * Disable the other interrupts to be reported in
1800 * the cause register and then force the other
1801 * interrupts and see if any get posted. If
1802 * an interrupt was posted to the bus, the
1803 * test failed.
1804 */
1805 adapter->test_icr = 0;
1806 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1807 ~mask & 0x00007FFF);
1808 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1809 ~mask & 0x00007FFF);
1810 IXGBE_WRITE_FLUSH(&adapter->hw);
1811 usleep_range(10000, 20000);
1812
1813 if (adapter->test_icr) {
1814 *data = 5;
1815 break;
1816 }
1817 }
1818 }
1819
1820 /* Disable all the interrupts */
1821 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1822 IXGBE_WRITE_FLUSH(&adapter->hw);
1823 usleep_range(10000, 20000);
1824
1825 /* Unhook test interrupt handler */
1826 free_irq(irq, netdev);
1827
1828 return *data;
1829}
1830
1831static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1832{
1833 /* Shut down the DMA engines now so they can be reinitialized later.
1834 * Since the test rings and the normally used rings should overlap on
1835 * queue 0, we can just use the standard disable Rx/Tx calls and they
1836 * will take care of disabling the test rings for us.
1837 */
1838
1839 /* first Rx */
1840 ixgbe_disable_rx(adapter);
1841
1842 /* now Tx */
1843 ixgbe_disable_tx(adapter);
1844
1845 ixgbe_reset(adapter);
1846
1847 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1848 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1849}
1850
1851static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1852{
1853 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1854 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1855 struct ixgbe_hw *hw = &adapter->hw;
1856 u32 rctl, reg_data;
1857 int ret_val;
1858 int err;
1859
1860 /* Setup Tx descriptor ring and Tx buffers */
1861 tx_ring->count = IXGBE_DEFAULT_TXD;
1862 tx_ring->queue_index = 0;
1863 tx_ring->dev = &adapter->pdev->dev;
1864 tx_ring->netdev = adapter->netdev;
1865 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1866
1867 err = ixgbe_setup_tx_resources(tx_ring);
1868 if (err)
1869 return 1;
1870
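/* 82599 and later MACs need the global Tx DMA enable (DMATXCTL.TE)
 * set before the test Tx ring is configured
 */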
1871 switch (adapter->hw.mac.type) {
1872 case ixgbe_mac_82599EB:
1873 case ixgbe_mac_X540:
1874 case ixgbe_mac_X550:
1875 case ixgbe_mac_X550EM_x:
1876 case ixgbe_mac_x550em_a:
1877 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1878 reg_data |= IXGBE_DMATXCTL_TE;
1879 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1880 break;
1881 default:
1882 break;
1883 }
1884
1885 ixgbe_configure_tx_ring(adapter, tx_ring);
1886
1887 /* Setup Rx Descriptor ring and Rx buffers */
1888 rx_ring->count = IXGBE_DEFAULT_RXD;
1889 rx_ring->queue_index = 0;
1890 rx_ring->dev = &adapter->pdev->dev;
1891 rx_ring->netdev = adapter->netdev;
1892 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1893
1894 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1895 if (err) {
1896 ret_val = 4;
1897 goto err_nomem;
1898 }
1899
1900 hw->mac.ops.disable_rx(hw);
1901
1902 ixgbe_configure_rx_ring(adapter, rx_ring);
1903
1904 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1905 rctl |= IXGBE_RXCTRL_DMBYPS;
1906 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1907
1908 hw->mac.ops.enable_rx(hw);
1909
1910 return 0;
1911
1912err_nomem:
1913 ixgbe_free_desc_rings(adapter);
1914 return ret_val;
1915}
1916
1917static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1918{
1919 struct ixgbe_hw *hw = &adapter->hw;
1920 u32 reg_data;
1921
1922
1923 /* Setup MAC loopback */
1924 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1925 reg_data |= IXGBE_HLREG0_LPBK;
1926 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1927
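/* open up the Rx filters (broadcast accept, store bad packets,
 * multicast promiscuous) so the looped-back test frames are not
 * filtered out
 */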
1928 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1929 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1930 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1931
1932 /* X540 and X550 need to set the MACC.FLU bit to force link up */
1933 switch (adapter->hw.mac.type) {
1934 case ixgbe_mac_X540:
1935 case ixgbe_mac_X550:
1936 case ixgbe_mac_X550EM_x:
1937 case ixgbe_mac_x550em_a:
1938 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1939 reg_data |= IXGBE_MACC_FLU;
1940 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1941 break;
1942 default:
1943 if (hw->mac.orig_autoc) {
1944 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1945 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1946 } else {
1947 return 10;
1948 }
1949 }
1950 IXGBE_WRITE_FLUSH(hw);
1951 usleep_range(10000, 20000);
1952
1953 /* Disable Atlas Tx lanes; re-enabled in reset path */
1954 if (hw->mac.type == ixgbe_mac_82598EB) {
1955 u8 atlas;
1956
1957 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1958 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1959 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1960
1961 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1962 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1963 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1964
1965 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1966 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1967 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1968
1969 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1970 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1971 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1972 }
1973
1974 return 0;
1975}
1976
1977static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1978{
1979 u32 reg_data;
1980
1981 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1982 reg_data &= ~IXGBE_HLREG0_LPBK;
1983 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1984}
1985
1986static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1987 unsigned int frame_size)
1988{
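/* frame layout: the first half is 0xFF, most of the next quarter is
 * 0xAA, and marker bytes 0xBE/0xAF sit just past the midpoint where
 * ixgbe_check_lbtest_frame() looks for them on the Rx side
 */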
1989 memset(skb->data, 0xFF, frame_size);
1990 frame_size >>= 1;
1991 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1992 skb->data[frame_size + 10] = 0xBE;
1993 skb->data[frame_size + 12] = 0xAF;
1994}
1995
1996static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1997 unsigned int frame_size)
1998{
1999 unsigned char *data;
2000
2001 frame_size >>= 1;
2002
2003 data = page_address(rx_buffer->page) + rx_buffer->page_offset;
2004
2005 return data[3] == 0xFF && data[frame_size + 10] == 0xBE &&
2006 data[frame_size + 12] == 0xAF;
2007}
2008
2009static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
2010 struct ixgbe_ring *tx_ring,
2011 unsigned int size)
2012{
2013 union ixgbe_adv_rx_desc *rx_desc;
2014 u16 rx_ntc, tx_ntc, count = 0;
2015
2016 /* initialize next to clean and descriptor values */
2017 rx_ntc = rx_ring->next_to_clean;
2018 tx_ntc = tx_ring->next_to_clean;
2019 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2020
2021 while (tx_ntc != tx_ring->next_to_use) {
2022 union ixgbe_adv_tx_desc *tx_desc;
2023 struct ixgbe_tx_buffer *tx_buffer;
2024
2025 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
2026
2027 /* if DD is not set transmit has not completed */
2028 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2029 return count;
2030
2031 /* unmap buffer on Tx side */
2032 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2033
2034 /* Free all the Tx ring sk_buffs */
2035 dev_kfree_skb_any(tx_buffer->skb);
2036
2037 /* unmap skb header data */
2038 dma_unmap_single(tx_ring->dev,
2039 dma_unmap_addr(tx_buffer, dma),
2040 dma_unmap_len(tx_buffer, len),
2041 DMA_TO_DEVICE);
2042 dma_unmap_len_set(tx_buffer, len, 0);
2043
2044 /* increment Tx next to clean counter */
2045 tx_ntc++;
2046 if (tx_ntc == tx_ring->count)
2047 tx_ntc = 0;
2048 }
2049
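/* a non-zero write-back length indicates the hardware has filled
 * this Rx descriptor
 */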
2050 while (rx_desc->wb.upper.length) {
2051 struct ixgbe_rx_buffer *rx_buffer;
2052
2053 /* check Rx buffer */
2054 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2055
2056 /* sync Rx buffer for CPU read */
2057 dma_sync_single_for_cpu(rx_ring->dev,
2058 rx_buffer->dma,
2059 ixgbe_rx_bufsz(rx_ring),
2060 DMA_FROM_DEVICE);
2061
2062 /* verify contents of the Rx buffer */
2063 if (ixgbe_check_lbtest_frame(rx_buffer, size))
2064 count++;
2065 else
2066 break;
2067
2068 /* sync Rx buffer for device write */
2069 dma_sync_single_for_device(rx_ring->dev,
2070 rx_buffer->dma,
2071 ixgbe_rx_bufsz(rx_ring),
2072 DMA_FROM_DEVICE);
2073
2074 /* increment Rx next to clean counter */
2075 rx_ntc++;
2076 if (rx_ntc == rx_ring->count)
2077 rx_ntc = 0;
2078
2079 /* fetch next descriptor */
2080 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2081 }
2082
2083 netdev_tx_reset_queue(txring_txq(tx_ring));
2084
2085 /* re-map buffers to ring, store next to clean values */
2086 ixgbe_alloc_rx_buffers(rx_ring, count);
2087 rx_ring->next_to_clean = rx_ntc;
2088 tx_ring->next_to_clean = tx_ntc;
2089
2090 return count;
2091}
2092
2093static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2094{
2095 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2096 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2097 int i, j, lc, good_cnt, ret_val = 0;
2098 unsigned int size = 1024;
2099 netdev_tx_t tx_ret_val;
2100 struct sk_buff *skb;
2101 u32 flags_orig = adapter->flags;
2102
2103 /* DCB can modify the frames on Tx */
2104 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2105
2106 /* allocate test skb */
2107 skb = alloc_skb(size, GFP_KERNEL);
2108 if (!skb)
2109 return 11;
2110
2111 /* place data into test skb */
2112 ixgbe_create_lbtest_frame(skb, size);
2113 skb_put(skb, size);
2114
2115 /*
2116 * Calculate the loop count based on the largest descriptor ring
2117 * The idea is to wrap the largest ring a number of times using 64
2118 * send/receive pairs during each loop
2119 */
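/* e.g. assuming the default 512-descriptor test rings, lc = (512 / 64) * 2 + 1 = 17 */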
2120
2121 if (rx_ring->count <= tx_ring->count)
2122 lc = ((tx_ring->count / 64) * 2) + 1;
2123 else
2124 lc = ((rx_ring->count / 64) * 2) + 1;
2125
2126 for (j = 0; j <= lc; j++) {
2127 /* reset count of good packets */
2128 good_cnt = 0;
2129
2130 /* place 64 packets on the transmit queue */
2131 for (i = 0; i < 64; i++) {
2132 skb_get(skb);
2133 tx_ret_val = ixgbe_xmit_frame_ring(skb,
2134 adapter,
2135 tx_ring);
2136 if (tx_ret_val == NETDEV_TX_OK)
2137 good_cnt++;
2138 }
2139
2140 if (good_cnt != 64) {
2141 ret_val = 12;
2142 break;
2143 }
2144
2145 /* allow 200 milliseconds for packets to go from Tx to Rx */
2146 msleep(200);
2147
2148 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2149 if (good_cnt != 64) {
2150 ret_val = 13;
2151 break;
2152 }
2153 }
2154
2155 /* free the original skb */
2156 kfree_skb(skb);
2157 adapter->flags = flags_orig;
2158
2159 return ret_val;
2160}
2161
2162static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2163{
2164 *data = ixgbe_setup_desc_rings(adapter);
2165 if (*data)
2166 goto out;
2167 *data = ixgbe_setup_loopback_test(adapter);
2168 if (*data)
2169 goto err_loopback;
2170 *data = ixgbe_run_loopback_test(adapter);
2171 ixgbe_loopback_cleanup(adapter);
2172
2173err_loopback:
2174 ixgbe_free_desc_rings(adapter);
2175out:
2176 return *data;
2177}
2178
2179static void ixgbe_diag_test(struct net_device *netdev,
2180 struct ethtool_test *eth_test, u64 *data)
2181{
2182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2183 bool if_running = netif_running(netdev);
2184
2185 if (ixgbe_removed(adapter->hw.hw_addr)) {
2186 e_err(hw, "Adapter removed - test blocked\n");
2187 data[0] = 1;
2188 data[1] = 1;
2189 data[2] = 1;
2190 data[3] = 1;
2191 data[4] = 1;
2192 eth_test->flags |= ETH_TEST_FL_FAILED;
2193 return;
2194 }
2195 set_bit(__IXGBE_TESTING, &adapter->state);
2196 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2197 struct ixgbe_hw *hw = &adapter->hw;
2198
2199 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2200 int i;
2201 for (i = 0; i < adapter->num_vfs; i++) {
2202 if (adapter->vfinfo[i].clear_to_send) {
2203 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2204 data[0] = 1;
2205 data[1] = 1;
2206 data[2] = 1;
2207 data[3] = 1;
2208 data[4] = 1;
2209 eth_test->flags |= ETH_TEST_FL_FAILED;
2210 clear_bit(__IXGBE_TESTING,
2211 &adapter->state);
2212 return;
2213 }
2214 }
2215 }
2216
2217 /* Offline tests */
2218 e_info(hw, "offline testing starting\n");
2219
2220 /* Link test performed before hardware reset so autoneg doesn't
2221 * interfere with test result
2222 */
2223 if (ixgbe_link_test(adapter, &data[4]))
2224 eth_test->flags |= ETH_TEST_FL_FAILED;
2225
2226 if (if_running)
2227 /* indicate we're in test mode */
2228 ixgbe_close(netdev);
2229 else
2230 ixgbe_reset(adapter);
2231
2232 e_info(hw, "register testing starting\n");
2233 if (ixgbe_reg_test(adapter, &data[0]))
2234 eth_test->flags |= ETH_TEST_FL_FAILED;
2235
2236 ixgbe_reset(adapter);
2237 e_info(hw, "eeprom testing starting\n");
2238 if (ixgbe_eeprom_test(adapter, &data[1]))
2239 eth_test->flags |= ETH_TEST_FL_FAILED;
2240
2241 ixgbe_reset(adapter);
2242 e_info(hw, "interrupt testing starting\n");
2243 if (ixgbe_intr_test(adapter, &data[2]))
2244 eth_test->flags |= ETH_TEST_FL_FAILED;
2245
2246 /* If SRIOV or VMDq is enabled then skip MAC
2247 * loopback diagnostic. */
2248 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2249 IXGBE_FLAG_VMDQ_ENABLED)) {
2250 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2251 data[3] = 0;
2252 goto skip_loopback;
2253 }
2254
2255 ixgbe_reset(adapter);
2256 e_info(hw, "loopback testing starting\n");
2257 if (ixgbe_loopback_test(adapter, &data[3]))
2258 eth_test->flags |= ETH_TEST_FL_FAILED;
2259
2260skip_loopback:
2261 ixgbe_reset(adapter);
2262
2263 /* clear testing bit and return adapter to previous state */
2264 clear_bit(__IXGBE_TESTING, &adapter->state);
2265 if (if_running)
2266 ixgbe_open(netdev);
2267 else if (hw->mac.ops.disable_tx_laser)
2268 hw->mac.ops.disable_tx_laser(hw);
2269 } else {
2270 e_info(hw, "online testing starting\n");
2271
2272 /* Online tests */
2273 if (ixgbe_link_test(adapter, &data[4]))
2274 eth_test->flags |= ETH_TEST_FL_FAILED;
2275
2276 /* Offline tests aren't run; pass by default */
2277 data[0] = 0;
2278 data[1] = 0;
2279 data[2] = 0;
2280 data[3] = 0;
2281
2282 clear_bit(__IXGBE_TESTING, &adapter->state);
2283 }
2284}
2285
2286static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2287 struct ethtool_wolinfo *wol)
2288{
2289 struct ixgbe_hw *hw = &adapter->hw;
2290 int retval = 0;
2291
2292 /* WOL not supported for all devices */
2293 if (!ixgbe_wol_supported(adapter, hw->device_id,
2294 hw->subsystem_device_id)) {
2295 retval = 1;
2296 wol->supported = 0;
2297 }
2298
2299 return retval;
2300}
2301
2302static void ixgbe_get_wol(struct net_device *netdev,
2303 struct ethtool_wolinfo *wol)
2304{
2305 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2306
2307 wol->supported = WAKE_UCAST | WAKE_MCAST |
2308 WAKE_BCAST | WAKE_MAGIC;
2309 wol->wolopts = 0;
2310
2311 if (ixgbe_wol_exclusion(adapter, wol) ||
2312 !device_can_wakeup(&adapter->pdev->dev))
2313 return;
2314
2315 if (adapter->wol & IXGBE_WUFC_EX)
2316 wol->wolopts |= WAKE_UCAST;
2317 if (adapter->wol & IXGBE_WUFC_MC)
2318 wol->wolopts |= WAKE_MCAST;
2319 if (adapter->wol & IXGBE_WUFC_BC)
2320 wol->wolopts |= WAKE_BCAST;
2321 if (adapter->wol & IXGBE_WUFC_MAG)
2322 wol->wolopts |= WAKE_MAGIC;
2323}
2324
2325static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2326{
2327 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2328
2329 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2330 WAKE_FILTER))
2331 return -EOPNOTSUPP;
2332
2333 if (ixgbe_wol_exclusion(adapter, wol))
2334 return wol->wolopts ? -EOPNOTSUPP : 0;
2335
2336 adapter->wol = 0;
2337
2338 if (wol->wolopts & WAKE_UCAST)
2339 adapter->wol |= IXGBE_WUFC_EX;
2340 if (wol->wolopts & WAKE_MCAST)
2341 adapter->wol |= IXGBE_WUFC_MC;
2342 if (wol->wolopts & WAKE_BCAST)
2343 adapter->wol |= IXGBE_WUFC_BC;
2344 if (wol->wolopts & WAKE_MAGIC)
2345 adapter->wol |= IXGBE_WUFC_MAG;
2346
2347 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2348
2349 return 0;
2350}
2351
2352static int ixgbe_nway_reset(struct net_device *netdev)
2353{
2354 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2355
2356 if (netif_running(netdev))
2357 ixgbe_reinit_locked(adapter);
2358
2359 return 0;
2360}
2361
2362static int ixgbe_set_phys_id(struct net_device *netdev,
2363 enum ethtool_phys_id_state state)
2364{
2365 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2366 struct ixgbe_hw *hw = &adapter->hw;
2367
2368 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2369 return -EOPNOTSUPP;
2370
2371 switch (state) {
2372 case ETHTOOL_ID_ACTIVE:
2373 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2374 return 2;
2375
2376 case ETHTOOL_ID_ON:
2377 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2378 break;
2379
2380 case ETHTOOL_ID_OFF:
2381 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2382 break;
2383
2384 case ETHTOOL_ID_INACTIVE:
2385 /* Restore LED settings */
2386 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2387 break;
2388 }
2389
2390 return 0;
2391}
2392
2393static int ixgbe_get_coalesce(struct net_device *netdev,
2394 struct ethtool_coalesce *ec,
2395 struct kernel_ethtool_coalesce *kernel_coal,
2396 struct netlink_ext_ack *extack)
2397{
2398 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2399
2400 /* only valid if in constant ITR mode */
2401 if (adapter->rx_itr_setting <= 1)
2402 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2403 else
2404 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2405
2406 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2407 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2408 return 0;
2409
2410 /* only valid if in constant ITR mode */
2411 if (adapter->tx_itr_setting <= 1)
2412 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2413 else
2414 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2415
2416 return 0;
2417}
2418
2419/*
2420 * this function must be called before setting the new value of
2421 * rx_itr_setting
2422 */
2423static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2424{
2425 struct net_device *netdev = adapter->netdev;
2426
2427 /* nothing to do if LRO or RSC are not enabled */
2428 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2429 !(netdev->features & NETIF_F_LRO))
2430 return false;
2431
2432 /* check the feature flag value and enable RSC if necessary */
2433 if (adapter->rx_itr_setting == 1 ||
2434 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2435 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2436 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2437 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2438 return true;
2439 }
2440 /* if interrupt rate is too high then disable RSC */
2441 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2442 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2443 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2444 return true;
2445 }
2446 return false;
2447}
2448
2449static int ixgbe_set_coalesce(struct net_device *netdev,
2450 struct ethtool_coalesce *ec,
2451 struct kernel_ethtool_coalesce *kernel_coal,
2452 struct netlink_ext_ack *extack)
2453{
2454 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2455 struct ixgbe_q_vector *q_vector;
2456 int i;
2457 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2458 bool need_reset = false;
2459
2460 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2461 /* reject Tx specific changes in case of mixed RxTx vectors */
2462 if (ec->tx_coalesce_usecs)
2463 return -EINVAL;
2464 tx_itr_prev = adapter->rx_itr_setting;
2465 } else {
2466 tx_itr_prev = adapter->tx_itr_setting;
2467 }
2468
2469 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2470 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2471 return -EINVAL;
2472
2473 if (ec->rx_coalesce_usecs > 1)
2474 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2475 else
2476 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2477
2478 if (adapter->rx_itr_setting == 1)
2479 rx_itr_param = IXGBE_20K_ITR;
2480 else
2481 rx_itr_param = adapter->rx_itr_setting;
2482
2483 if (ec->tx_coalesce_usecs > 1)
2484 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2485 else
2486 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2487
2488 if (adapter->tx_itr_setting == 1)
2489 tx_itr_param = IXGBE_12K_ITR;
2490 else
2491 tx_itr_param = adapter->tx_itr_setting;
2492
2493 /* mixed Rx/Tx */
2494 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2495 adapter->tx_itr_setting = adapter->rx_itr_setting;
2496
2497 /* detect ITR changes that require update of TXDCTL.WTHRESH */
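/* a reset is needed whenever the old and new settings fall on opposite
 * sides of the constant-ITR/100K boundary, since that changes the Tx
 * write-back threshold programmed when the rings are configured
 */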
2498 if ((adapter->tx_itr_setting != 1) &&
2499 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2500 if ((tx_itr_prev == 1) ||
2501 (tx_itr_prev >= IXGBE_100K_ITR))
2502 need_reset = true;
2503 } else {
2504 if ((tx_itr_prev != 1) &&
2505 (tx_itr_prev < IXGBE_100K_ITR))
2506 need_reset = true;
2507 }
2508
2509 /* check the old value and enable RSC if necessary */
2510 need_reset |= ixgbe_update_rsc(adapter);
2511
2512 for (i = 0; i < adapter->num_q_vectors; i++) {
2513 q_vector = adapter->q_vector[i];
2514 if (q_vector->tx.count && !q_vector->rx.count)
2515 /* tx only */
2516 q_vector->itr = tx_itr_param;
2517 else
2518 /* rx only or mixed */
2519 q_vector->itr = rx_itr_param;
2520 ixgbe_write_eitr(q_vector);
2521 }
2522
2523 /*
2524 * do reset here at the end to make sure EITR==0 case is handled
2525 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2526 * also locks in RSC enable/disable which requires reset
2527 */
2528 if (need_reset)
2529 ixgbe_do_reset(netdev);
2530
2531 return 0;
2532}
2533
2534static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2535 struct ethtool_rxnfc *cmd)
2536{
2537 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2538 struct ethtool_rx_flow_spec *fsp =
2539 (struct ethtool_rx_flow_spec *)&cmd->fs;
2540 struct hlist_node *node2;
2541 struct ixgbe_fdir_filter *rule = NULL;
2542
2543 /* report total rule count */
2544 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2545
2546 hlist_for_each_entry_safe(rule, node2,
2547 &adapter->fdir_filter_list, fdir_node) {
2548 if (fsp->location <= rule->sw_idx)
2549 break;
2550 }
2551
2552 if (!rule || fsp->location != rule->sw_idx)
2553 return -EINVAL;
2554
2555 /* fill out the flow spec entry */
2556
2557 /* set flow type field */
2558 switch (rule->filter.formatted.flow_type) {
2559 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2560 fsp->flow_type = TCP_V4_FLOW;
2561 break;
2562 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2563 fsp->flow_type = UDP_V4_FLOW;
2564 break;
2565 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2566 fsp->flow_type = SCTP_V4_FLOW;
2567 break;
2568 case IXGBE_ATR_FLOW_TYPE_IPV4:
2569 fsp->flow_type = IP_USER_FLOW;
2570 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2571 fsp->h_u.usr_ip4_spec.proto = 0;
2572 fsp->m_u.usr_ip4_spec.proto = 0;
2573 break;
2574 default:
2575 return -EINVAL;
2576 }
2577
2578 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2579 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2580 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2581 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2582 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2583 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2584 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2585 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2586 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2587 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2588 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2589 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2590 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2591 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2592 fsp->flow_type |= FLOW_EXT;
2593
2594 /* record action */
2595 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2596 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2597 else
2598 fsp->ring_cookie = rule->action;
2599
2600 return 0;
2601}
2602
2603static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2604 struct ethtool_rxnfc *cmd,
2605 u32 *rule_locs)
2606{
2607 struct hlist_node *node2;
2608 struct ixgbe_fdir_filter *rule;
2609 int cnt = 0;
2610
2611 /* report total rule count */
2612 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2613
2614 hlist_for_each_entry_safe(rule, node2,
2615 &adapter->fdir_filter_list, fdir_node) {
2616 if (cnt == cmd->rule_cnt)
2617 return -EMSGSIZE;
2618 rule_locs[cnt] = rule->sw_idx;
2619 cnt++;
2620 }
2621
2622 cmd->rule_cnt = cnt;
2623
2624 return 0;
2625}
2626
2627static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2628 struct ethtool_rxnfc *cmd)
2629{
2630 cmd->data = 0;
2631
2632 /* Report default options for RSS on ixgbe */
2633 switch (cmd->flow_type) {
2634 case TCP_V4_FLOW:
2635 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2636 fallthrough;
2637 case UDP_V4_FLOW:
2638 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2639 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2640 fallthrough;
2641 case SCTP_V4_FLOW:
2642 case AH_ESP_V4_FLOW:
2643 case AH_V4_FLOW:
2644 case ESP_V4_FLOW:
2645 case IPV4_FLOW:
2646 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2647 break;
2648 case TCP_V6_FLOW:
2649 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2650 fallthrough;
2651 case UDP_V6_FLOW:
2652 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2653 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2654 fallthrough;
2655 case SCTP_V6_FLOW:
2656 case AH_ESP_V6_FLOW:
2657 case AH_V6_FLOW:
2658 case ESP_V6_FLOW:
2659 case IPV6_FLOW:
2660 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2661 break;
2662 default:
2663 return -EINVAL;
2664 }
2665
2666 return 0;
2667}
2668
2669static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2670{
2671 if (adapter->hw.mac.type < ixgbe_mac_X550)
2672 return 16;
2673 else
2674 return 64;
2675}
2676
2677static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2678 u32 *rule_locs)
2679{
2680 struct ixgbe_adapter *adapter = netdev_priv(dev);
2681 int ret = -EOPNOTSUPP;
2682
2683 switch (cmd->cmd) {
2684 case ETHTOOL_GRXRINGS:
2685 cmd->data = min_t(int, adapter->num_rx_queues,
2686 ixgbe_rss_indir_tbl_max(adapter));
2687 ret = 0;
2688 break;
2689 case ETHTOOL_GRXCLSRLCNT:
2690 cmd->rule_cnt = adapter->fdir_filter_count;
2691 ret = 0;
2692 break;
2693 case ETHTOOL_GRXCLSRULE:
2694 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2695 break;
2696 case ETHTOOL_GRXCLSRLALL:
2697 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2698 break;
2699 case ETHTOOL_GRXFH:
2700 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2701 break;
2702 default:
2703 break;
2704 }
2705
2706 return ret;
2707}
2708
2709int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2710 struct ixgbe_fdir_filter *input,
2711 u16 sw_idx)
2712{
2713 struct ixgbe_hw *hw = &adapter->hw;
2714 struct hlist_node *node2;
2715 struct ixgbe_fdir_filter *rule, *parent;
2716 int err = -EINVAL;
2717
2718 parent = NULL;
2719 rule = NULL;
2720
2721 hlist_for_each_entry_safe(rule, node2,
2722 &adapter->fdir_filter_list, fdir_node) {
2723 /* hash found, or no matching entry */
2724 if (rule->sw_idx >= sw_idx)
2725 break;
2726 parent = rule;
2727 }
2728
2729 /* if there is an old rule occupying our place remove it */
2730 if (rule && (rule->sw_idx == sw_idx)) {
2731 if (!input || (rule->filter.formatted.bkt_hash !=
2732 input->filter.formatted.bkt_hash)) {
2733 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2734 &rule->filter,
2735 sw_idx);
2736 }
2737
2738 hlist_del(&rule->fdir_node);
2739 kfree(rule);
2740 adapter->fdir_filter_count--;
2741 }
2742
2743 /*
2744 * If no input was provided this was a delete; err should be 0 if a rule
2745 * was successfully found and removed from the list, else -EINVAL.
2746 */
2747 if (!input)
2748 return err;
2749
2750 /* initialize node and set software index */
2751 INIT_HLIST_NODE(&input->fdir_node);
2752
2753 /* add filter to the list */
2754 if (parent)
2755 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2756 else
2757 hlist_add_head(&input->fdir_node,
2758 &adapter->fdir_filter_list);
2759
2760 /* update counts */
2761 adapter->fdir_filter_count++;
2762
2763 return 0;
2764}
2765
2766static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2767 u8 *flow_type)
2768{
2769 switch (fsp->flow_type & ~FLOW_EXT) {
2770 case TCP_V4_FLOW:
2771 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2772 break;
2773 case UDP_V4_FLOW:
2774 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2775 break;
2776 case SCTP_V4_FLOW:
2777 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2778 break;
2779 case IP_USER_FLOW:
2780 switch (fsp->h_u.usr_ip4_spec.proto) {
2781 case IPPROTO_TCP:
2782 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2783 break;
2784 case IPPROTO_UDP:
2785 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2786 break;
2787 case IPPROTO_SCTP:
2788 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2789 break;
2790 case 0:
2791 if (!fsp->m_u.usr_ip4_spec.proto) {
2792 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2793 break;
2794 }
2795 fallthrough;
2796 default:
2797 return 0;
2798 }
2799 break;
2800 default:
2801 return 0;
2802 }
2803
2804 return 1;
2805}
2806
2807static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2808 struct ethtool_rxnfc *cmd)
2809{
2810 struct ethtool_rx_flow_spec *fsp =
2811 (struct ethtool_rx_flow_spec *)&cmd->fs;
2812 struct ixgbe_hw *hw = &adapter->hw;
2813 struct ixgbe_fdir_filter *input;
2814 union ixgbe_atr_input mask;
2815 u8 queue;
2816 int err;
2817
2818 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2819 return -EOPNOTSUPP;
2820
2821 /* ring_cookie is masked into a set of queues and ixgbe pools, or
2822 * we use the drop index.
2823 */
2824 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2825 queue = IXGBE_FDIR_DROP_QUEUE;
2826 } else {
2827 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2828 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2829
2830 if (!vf && (ring >= adapter->num_rx_queues))
2831 return -EINVAL;
2832 else if (vf &&
2833 ((vf > adapter->num_vfs) ||
2834 ring >= adapter->num_rx_queues_per_pool))
2835 return -EINVAL;
2836
2837 /* Map the ring onto the absolute queue index */
2838 if (!vf)
2839 queue = adapter->rx_ring[ring]->reg_idx;
2840 else
2841 queue = ((vf - 1) *
2842 adapter->num_rx_queues_per_pool) + ring;
2843 }
2844
2845 /* Don't allow indexes to exist outside of available space */
2846 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2847 e_err(drv, "Location out of range\n");
2848 return -EINVAL;
2849 }
2850
2851 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2852 if (!input)
2853 return -ENOMEM;
2854
2855 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2856
2857 /* set SW index */
2858 input->sw_idx = fsp->location;
2859
2860 /* record flow type */
2861 if (!ixgbe_flowspec_to_flow_type(fsp,
2862 &input->filter.formatted.flow_type)) {
2863 e_err(drv, "Unrecognized flow type\n");
2864 goto err_out;
2865 }
2866
2867 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2868 IXGBE_ATR_L4TYPE_MASK;
2869
2870 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2871 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2872
2873 /* Copy input into formatted structures */
2874 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2875 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2876 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2877 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2878 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2879 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2880 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2881 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2882
2883 if (fsp->flow_type & FLOW_EXT) {
2884 input->filter.formatted.vm_pool =
2885 (unsigned char)ntohl(fsp->h_ext.data[1]);
2886 mask.formatted.vm_pool =
2887 (unsigned char)ntohl(fsp->m_ext.data[1]);
2888 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2889 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2890 input->filter.formatted.flex_bytes =
2891 fsp->h_ext.vlan_etype;
2892 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2893 }
2894
2895 /* determine if we need to drop or route the packet */
2896 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2897 input->action = IXGBE_FDIR_DROP_QUEUE;
2898 else
2899 input->action = fsp->ring_cookie;
2900
2901 spin_lock(&adapter->fdir_perfect_lock);
2902
2903 if (hlist_empty(&adapter->fdir_filter_list)) {
2904 /* save mask and program input mask into HW */
2905 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2906 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2907 if (err) {
2908 e_err(drv, "Error writing mask\n");
2909 goto err_out_w_lock;
2910 }
2911 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2912 e_err(drv, "Only one mask supported per port\n");
2913 goto err_out_w_lock;
2914 }
2915
2916 /* apply mask and compute/store hash */
2917 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2918
2919 /* program filters to filter memory */
2920 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2921 &input->filter, input->sw_idx, queue);
2922 if (err)
2923 goto err_out_w_lock;
2924
2925 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2926
2927 spin_unlock(&adapter->fdir_perfect_lock);
2928
2929 return err;
2930err_out_w_lock:
2931 spin_unlock(&adapter->fdir_perfect_lock);
2932err_out:
2933 kfree(input);
2934 return -EINVAL;
2935}
2936
2937static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2938 struct ethtool_rxnfc *cmd)
2939{
2940 struct ethtool_rx_flow_spec *fsp =
2941 (struct ethtool_rx_flow_spec *)&cmd->fs;
2942 int err;
2943
2944 spin_lock(&adapter->fdir_perfect_lock);
2945 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2946 spin_unlock(&adapter->fdir_perfect_lock);
2947
2948 return err;
2949}
2950
2951#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2952 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2953static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2954 struct ethtool_rxnfc *nfc)
2955{
2956 u32 flags2 = adapter->flags2;
2957
2958 /*
2959 * RSS does not support anything other than hashing
2960 * to queues on src and dst IPs and ports
2961 */
2962 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2963 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2964 return -EINVAL;
2965
2966 switch (nfc->flow_type) {
2967 case TCP_V4_FLOW:
2968 case TCP_V6_FLOW:
2969 if (!(nfc->data & RXH_IP_SRC) ||
2970 !(nfc->data & RXH_IP_DST) ||
2971 !(nfc->data & RXH_L4_B_0_1) ||
2972 !(nfc->data & RXH_L4_B_2_3))
2973 return -EINVAL;
2974 break;
2975 case UDP_V4_FLOW:
2976 if (!(nfc->data & RXH_IP_SRC) ||
2977 !(nfc->data & RXH_IP_DST))
2978 return -EINVAL;
2979 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2980 case 0:
2981 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2982 break;
2983 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2984 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2985 break;
2986 default:
2987 return -EINVAL;
2988 }
2989 break;
2990 case UDP_V6_FLOW:
2991 if (!(nfc->data & RXH_IP_SRC) ||
2992 !(nfc->data & RXH_IP_DST))
2993 return -EINVAL;
2994 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2995 case 0:
2996 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2997 break;
2998 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2999 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
3000 break;
3001 default:
3002 return -EINVAL;
3003 }
3004 break;
3005 case AH_ESP_V4_FLOW:
3006 case AH_V4_FLOW:
3007 case ESP_V4_FLOW:
3008 case SCTP_V4_FLOW:
3009 case AH_ESP_V6_FLOW:
3010 case AH_V6_FLOW:
3011 case ESP_V6_FLOW:
3012 case SCTP_V6_FLOW:
3013 if (!(nfc->data & RXH_IP_SRC) ||
3014 !(nfc->data & RXH_IP_DST) ||
3015 (nfc->data & RXH_L4_B_0_1) ||
3016 (nfc->data & RXH_L4_B_2_3))
3017 return -EINVAL;
3018 break;
3019 default:
3020 return -EINVAL;
3021 }
3022
3023 /* if we changed something we need to update flags */
3024 if (flags2 != adapter->flags2) {
3025 struct ixgbe_hw *hw = &adapter->hw;
3026 u32 mrqc;
3027 unsigned int pf_pool = adapter->num_vfs;
3028
3029 if ((hw->mac.type >= ixgbe_mac_X550) &&
3030 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3031 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
3032 else
3033 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
3034
3035 if ((flags2 & UDP_RSS_FLAGS) &&
3036 !(adapter->flags2 & UDP_RSS_FLAGS))
3037 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
3038
3039 adapter->flags2 = flags2;
3040
3041 /* Perform hash on these packet types */
3042 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
3043 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
3044 | IXGBE_MRQC_RSS_FIELD_IPV6
3045 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3046
3047 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3048 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3049
3050 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3051 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3052
3053 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3054 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3055
3056 if ((hw->mac.type >= ixgbe_mac_X550) &&
3057 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3058 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3059 else
3060 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3061 }
3062
3063 return 0;
3064}
3065
3066static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3067{
3068 struct ixgbe_adapter *adapter = netdev_priv(dev);
3069 int ret = -EOPNOTSUPP;
3070
3071 switch (cmd->cmd) {
3072 case ETHTOOL_SRXCLSRLINS:
3073 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3074 break;
3075 case ETHTOOL_SRXCLSRLDEL:
3076 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3077 break;
3078 case ETHTOOL_SRXFH:
3079 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3080 break;
3081 default:
3082 break;
3083 }
3084
3085 return ret;
3086}
3087
3088static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3089{
3090 return IXGBE_RSS_KEY_SIZE;
3091}
3092
3093static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3094{
3095 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3096
3097 return ixgbe_rss_indir_tbl_entries(adapter);
3098}
3099
3100static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3101{
3102 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3103 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3104
3105 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3106 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3107
3108 for (i = 0; i < reta_size; i++)
3109 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3110}
3111
3112static int ixgbe_get_rxfh(struct net_device *netdev,
3113 struct ethtool_rxfh_param *rxfh)
3114{
3115 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3116
3117 rxfh->hfunc = ETH_RSS_HASH_TOP;
3118
3119 if (rxfh->indir)
3120 ixgbe_get_reta(adapter, rxfh->indir);
3121
3122 if (rxfh->key)
3123 memcpy(rxfh->key, adapter->rss_key,
3124 ixgbe_get_rxfh_key_size(netdev));
3125
3126 return 0;
3127}
3128
3129static int ixgbe_set_rxfh(struct net_device *netdev,
3130 struct ethtool_rxfh_param *rxfh,
3131 struct netlink_ext_ack *extack)
3132{
3133 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3134 int i;
3135 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3136
3137 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3138 rxfh->hfunc != ETH_RSS_HASH_TOP)
3139 return -EOPNOTSUPP;
3140
3141 /* Fill out the redirection table */
3142 if (rxfh->indir) {
3143 int max_queues = min_t(int, adapter->num_rx_queues,
3144 ixgbe_rss_indir_tbl_max(adapter));
3145
3146 /* Allow at least 2 queues w/ SR-IOV. */
3147 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3148 (max_queues < 2))
3149 max_queues = 2;
3150
3151 /* Verify user input. */
3152 for (i = 0; i < reta_entries; i++)
3153 if (rxfh->indir[i] >= max_queues)
3154 return -EINVAL;
3155
3156 for (i = 0; i < reta_entries; i++)
3157 adapter->rss_indir_tbl[i] = rxfh->indir[i];
3158
3159 ixgbe_store_reta(adapter);
3160 }
3161
3162 /* Fill out the rss hash key */
3163 if (rxfh->key) {
3164 memcpy(adapter->rss_key, rxfh->key,
3165 ixgbe_get_rxfh_key_size(netdev));
3166 ixgbe_store_key(adapter);
3167 }
3168
3169 return 0;
3170}
3171
3172static int ixgbe_get_ts_info(struct net_device *dev,
3173 struct kernel_ethtool_ts_info *info)
3174{
3175 struct ixgbe_adapter *adapter = netdev_priv(dev);
3176
3177 /* we always support timestamping disabled */
3178 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3179
3180 switch (adapter->hw.mac.type) {
3181 case ixgbe_mac_X550:
3182 case ixgbe_mac_X550EM_x:
3183 case ixgbe_mac_x550em_a:
3184 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3185 break;
3186 case ixgbe_mac_X540:
3187 case ixgbe_mac_82599EB:
3188 info->rx_filters |=
3189 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3190 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3191 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3192 break;
3193 default:
3194 return ethtool_op_get_ts_info(dev, info);
3195 }
3196
3197 info->so_timestamping =
3198 SOF_TIMESTAMPING_TX_SOFTWARE |
3199 SOF_TIMESTAMPING_TX_HARDWARE |
3200 SOF_TIMESTAMPING_RX_HARDWARE |
3201 SOF_TIMESTAMPING_RAW_HARDWARE;
3202
3203 if (adapter->ptp_clock)
3204 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3205
3206 info->tx_types =
3207 BIT(HWTSTAMP_TX_OFF) |
3208 BIT(HWTSTAMP_TX_ON);
3209
3210 return 0;
3211}
3212
3213static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3214{
3215 unsigned int max_combined;
3216 u8 tcs = adapter->hw_tcs;
3217
3218 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3219 /* We only support one q_vector without MSI-X */
3220 max_combined = 1;
3221 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3222 /* Limit value based on the queue mask */
3223 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3224 } else if (tcs > 1) {
3225 /* For DCB report channels per traffic class */
3226 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3227 /* 8 TC w/ 4 queues per TC */
3228 max_combined = 4;
3229 } else if (tcs > 4) {
3230 /* 8 TC w/ 8 queues per TC */
3231 max_combined = 8;
3232 } else {
3233 /* 4 TC w/ 16 queues per TC */
3234 max_combined = 16;
3235 }
3236 } else if (adapter->atr_sample_rate) {
3237 /* support up to 64 queues with ATR */
3238 max_combined = IXGBE_MAX_FDIR_INDICES;
3239 } else {
3240 /* support up to 16 queues with RSS */
3241 max_combined = ixgbe_max_rss_indices(adapter);
3242 }
3243
3244 return min_t(int, max_combined, num_online_cpus());
3245}
3246
3247static void ixgbe_get_channels(struct net_device *dev,
3248 struct ethtool_channels *ch)
3249{
3250 struct ixgbe_adapter *adapter = netdev_priv(dev);
3251
3252 /* report maximum channels */
3253 ch->max_combined = ixgbe_max_channels(adapter);
3254
3255 /* report info for other vector */
3256 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3257 ch->max_other = NON_Q_VECTORS;
3258 ch->other_count = NON_Q_VECTORS;
3259 }
3260
3261 /* record RSS queues */
3262 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3263
3264 /* nothing else to report if RSS is disabled */
3265 if (ch->combined_count == 1)
3266 return;
3267
3268 /* we do not support ATR queueing if SR-IOV is enabled */
3269 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3270 return;
3271
3272 /* same thing goes for being DCB enabled */
3273 if (adapter->hw_tcs > 1)
3274 return;
3275
3276 /* if ATR is disabled we can exit */
3277 if (!adapter->atr_sample_rate)
3278 return;
3279
3280 /* report flow director queues as maximum channels */
3281 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3282}
3283
3284static int ixgbe_set_channels(struct net_device *dev,
3285 struct ethtool_channels *ch)
3286{
3287 struct ixgbe_adapter *adapter = netdev_priv(dev);
3288 unsigned int count = ch->combined_count;
3289 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3290
3291 /* verify they are not requesting separate vectors */
3292 if (!count || ch->rx_count || ch->tx_count)
3293 return -EINVAL;
3294
3295 /* verify other_count has not changed */
3296 if (ch->other_count != NON_Q_VECTORS)
3297 return -EINVAL;
3298
3299 /* verify the number of channels does not exceed hardware limits */
3300 if (count > ixgbe_max_channels(adapter))
3301 return -EINVAL;
3302
3303 /* update feature limits from largest to smallest supported values */
3304 adapter->ring_feature[RING_F_FDIR].limit = count;
3305
3306 /* cap RSS limit */
3307 if (count > max_rss_indices)
3308 count = max_rss_indices;
3309 adapter->ring_feature[RING_F_RSS].limit = count;
3310
3311#ifdef IXGBE_FCOE
3312 /* cap FCoE limit at 8 */
3313 if (count > IXGBE_FCRETA_SIZE)
3314 count = IXGBE_FCRETA_SIZE;
3315 adapter->ring_feature[RING_F_FCOE].limit = count;
3316
3317#endif
3318 /* use setup TC to update any traffic class queue mapping */
3319 return ixgbe_setup_tc(dev, adapter->hw_tcs);
3320}
3321
3322static int ixgbe_get_module_info(struct net_device *dev,
3323 struct ethtool_modinfo *modinfo)
3324{
3325 struct ixgbe_adapter *adapter = netdev_priv(dev);
3326 struct ixgbe_hw *hw = &adapter->hw;
3327 u8 sff8472_rev, addr_mode;
3328 bool page_swap = false;
3329 int status;
3330
3331 if (hw->phy.type == ixgbe_phy_fw)
3332 return -ENXIO;
3333
3334 /* Check whether we support SFF-8472 or not */
3335 status = hw->phy.ops.read_i2c_eeprom(hw,
3336 IXGBE_SFF_SFF_8472_COMP,
3337 &sff8472_rev);
3338 if (status)
3339 return -EIO;
3340
3341 /* addressing mode is not supported */
3342 /* check whether the module requires an address change, which is not supported */
3343 IXGBE_SFF_SFF_8472_SWAP,
3344 &addr_mode);
3345 if (status)
3346 return -EIO;
3347
3348 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3349 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3350 page_swap = true;
3351 }
3352
3353 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3354 !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3355 /* We have an SFP, but it does not support SFF-8472 */
3356 modinfo->type = ETH_MODULE_SFF_8079;
3357 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3358 } else {
3359 /* We have an SFP which supports a revision of SFF-8472. */
3360 modinfo->type = ETH_MODULE_SFF_8472;
3361 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3362 }
3363
3364 return 0;
3365}
3366
3367static int ixgbe_get_module_eeprom(struct net_device *dev,
3368 struct ethtool_eeprom *ee,
3369 u8 *data)
3370{
3371 struct ixgbe_adapter *adapter = netdev_priv(dev);
3372 struct ixgbe_hw *hw = &adapter->hw;
3373 int status = -EFAULT;
3374 u8 databyte = 0xFF;
3375 int i = 0;
3376
3377 if (ee->len == 0)
3378 return -EINVAL;
3379
3380 if (hw->phy.type == ixgbe_phy_fw)
3381 return -ENXIO;
3382
3383 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3384 /* I2C reads can take long time */
3385 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3386 return -EBUSY;
3387
3388 if (i < ETH_MODULE_SFF_8079_LEN)
3389 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3390 else
3391 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3392
3393 if (status)
3394 return -EIO;
3395
3396 data[i - ee->offset] = databyte;
3397 }
3398
3399 return 0;
3400}
3401
3402static const struct {
3403 ixgbe_link_speed mac_speed;
3404 u32 link_mode;
3405} ixgbe_ls_map[] = {
3406 { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
3407 { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
3408 { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
3409 { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
3410 { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
3411};
3412
3413static const struct {
3414 u32 lp_advertised;
3415 u32 link_mode;
3416} ixgbe_lp_map[] = {
3417 { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
3418 { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
3419 { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
3420 { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
3421 { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
3422 { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
3423};
3424
3425static int
3426ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
3427{
3428 __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
3429 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3430 struct ixgbe_hw *hw = &adapter->hw;
3431 int rc;
3432 u16 i;
3433
3434 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3435 if (rc)
3436 return rc;
3437
3438 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3439 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3440 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3441 edata->lp_advertised);
3442 }
3443
3444 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3445 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3446 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3447 edata->supported);
3448 }
3449
3450 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3451 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3452 linkmode_set_bit(ixgbe_lp_map[i].link_mode,
3453 edata->advertised);
3454 }
3455
3456 edata->eee_enabled = !linkmode_empty(edata->advertised);
3457 edata->tx_lpi_enabled = edata->eee_enabled;
3458
3459 linkmode_and(common, edata->advertised, edata->lp_advertised);
3460 edata->eee_active = !linkmode_empty(common);
3461
3462 return 0;
3463}
3464
3465static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
3466{
3467 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3468 struct ixgbe_hw *hw = &adapter->hw;
3469
3470 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3471 return -EOPNOTSUPP;
3472
3473 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3474 return ixgbe_get_eee_fw(adapter, edata);
3475
3476 return -EOPNOTSUPP;
3477}
3478
3479static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
3480{
3481 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3482 struct ixgbe_hw *hw = &adapter->hw;
3483 struct ethtool_keee eee_data;
3484 int ret_val;
3485
3486 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3487 return -EOPNOTSUPP;
3488
3489 memset(&eee_data, 0, sizeof(struct ethtool_keee));
3490
3491 ret_val = ixgbe_get_eee(netdev, &eee_data);
3492 if (ret_val)
3493 return ret_val;
3494
3495 if (eee_data.eee_enabled && !edata->eee_enabled) {
3496 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3497 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3498 return -EINVAL;
3499 }
3500
3501 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3502 e_err(drv,
3503 "Setting EEE Tx LPI timer is not supported\n");
3504 return -EINVAL;
3505 }
3506
3507 if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
3508 e_err(drv,
3509 "Setting EEE advertised speeds is not supported\n");
3510 return -EINVAL;
3511 }
3512 }
3513
3514 if (eee_data.eee_enabled != edata->eee_enabled) {
3515 if (edata->eee_enabled) {
3516 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3517 hw->phy.eee_speeds_advertised =
3518 hw->phy.eee_speeds_supported;
3519 } else {
3520 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3521 hw->phy.eee_speeds_advertised = 0;
3522 }
3523
3524 /* reset link */
3525 if (netif_running(netdev))
3526 ixgbe_reinit_locked(adapter);
3527 else
3528 ixgbe_reset(adapter);
3529 }
3530
3531 return 0;
3532}
3533
3534static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3535{
3536 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3537 u32 priv_flags = 0;
3538
3539 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3540 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3541
3542 if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3543 priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
3544
3545 if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
3546 priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
3547
3548 return priv_flags;
3549}
3550
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;
	unsigned int i;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;

	flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
	if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
		if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			/* Reset primary abort counter */
			for (i = 0; i < adapter->num_vfs; i++)
				adapter->vfinfo[i].primary_abort_count = 0;

			flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
		} else {
			e_info(probe,
			       "Cannot set private flags: Operation not supported\n");
			return -EOPNOTSUPP;
		}
	}

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}

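/* ethtool callbacks for ixgbe netdevs, installed by ixgbe_set_ethtool_ops()
 * below.
 */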
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbe_get_drvinfo,
	.get_regs_len		= ixgbe_get_regs_len,
	.get_regs		= ixgbe_get_regs,
	.get_wol		= ixgbe_get_wol,
	.set_wol		= ixgbe_set_wol,
	.nway_reset		= ixgbe_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= ixgbe_get_eeprom_len,
	.get_eeprom		= ixgbe_get_eeprom,
	.set_eeprom		= ixgbe_set_eeprom,
	.get_ringparam		= ixgbe_get_ringparam,
	.set_ringparam		= ixgbe_set_ringparam,
	.get_pause_stats	= ixgbe_get_pause_stats,
	.get_pauseparam		= ixgbe_get_pauseparam,
	.set_pauseparam		= ixgbe_set_pauseparam,
	.get_msglevel		= ixgbe_get_msglevel,
	.set_msglevel		= ixgbe_set_msglevel,
	.self_test		= ixgbe_diag_test,
	.get_strings		= ixgbe_get_strings,
	.set_phys_id		= ixgbe_set_phys_id,
	.get_sset_count		= ixgbe_get_sset_count,
	.get_ethtool_stats	= ixgbe_get_ethtool_stats,
	.get_coalesce		= ixgbe_get_coalesce,
	.set_coalesce		= ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_eee		= ixgbe_get_eee,
	.set_eee		= ixgbe_set_eee,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_priv_flags		= ixgbe_get_priv_flags,
	.set_priv_flags		= ixgbe_set_priv_flags,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
	.get_link_ksettings	= ixgbe_get_link_ksettings,
	.set_link_ksettings	= ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
132
133/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
134 * we set the num_rx_queues to evaluate to num_tx_queues. This is
135 * used because we do not have a good way to get the max number of
136 * rx queues with CONFIG_RPS disabled.
137 */
138#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
139
140#define IXGBE_QUEUE_STATS_LEN ( \
141 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
142 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
143#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
144#define IXGBE_PB_STATS_LEN ( \
145 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
146 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
147 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
148 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
149 / sizeof(u64))
150#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
151 IXGBE_PB_STATS_LEN + \
152 IXGBE_QUEUE_STATS_LEN)
153
154static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
155 "Register test (offline)", "Eeprom test (offline)",
156 "Interrupt test (offline)", "Loopback test (offline)",
157 "Link test (on/offline)"
158};
159#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
160
161static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
162#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
163 "legacy-rx",
164};
165
166#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
167
168/* currently supported speeds for 10G */
169#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
170 SUPPORTED_10000baseKX4_Full | \
171 SUPPORTED_10000baseKR_Full)
172
173#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
174
175static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
176{
177 if (!ixgbe_isbackplane(hw->phy.media_type))
178 return SUPPORTED_10000baseT_Full;
179
180 switch (hw->device_id) {
181 case IXGBE_DEV_ID_82598:
182 case IXGBE_DEV_ID_82599_KX4:
183 case IXGBE_DEV_ID_82599_KX4_MEZZ:
184 case IXGBE_DEV_ID_X550EM_X_KX4:
185 return SUPPORTED_10000baseKX4_Full;
186 case IXGBE_DEV_ID_82598_BX:
187 case IXGBE_DEV_ID_82599_KR:
188 case IXGBE_DEV_ID_X550EM_X_KR:
189 case IXGBE_DEV_ID_X550EM_X_XFI:
190 return SUPPORTED_10000baseKR_Full;
191 default:
192 return SUPPORTED_10000baseKX4_Full |
193 SUPPORTED_10000baseKR_Full;
194 }
195}
196
197static int ixgbe_get_link_ksettings(struct net_device *netdev,
198 struct ethtool_link_ksettings *cmd)
199{
200 struct ixgbe_adapter *adapter = netdev_priv(netdev);
201 struct ixgbe_hw *hw = &adapter->hw;
202 ixgbe_link_speed supported_link;
203 bool autoneg = false;
204 u32 supported, advertising;
205
206 ethtool_convert_link_mode_to_legacy_u32(&supported,
207 cmd->link_modes.supported);
208
209 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
210
211 /* set the supported link speeds */
212 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
213 supported |= ixgbe_get_supported_10gtypes(hw);
214 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
215 supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
216 SUPPORTED_1000baseKX_Full :
217 SUPPORTED_1000baseT_Full;
218 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
219 supported |= SUPPORTED_100baseT_Full;
220 if (supported_link & IXGBE_LINK_SPEED_10_FULL)
221 supported |= SUPPORTED_10baseT_Full;
222
223 /* default advertised speed if phy.autoneg_advertised isn't set */
224 advertising = supported;
225 /* set the advertised speeds */
226 if (hw->phy.autoneg_advertised) {
227 advertising = 0;
228 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
229 advertising |= ADVERTISED_10baseT_Full;
230 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
231 advertising |= ADVERTISED_100baseT_Full;
232 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
233 advertising |= supported & ADVRTSD_MSK_10G;
234 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
235 if (supported & SUPPORTED_1000baseKX_Full)
236 advertising |= ADVERTISED_1000baseKX_Full;
237 else
238 advertising |= ADVERTISED_1000baseT_Full;
239 }
240 } else {
241 if (hw->phy.multispeed_fiber && !autoneg) {
242 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
243 advertising = ADVERTISED_10000baseT_Full;
244 }
245 }
246
247 if (autoneg) {
248 supported |= SUPPORTED_Autoneg;
249 advertising |= ADVERTISED_Autoneg;
250 cmd->base.autoneg = AUTONEG_ENABLE;
251 } else
252 cmd->base.autoneg = AUTONEG_DISABLE;
253
254 /* Determine the remaining settings based on the PHY type. */
255 switch (adapter->hw.phy.type) {
256 case ixgbe_phy_tn:
257 case ixgbe_phy_aq:
258 case ixgbe_phy_x550em_ext_t:
259 case ixgbe_phy_fw:
260 case ixgbe_phy_cu_unknown:
261 supported |= SUPPORTED_TP;
262 advertising |= ADVERTISED_TP;
263 cmd->base.port = PORT_TP;
264 break;
265 case ixgbe_phy_qt:
266 supported |= SUPPORTED_FIBRE;
267 advertising |= ADVERTISED_FIBRE;
268 cmd->base.port = PORT_FIBRE;
269 break;
270 case ixgbe_phy_nl:
271 case ixgbe_phy_sfp_passive_tyco:
272 case ixgbe_phy_sfp_passive_unknown:
273 case ixgbe_phy_sfp_ftl:
274 case ixgbe_phy_sfp_avago:
275 case ixgbe_phy_sfp_intel:
276 case ixgbe_phy_sfp_unknown:
277 case ixgbe_phy_qsfp_passive_unknown:
278 case ixgbe_phy_qsfp_active_unknown:
279 case ixgbe_phy_qsfp_intel:
280 case ixgbe_phy_qsfp_unknown:
281 /* SFP+ devices, further checking needed */
282 switch (adapter->hw.phy.sfp_type) {
283 case ixgbe_sfp_type_da_cu:
284 case ixgbe_sfp_type_da_cu_core0:
285 case ixgbe_sfp_type_da_cu_core1:
286 supported |= SUPPORTED_FIBRE;
287 advertising |= ADVERTISED_FIBRE;
288 cmd->base.port = PORT_DA;
289 break;
290 case ixgbe_sfp_type_sr:
291 case ixgbe_sfp_type_lr:
292 case ixgbe_sfp_type_srlr_core0:
293 case ixgbe_sfp_type_srlr_core1:
294 case ixgbe_sfp_type_1g_sx_core0:
295 case ixgbe_sfp_type_1g_sx_core1:
296 case ixgbe_sfp_type_1g_lx_core0:
297 case ixgbe_sfp_type_1g_lx_core1:
298 supported |= SUPPORTED_FIBRE;
299 advertising |= ADVERTISED_FIBRE;
300 cmd->base.port = PORT_FIBRE;
301 break;
302 case ixgbe_sfp_type_not_present:
303 supported |= SUPPORTED_FIBRE;
304 advertising |= ADVERTISED_FIBRE;
305 cmd->base.port = PORT_NONE;
306 break;
307 case ixgbe_sfp_type_1g_cu_core0:
308 case ixgbe_sfp_type_1g_cu_core1:
309 supported |= SUPPORTED_TP;
310 advertising |= ADVERTISED_TP;
311 cmd->base.port = PORT_TP;
312 break;
313 case ixgbe_sfp_type_unknown:
314 default:
315 supported |= SUPPORTED_FIBRE;
316 advertising |= ADVERTISED_FIBRE;
317 cmd->base.port = PORT_OTHER;
318 break;
319 }
320 break;
321 case ixgbe_phy_xaui:
322 supported |= SUPPORTED_FIBRE;
323 advertising |= ADVERTISED_FIBRE;
324 cmd->base.port = PORT_NONE;
325 break;
326 case ixgbe_phy_unknown:
327 case ixgbe_phy_generic:
328 case ixgbe_phy_sfp_unsupported:
329 default:
330 supported |= SUPPORTED_FIBRE;
331 advertising |= ADVERTISED_FIBRE;
332 cmd->base.port = PORT_OTHER;
333 break;
334 }
335
336 /* Indicate pause support */
337 supported |= SUPPORTED_Pause;
338
339 switch (hw->fc.requested_mode) {
340 case ixgbe_fc_full:
341 advertising |= ADVERTISED_Pause;
342 break;
343 case ixgbe_fc_rx_pause:
344 advertising |= ADVERTISED_Pause |
345 ADVERTISED_Asym_Pause;
346 break;
347 case ixgbe_fc_tx_pause:
348 advertising |= ADVERTISED_Asym_Pause;
349 break;
350 default:
351 advertising &= ~(ADVERTISED_Pause |
352 ADVERTISED_Asym_Pause);
353 }
354
355 if (netif_carrier_ok(netdev)) {
356 switch (adapter->link_speed) {
357 case IXGBE_LINK_SPEED_10GB_FULL:
358 cmd->base.speed = SPEED_10000;
359 break;
360 case IXGBE_LINK_SPEED_5GB_FULL:
361 cmd->base.speed = SPEED_5000;
362 break;
363 case IXGBE_LINK_SPEED_2_5GB_FULL:
364 cmd->base.speed = SPEED_2500;
365 break;
366 case IXGBE_LINK_SPEED_1GB_FULL:
367 cmd->base.speed = SPEED_1000;
368 break;
369 case IXGBE_LINK_SPEED_100_FULL:
370 cmd->base.speed = SPEED_100;
371 break;
372 case IXGBE_LINK_SPEED_10_FULL:
373 cmd->base.speed = SPEED_10;
374 break;
375 default:
376 break;
377 }
378 cmd->base.duplex = DUPLEX_FULL;
379 } else {
380 cmd->base.speed = SPEED_UNKNOWN;
381 cmd->base.duplex = DUPLEX_UNKNOWN;
382 }
383
384 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
385 supported);
386 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
387 advertising);
388
389 return 0;
390}
391
392static int ixgbe_set_link_ksettings(struct net_device *netdev,
393 const struct ethtool_link_ksettings *cmd)
394{
395 struct ixgbe_adapter *adapter = netdev_priv(netdev);
396 struct ixgbe_hw *hw = &adapter->hw;
397 u32 advertised, old;
398 s32 err = 0;
399 u32 supported, advertising;
400
401 ethtool_convert_link_mode_to_legacy_u32(&supported,
402 cmd->link_modes.supported);
403 ethtool_convert_link_mode_to_legacy_u32(&advertising,
404 cmd->link_modes.advertising);
405
406 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
407 (hw->phy.multispeed_fiber)) {
408 /*
409 * this function does not support duplex forcing, but can
410 * limit the advertising of the adapter to the specified speed
411 */
412 if (advertising & ~supported)
413 return -EINVAL;
414
415 /* only allow one speed at a time if no autoneg */
416 if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
417 if (advertising ==
418 (ADVERTISED_10000baseT_Full |
419 ADVERTISED_1000baseT_Full))
420 return -EINVAL;
421 }
422
423 old = hw->phy.autoneg_advertised;
424 advertised = 0;
425 if (advertising & ADVERTISED_10000baseT_Full)
426 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
427
428 if (advertising & ADVERTISED_1000baseT_Full)
429 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
430
431 if (advertising & ADVERTISED_100baseT_Full)
432 advertised |= IXGBE_LINK_SPEED_100_FULL;
433
434 if (advertising & ADVERTISED_10baseT_Full)
435 advertised |= IXGBE_LINK_SPEED_10_FULL;
436
437 if (old == advertised)
438 return err;
439 /* this sets the link speed and restarts auto-neg */
440 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
441 usleep_range(1000, 2000);
442
443 hw->mac.autotry_restart = true;
444 err = hw->mac.ops.setup_link(hw, advertised, true);
445 if (err) {
446 e_info(probe, "setup link failed with code %d\n", err);
447 hw->mac.ops.setup_link(hw, old, true);
448 }
449 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
450 } else {
451 /* in this case we currently only support 10Gb/FULL */
452 u32 speed = cmd->base.speed;
453
454 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
455 (advertising != ADVERTISED_10000baseT_Full) ||
456 (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
457 return -EINVAL;
458 }
459
460 return err;
461}
462
463static void ixgbe_get_pauseparam(struct net_device *netdev,
464 struct ethtool_pauseparam *pause)
465{
466 struct ixgbe_adapter *adapter = netdev_priv(netdev);
467 struct ixgbe_hw *hw = &adapter->hw;
468
469 if (ixgbe_device_supports_autoneg_fc(hw) &&
470 !hw->fc.disable_fc_autoneg)
471 pause->autoneg = 1;
472 else
473 pause->autoneg = 0;
474
475 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
476 pause->rx_pause = 1;
477 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
478 pause->tx_pause = 1;
479 } else if (hw->fc.current_mode == ixgbe_fc_full) {
480 pause->rx_pause = 1;
481 pause->tx_pause = 1;
482 }
483}
484
485static int ixgbe_set_pauseparam(struct net_device *netdev,
486 struct ethtool_pauseparam *pause)
487{
488 struct ixgbe_adapter *adapter = netdev_priv(netdev);
489 struct ixgbe_hw *hw = &adapter->hw;
490 struct ixgbe_fc_info fc = hw->fc;
491
492 /* 82598 does no support link flow control with DCB enabled */
493 if ((hw->mac.type == ixgbe_mac_82598EB) &&
494 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
495 return -EINVAL;
496
497 /* some devices do not support autoneg of link flow control */
498 if ((pause->autoneg == AUTONEG_ENABLE) &&
499 !ixgbe_device_supports_autoneg_fc(hw))
500 return -EINVAL;
501
502 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
503
504 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
505 fc.requested_mode = ixgbe_fc_full;
506 else if (pause->rx_pause && !pause->tx_pause)
507 fc.requested_mode = ixgbe_fc_rx_pause;
508 else if (!pause->rx_pause && pause->tx_pause)
509 fc.requested_mode = ixgbe_fc_tx_pause;
510 else
511 fc.requested_mode = ixgbe_fc_none;
512
513 /* if the thing changed then we'll update and use new autoneg */
514 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
515 hw->fc = fc;
516 if (netif_running(netdev))
517 ixgbe_reinit_locked(adapter);
518 else
519 ixgbe_reset(adapter);
520 }
521
522 return 0;
523}
524
525static u32 ixgbe_get_msglevel(struct net_device *netdev)
526{
527 struct ixgbe_adapter *adapter = netdev_priv(netdev);
528 return adapter->msg_enable;
529}
530
531static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
532{
533 struct ixgbe_adapter *adapter = netdev_priv(netdev);
534 adapter->msg_enable = data;
535}
536
537static int ixgbe_get_regs_len(struct net_device *netdev)
538{
539#define IXGBE_REGS_LEN 1139
540 return IXGBE_REGS_LEN * sizeof(u32);
541}
542
543#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
544
545static void ixgbe_get_regs(struct net_device *netdev,
546 struct ethtool_regs *regs, void *p)
547{
548 struct ixgbe_adapter *adapter = netdev_priv(netdev);
549 struct ixgbe_hw *hw = &adapter->hw;
550 u32 *regs_buff = p;
551 u8 i;
552
553 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
554
555 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
556 hw->device_id;
557
558 /* General Registers */
559 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
560 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
561 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
562 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
563 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
564 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
565 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
566 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
567
568 /* NVM Register */
569 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
570 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
571 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
572 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
573 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
574 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
575 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
576 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
577 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
578 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
579
580 /* Interrupt */
581 /* don't read EICR because it can clear interrupt causes, instead
582 * read EICS which is a shadow but doesn't clear EICR */
583 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
584 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
585 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
586 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
587 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
588 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
589 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
590 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
591 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
592 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
593 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
594 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
595
596 /* Flow Control */
597 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
598 for (i = 0; i < 4; i++)
599 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
600 for (i = 0; i < 8; i++) {
601 switch (hw->mac.type) {
602 case ixgbe_mac_82598EB:
603 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
604 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
605 break;
606 case ixgbe_mac_82599EB:
607 case ixgbe_mac_X540:
608 case ixgbe_mac_X550:
609 case ixgbe_mac_X550EM_x:
610 case ixgbe_mac_x550em_a:
611 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
612 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
613 break;
614 default:
615 break;
616 }
617 }
618 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
619 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
620
621 /* Receive DMA */
622 for (i = 0; i < 64; i++)
623 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
624 for (i = 0; i < 64; i++)
625 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
626 for (i = 0; i < 64; i++)
627 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
628 for (i = 0; i < 64; i++)
629 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
630 for (i = 0; i < 64; i++)
631 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
632 for (i = 0; i < 64; i++)
633 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
634 for (i = 0; i < 16; i++)
635 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
636 for (i = 0; i < 16; i++)
637 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
638 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
639 for (i = 0; i < 8; i++)
640 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
641 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
642 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
643
644 /* Receive */
645 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
646 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
647 for (i = 0; i < 16; i++)
648 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
649 for (i = 0; i < 16; i++)
650 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
651 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
652 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
653 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
654 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
655 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
656 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
657 for (i = 0; i < 8; i++)
658 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
659 for (i = 0; i < 8; i++)
660 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
661 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
662
663 /* Transmit */
664 for (i = 0; i < 32; i++)
665 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
666 for (i = 0; i < 32; i++)
667 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
668 for (i = 0; i < 32; i++)
669 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
670 for (i = 0; i < 32; i++)
671 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
672 for (i = 0; i < 32; i++)
673 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
674 for (i = 0; i < 32; i++)
675 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
676 for (i = 0; i < 32; i++)
677 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
678 for (i = 0; i < 32; i++)
679 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
680 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
681 for (i = 0; i < 16; i++)
682 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
683 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
684 for (i = 0; i < 8; i++)
685 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
686 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
687
688 /* Wake Up */
689 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
690 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
691 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
692 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
693 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
694 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
695 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
696 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
697 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
698
699 /* DCB */
700 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
701 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
702
703 switch (hw->mac.type) {
704 case ixgbe_mac_82598EB:
705 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
706 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
707 for (i = 0; i < 8; i++)
708 regs_buff[833 + i] =
709 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
710 for (i = 0; i < 8; i++)
711 regs_buff[841 + i] =
712 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
713 for (i = 0; i < 8; i++)
714 regs_buff[849 + i] =
715 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
716 for (i = 0; i < 8; i++)
717 regs_buff[857 + i] =
718 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
719 break;
720 case ixgbe_mac_82599EB:
721 case ixgbe_mac_X540:
722 case ixgbe_mac_X550:
723 case ixgbe_mac_X550EM_x:
724 case ixgbe_mac_x550em_a:
725 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
726 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
727 for (i = 0; i < 8; i++)
728 regs_buff[833 + i] =
729 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
730 for (i = 0; i < 8; i++)
731 regs_buff[841 + i] =
732 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
733 for (i = 0; i < 8; i++)
734 regs_buff[849 + i] =
735 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
736 for (i = 0; i < 8; i++)
737 regs_buff[857 + i] =
738 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
739 break;
740 default:
741 break;
742 }
743
744 for (i = 0; i < 8; i++)
745 regs_buff[865 + i] =
746 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
747 for (i = 0; i < 8; i++)
748 regs_buff[873 + i] =
749 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
750
751 /* Statistics */
752 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
753 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
754 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
755 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
756 for (i = 0; i < 8; i++)
757 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
758 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
759 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
760 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
761 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
762 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
763 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
764 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
765 for (i = 0; i < 8; i++)
766 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
767 for (i = 0; i < 8; i++)
768 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
769 for (i = 0; i < 8; i++)
770 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
771 for (i = 0; i < 8; i++)
772 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
773 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
774 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
775 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
776 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
777 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
778 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
779 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
780 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
781 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
782 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
783 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
784 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
785 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
786 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
787 for (i = 0; i < 8; i++)
788 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
789 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
790 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
791 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
792 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
793 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
794 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
795 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
796 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
797 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
798 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
799 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
800 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
801 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
802 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
803 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
804 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
805 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
806 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
807 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
808 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
809 for (i = 0; i < 16; i++)
810 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
811 for (i = 0; i < 16; i++)
812 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
813 for (i = 0; i < 16; i++)
814 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
815 for (i = 0; i < 16; i++)
816 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
817
818 /* MAC */
819 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
820 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
821 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
822 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
823 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
824 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
825 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
826 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
827 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
828 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
829 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
830 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
831 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
832 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
833 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
834 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
835 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
836 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
837 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
838 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
839 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
840 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
841 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
842 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
843 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
844 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
845 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
846 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
847 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
848 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
849 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
850 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
851 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
852
853 /* Diagnostic */
854 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
855 for (i = 0; i < 8; i++)
856 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
857 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
858 for (i = 0; i < 4; i++)
859 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
860 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
861 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
862 for (i = 0; i < 8; i++)
863 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
864 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
865 for (i = 0; i < 4; i++)
866 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
867 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
868 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
869 for (i = 0; i < 4; i++)
870 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
871 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
872 for (i = 0; i < 4; i++)
873 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
874 for (i = 0; i < 8; i++)
875 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
876 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
877 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
878 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
879 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
880 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
881 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
882 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
883 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
884 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
885
886 /* 82599 X540 specific registers */
887 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
888
889 /* 82599 X540 specific DCB registers */
890 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
891 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
892 for (i = 0; i < 4; i++)
893 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
894 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
895 /* same as RTTQCNRM */
896 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
897 /* same as RTTQCNRR */
898
899 /* X540 specific DCB registers */
900 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
901 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
902}
903
904static int ixgbe_get_eeprom_len(struct net_device *netdev)
905{
906 struct ixgbe_adapter *adapter = netdev_priv(netdev);
907 return adapter->hw.eeprom.word_size * 2;
908}
909
910static int ixgbe_get_eeprom(struct net_device *netdev,
911 struct ethtool_eeprom *eeprom, u8 *bytes)
912{
913 struct ixgbe_adapter *adapter = netdev_priv(netdev);
914 struct ixgbe_hw *hw = &adapter->hw;
915 u16 *eeprom_buff;
916 int first_word, last_word, eeprom_len;
917 int ret_val = 0;
918 u16 i;
919
920 if (eeprom->len == 0)
921 return -EINVAL;
922
923 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
924
925 first_word = eeprom->offset >> 1;
926 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
927 eeprom_len = last_word - first_word + 1;
928
929 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
930 if (!eeprom_buff)
931 return -ENOMEM;
932
933 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
934 eeprom_buff);
935
936 /* Device's eeprom is always little-endian, word addressable */
937 for (i = 0; i < eeprom_len; i++)
938 le16_to_cpus(&eeprom_buff[i]);
939
940 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
941 kfree(eeprom_buff);
942
943 return ret_val;
944}
945
946static int ixgbe_set_eeprom(struct net_device *netdev,
947 struct ethtool_eeprom *eeprom, u8 *bytes)
948{
949 struct ixgbe_adapter *adapter = netdev_priv(netdev);
950 struct ixgbe_hw *hw = &adapter->hw;
951 u16 *eeprom_buff;
952 void *ptr;
953 int max_len, first_word, last_word, ret_val = 0;
954 u16 i;
955
956 if (eeprom->len == 0)
957 return -EINVAL;
958
959 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
960 return -EINVAL;
961
962 max_len = hw->eeprom.word_size * 2;
963
964 first_word = eeprom->offset >> 1;
965 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
966 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
967 if (!eeprom_buff)
968 return -ENOMEM;
969
970 ptr = eeprom_buff;
971
972 if (eeprom->offset & 1) {
973 /*
974 * need read/modify/write of first changed EEPROM word
975 * only the second byte of the word is being modified
976 */
977 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
978 if (ret_val)
979 goto err;
980
981 ptr++;
982 }
983 if ((eeprom->offset + eeprom->len) & 1) {
984 /*
985 * need read/modify/write of last changed EEPROM word
986 * only the first byte of the word is being modified
987 */
988 ret_val = hw->eeprom.ops.read(hw, last_word,
989 &eeprom_buff[last_word - first_word]);
990 if (ret_val)
991 goto err;
992 }
993
994 /* Device's eeprom is always little-endian, word addressable */
995 for (i = 0; i < last_word - first_word + 1; i++)
996 le16_to_cpus(&eeprom_buff[i]);
997
998 memcpy(ptr, bytes, eeprom->len);
999
1000 for (i = 0; i < last_word - first_word + 1; i++)
1001 cpu_to_le16s(&eeprom_buff[i]);
1002
1003 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1004 last_word - first_word + 1,
1005 eeprom_buff);
1006
1007 /* Update the checksum */
1008 if (ret_val == 0)
1009 hw->eeprom.ops.update_checksum(hw);
1010
1011err:
1012 kfree(eeprom_buff);
1013 return ret_val;
1014}
1015
1016static void ixgbe_get_drvinfo(struct net_device *netdev,
1017 struct ethtool_drvinfo *drvinfo)
1018{
1019 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1020
1021 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1022 strlcpy(drvinfo->version, ixgbe_driver_version,
1023 sizeof(drvinfo->version));
1024
1025 strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1026 sizeof(drvinfo->fw_version));
1027
1028 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1029 sizeof(drvinfo->bus_info));
1030
1031 drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1032}
1033
1034static void ixgbe_get_ringparam(struct net_device *netdev,
1035 struct ethtool_ringparam *ring)
1036{
1037 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1038 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1039 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1040
1041 ring->rx_max_pending = IXGBE_MAX_RXD;
1042 ring->tx_max_pending = IXGBE_MAX_TXD;
1043 ring->rx_pending = rx_ring->count;
1044 ring->tx_pending = tx_ring->count;
1045}
1046
1047static int ixgbe_set_ringparam(struct net_device *netdev,
1048 struct ethtool_ringparam *ring)
1049{
1050 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1051 struct ixgbe_ring *temp_ring;
1052 int i, j, err = 0;
1053 u32 new_rx_count, new_tx_count;
1054
1055 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1056 return -EINVAL;
1057
1058 new_tx_count = clamp_t(u32, ring->tx_pending,
1059 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1060 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1061
1062 new_rx_count = clamp_t(u32, ring->rx_pending,
1063 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1064 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1065
1066 if ((new_tx_count == adapter->tx_ring_count) &&
1067 (new_rx_count == adapter->rx_ring_count)) {
1068 /* nothing to do */
1069 return 0;
1070 }
1071
1072 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1073 usleep_range(1000, 2000);
1074
1075 if (!netif_running(adapter->netdev)) {
1076 for (i = 0; i < adapter->num_tx_queues; i++)
1077 adapter->tx_ring[i]->count = new_tx_count;
1078 for (i = 0; i < adapter->num_xdp_queues; i++)
1079 adapter->xdp_ring[i]->count = new_tx_count;
1080 for (i = 0; i < adapter->num_rx_queues; i++)
1081 adapter->rx_ring[i]->count = new_rx_count;
1082 adapter->tx_ring_count = new_tx_count;
1083 adapter->xdp_ring_count = new_tx_count;
1084 adapter->rx_ring_count = new_rx_count;
1085 goto clear_reset;
1086 }
1087
1088 /* allocate temporary buffer to store rings in */
1089 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1090 adapter->num_rx_queues);
1091 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
1092
1093 if (!temp_ring) {
1094 err = -ENOMEM;
1095 goto clear_reset;
1096 }
1097
1098 ixgbe_down(adapter);
1099
1100 /*
1101 * Setup new Tx resources and free the old Tx resources in that order.
1102 * We can then assign the new resources to the rings via a memcpy.
1103 * The advantage to this approach is that we are guaranteed to still
1104 * have resources even in the case of an allocation failure.
1105 */
1106 if (new_tx_count != adapter->tx_ring_count) {
1107 for (i = 0; i < adapter->num_tx_queues; i++) {
1108 memcpy(&temp_ring[i], adapter->tx_ring[i],
1109 sizeof(struct ixgbe_ring));
1110
1111 temp_ring[i].count = new_tx_count;
1112 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1113 if (err) {
1114 while (i) {
1115 i--;
1116 ixgbe_free_tx_resources(&temp_ring[i]);
1117 }
1118 goto err_setup;
1119 }
1120 }
1121
1122 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1123 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1124 sizeof(struct ixgbe_ring));
1125
1126 temp_ring[i].count = new_tx_count;
1127 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1128 if (err) {
1129 while (i) {
1130 i--;
1131 ixgbe_free_tx_resources(&temp_ring[i]);
1132 }
1133 goto err_setup;
1134 }
1135 }
1136
1137 for (i = 0; i < adapter->num_tx_queues; i++) {
1138 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1139
1140 memcpy(adapter->tx_ring[i], &temp_ring[i],
1141 sizeof(struct ixgbe_ring));
1142 }
1143 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1144 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1145
1146 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1147 sizeof(struct ixgbe_ring));
1148 }
1149
1150 adapter->tx_ring_count = new_tx_count;
1151 }
1152
1153 /* Repeat the process for the Rx rings if needed */
1154 if (new_rx_count != adapter->rx_ring_count) {
1155 for (i = 0; i < adapter->num_rx_queues; i++) {
1156 memcpy(&temp_ring[i], adapter->rx_ring[i],
1157 sizeof(struct ixgbe_ring));
1158
1159 /* Clear copied XDP RX-queue info */
1160 memset(&temp_ring[i].xdp_rxq, 0,
1161 sizeof(temp_ring[i].xdp_rxq));
1162
1163 temp_ring[i].count = new_rx_count;
1164 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1165 if (err) {
1166 while (i) {
1167 i--;
1168 ixgbe_free_rx_resources(&temp_ring[i]);
1169 }
1170 goto err_setup;
1171 }
1172
1173 }
1174
1175 for (i = 0; i < adapter->num_rx_queues; i++) {
1176 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1177
1178 memcpy(adapter->rx_ring[i], &temp_ring[i],
1179 sizeof(struct ixgbe_ring));
1180 }
1181
1182 adapter->rx_ring_count = new_rx_count;
1183 }
1184
1185err_setup:
1186 ixgbe_up(adapter);
1187 vfree(temp_ring);
1188clear_reset:
1189 clear_bit(__IXGBE_RESETTING, &adapter->state);
1190 return err;
1191}
1192
1193static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1194{
1195 switch (sset) {
1196 case ETH_SS_TEST:
1197 return IXGBE_TEST_LEN;
1198 case ETH_SS_STATS:
1199 return IXGBE_STATS_LEN;
1200 case ETH_SS_PRIV_FLAGS:
1201 return IXGBE_PRIV_FLAGS_STR_LEN;
1202 default:
1203 return -EOPNOTSUPP;
1204 }
1205}
1206
1207static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1208 struct ethtool_stats *stats, u64 *data)
1209{
1210 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1211 struct rtnl_link_stats64 temp;
1212 const struct rtnl_link_stats64 *net_stats;
1213 unsigned int start;
1214 struct ixgbe_ring *ring;
1215 int i, j;
1216 char *p = NULL;
1217
1218 ixgbe_update_stats(adapter);
1219 net_stats = dev_get_stats(netdev, &temp);
1220 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1221 switch (ixgbe_gstrings_stats[i].type) {
1222 case NETDEV_STATS:
1223 p = (char *) net_stats +
1224 ixgbe_gstrings_stats[i].stat_offset;
1225 break;
1226 case IXGBE_STATS:
1227 p = (char *) adapter +
1228 ixgbe_gstrings_stats[i].stat_offset;
1229 break;
1230 default:
1231 data[i] = 0;
1232 continue;
1233 }
1234
1235 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1236 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1237 }
1238 for (j = 0; j < netdev->num_tx_queues; j++) {
1239 ring = adapter->tx_ring[j];
1240 if (!ring) {
1241 data[i] = 0;
1242 data[i+1] = 0;
1243 i += 2;
1244 continue;
1245 }
1246
1247 do {
1248 start = u64_stats_fetch_begin_irq(&ring->syncp);
1249 data[i] = ring->stats.packets;
1250 data[i+1] = ring->stats.bytes;
1251 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1252 i += 2;
1253 }
1254 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1255 ring = adapter->rx_ring[j];
1256 if (!ring) {
1257 data[i] = 0;
1258 data[i+1] = 0;
1259 i += 2;
1260 continue;
1261 }
1262
1263 do {
1264 start = u64_stats_fetch_begin_irq(&ring->syncp);
1265 data[i] = ring->stats.packets;
1266 data[i+1] = ring->stats.bytes;
1267 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1268 i += 2;
1269 }
1270
1271 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1272 data[i++] = adapter->stats.pxontxc[j];
1273 data[i++] = adapter->stats.pxofftxc[j];
1274 }
1275 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1276 data[i++] = adapter->stats.pxonrxc[j];
1277 data[i++] = adapter->stats.pxoffrxc[j];
1278 }
1279}
1280
1281static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1282 u8 *data)
1283{
1284 char *p = (char *)data;
1285 unsigned int i;
1286
1287 switch (stringset) {
1288 case ETH_SS_TEST:
1289 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1290 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1291 data += ETH_GSTRING_LEN;
1292 }
1293 break;
1294 case ETH_SS_STATS:
1295 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1296 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1297 ETH_GSTRING_LEN);
1298 p += ETH_GSTRING_LEN;
1299 }
1300 for (i = 0; i < netdev->num_tx_queues; i++) {
1301 sprintf(p, "tx_queue_%u_packets", i);
1302 p += ETH_GSTRING_LEN;
1303 sprintf(p, "tx_queue_%u_bytes", i);
1304 p += ETH_GSTRING_LEN;
1305 }
1306 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1307 sprintf(p, "rx_queue_%u_packets", i);
1308 p += ETH_GSTRING_LEN;
1309 sprintf(p, "rx_queue_%u_bytes", i);
1310 p += ETH_GSTRING_LEN;
1311 }
1312 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1313 sprintf(p, "tx_pb_%u_pxon", i);
1314 p += ETH_GSTRING_LEN;
1315 sprintf(p, "tx_pb_%u_pxoff", i);
1316 p += ETH_GSTRING_LEN;
1317 }
1318 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1319 sprintf(p, "rx_pb_%u_pxon", i);
1320 p += ETH_GSTRING_LEN;
1321 sprintf(p, "rx_pb_%u_pxoff", i);
1322 p += ETH_GSTRING_LEN;
1323 }
1324 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1325 break;
1326 case ETH_SS_PRIV_FLAGS:
1327 memcpy(data, ixgbe_priv_flags_strings,
1328 IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1329 }
1330}
1331
1332static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1333{
1334 struct ixgbe_hw *hw = &adapter->hw;
1335 bool link_up;
1336 u32 link_speed = 0;
1337
1338 if (ixgbe_removed(hw->hw_addr)) {
1339 *data = 1;
1340 return 1;
1341 }
1342 *data = 0;
1343
1344 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1345 if (link_up)
1346 return *data;
1347 else
1348 *data = 1;
1349 return *data;
1350}
1351
1352/* ethtool register test data */
1353struct ixgbe_reg_test {
1354 u16 reg;
1355 u8 array_len;
1356 u8 test_type;
1357 u32 mask;
1358 u32 write;
1359};
1360
1361/* In the hardware, registers are laid out either singly, in arrays
1362 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1363 * most tests take place on arrays or single registers (handled
1364 * as a single-element array) and special-case the tables.
1365 * Table tests are always pattern tests.
1366 *
1367 * We also make provision for some required setup steps by specifying
1368 * registers to be written without any read-back testing.
1369 */
1370
1371#define PATTERN_TEST 1
1372#define SET_READ_TEST 2
1373#define WRITE_NO_TEST 3
1374#define TABLE32_TEST 4
1375#define TABLE64_TEST_LO 5
1376#define TABLE64_TEST_HI 6
1377
1378/* default 82599 register test */
1379static const struct ixgbe_reg_test reg_test_82599[] = {
1380 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1381 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1382 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1383 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1384 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1385 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1386 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1387 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1388 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1389 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1390 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1391 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1392 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1393 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1394 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1395 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1396 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1397 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1398 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1399 { .reg = 0 }
1400};
1401
1402/* default 82598 register test */
1403static const struct ixgbe_reg_test reg_test_82598[] = {
1404 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1405 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1406 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1407 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1408 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1409 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1410 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1411 /* Enable all four RX queues before testing. */
1412 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1413 /* RDH is read-only for 82598, only test RDT. */
1414 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1415 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1416 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1417 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1418 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1419 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1420 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1421 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1422 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1423 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1424 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1425 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1426 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1427 { .reg = 0 }
1428};
1429
1430static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1431 u32 mask, u32 write)
1432{
1433 u32 pat, val, before;
1434 static const u32 test_pattern[] = {
1435 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1436
1437 if (ixgbe_removed(adapter->hw.hw_addr)) {
1438 *data = 1;
1439 return true;
1440 }
1441 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1442 before = ixgbe_read_reg(&adapter->hw, reg);
1443 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1444 val = ixgbe_read_reg(&adapter->hw, reg);
1445 if (val != (test_pattern[pat] & write & mask)) {
1446 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1447 reg, val, (test_pattern[pat] & write & mask));
1448 *data = reg;
1449 ixgbe_write_reg(&adapter->hw, reg, before);
1450 return true;
1451 }
1452 ixgbe_write_reg(&adapter->hw, reg, before);
1453 }
1454 return false;
1455}
1456
1457static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1458 u32 mask, u32 write)
1459{
1460 u32 val, before;
1461
1462 if (ixgbe_removed(adapter->hw.hw_addr)) {
1463 *data = 1;
1464 return true;
1465 }
1466 before = ixgbe_read_reg(&adapter->hw, reg);
1467 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1468 val = ixgbe_read_reg(&adapter->hw, reg);
1469 if ((write & mask) != (val & mask)) {
1470 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1471 reg, (val & mask), (write & mask));
1472 *data = reg;
1473 ixgbe_write_reg(&adapter->hw, reg, before);
1474 return true;
1475 }
1476 ixgbe_write_reg(&adapter->hw, reg, before);
1477 return false;
1478}
1479
1480static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1481{
1482 const struct ixgbe_reg_test *test;
1483 u32 value, before, after;
1484 u32 i, toggle;
1485
1486 if (ixgbe_removed(adapter->hw.hw_addr)) {
1487 e_err(drv, "Adapter removed - register test blocked\n");
1488 *data = 1;
1489 return 1;
1490 }
1491 switch (adapter->hw.mac.type) {
1492 case ixgbe_mac_82598EB:
1493 toggle = 0x7FFFF3FF;
1494 test = reg_test_82598;
1495 break;
1496 case ixgbe_mac_82599EB:
1497 case ixgbe_mac_X540:
1498 case ixgbe_mac_X550:
1499 case ixgbe_mac_X550EM_x:
1500 case ixgbe_mac_x550em_a:
1501 toggle = 0x7FFFF30F;
1502 test = reg_test_82599;
1503 break;
1504 default:
1505 *data = 1;
1506 return 1;
1507 }
1508
1509 /*
1510 * Because the status register is such a special case,
1511 * we handle it separately from the rest of the register
1512 * tests. Some bits are read-only, some toggle, and some
1513 * are writeable on newer MACs.
1514 */
1515 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1516 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1517 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1518 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1519 if (value != after) {
1520 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1521 after, value);
1522 *data = 1;
1523 return 1;
1524 }
1525 /* restore previous status */
1526 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1527
1528 /*
1529 * Perform the remainder of the register test, looping through
1530 * the test table until we either fail or reach the null entry.
1531 */
1532 while (test->reg) {
1533 for (i = 0; i < test->array_len; i++) {
1534 bool b = false;
1535
1536 switch (test->test_type) {
1537 case PATTERN_TEST:
1538 b = reg_pattern_test(adapter, data,
1539 test->reg + (i * 0x40),
1540 test->mask,
1541 test->write);
1542 break;
1543 case SET_READ_TEST:
1544 b = reg_set_and_check(adapter, data,
1545 test->reg + (i * 0x40),
1546 test->mask,
1547 test->write);
1548 break;
1549 case WRITE_NO_TEST:
1550 ixgbe_write_reg(&adapter->hw,
1551 test->reg + (i * 0x40),
1552 test->write);
1553 break;
1554 case TABLE32_TEST:
1555 b = reg_pattern_test(adapter, data,
1556 test->reg + (i * 4),
1557 test->mask,
1558 test->write);
1559 break;
1560 case TABLE64_TEST_LO:
1561 b = reg_pattern_test(adapter, data,
1562 test->reg + (i * 8),
1563 test->mask,
1564 test->write);
1565 break;
1566 case TABLE64_TEST_HI:
1567 b = reg_pattern_test(adapter, data,
1568 (test->reg + 4) + (i * 8),
1569 test->mask,
1570 test->write);
1571 break;
1572 }
1573 if (b)
1574 return 1;
1575 }
1576 test++;
1577 }
1578
1579 *data = 0;
1580 return 0;
1581}
1582
1583static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1584{
1585 struct ixgbe_hw *hw = &adapter->hw;
1586 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1587 *data = 1;
1588 else
1589 *data = 0;
1590 return *data;
1591}
1592
1593static irqreturn_t ixgbe_test_intr(int irq, void *data)
1594{
1595 struct net_device *netdev = (struct net_device *) data;
1596 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1597
1598 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1599
1600 return IRQ_HANDLED;
1601}
1602
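/*
 * Legacy/MSI interrupt self-test. The throwaway handler above ORs EICR
 * into adapter->test_icr. For each of the low ten interrupt causes the
 * test uses EIMC/EIMS to mask or unmask causes and EICS to force them,
 * then checks that only the expected cause was latched. MSI-X setups
 * are skipped for now. *data reports the failure: 1 = request_irq
 * failed, 3 = the cause under test fired unexpectedly, 4 = it failed to
 * fire when forced, 5 = unrelated causes fired while masked.
 */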
1603static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1604{
1605 struct net_device *netdev = adapter->netdev;
1606 u32 mask, i = 0, shared_int = true;
1607 u32 irq = adapter->pdev->irq;
1608
1609 *data = 0;
1610
1611 /* Hook up test interrupt handler just for this test */
1612 if (adapter->msix_entries) {
1613 /* NOTE: we don't test MSI-X interrupts here, yet */
1614 return 0;
1615 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1616 shared_int = false;
1617 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1618 netdev)) {
1619 *data = 1;
1620 return -1;
1621 }
1622 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1623 netdev->name, netdev)) {
1624 shared_int = false;
1625 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1626 netdev->name, netdev)) {
1627 *data = 1;
1628 return -1;
1629 }
1630 e_info(hw, "testing %s interrupt\n", shared_int ?
1631 "shared" : "unshared");
1632
1633 /* Disable all the interrupts */
1634 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1635 IXGBE_WRITE_FLUSH(&adapter->hw);
1636 usleep_range(10000, 20000);
1637
1638 /* Test each interrupt */
1639 for (; i < 10; i++) {
1640 /* Interrupt to test */
1641 mask = BIT(i);
1642
1643 if (!shared_int) {
1644 /*
1645 * Disable the interrupts to be reported in
1646 * the cause register and then force the same
1647 * interrupt and see if one gets posted. If
1648 * an interrupt was posted to the bus, the
1649 * test failed.
1650 */
1651 adapter->test_icr = 0;
1652 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1653 ~mask & 0x00007FFF);
1654 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1655 ~mask & 0x00007FFF);
1656 IXGBE_WRITE_FLUSH(&adapter->hw);
1657 usleep_range(10000, 20000);
1658
1659 if (adapter->test_icr & mask) {
1660 *data = 3;
1661 break;
1662 }
1663 }
1664
1665 /*
1666 * Enable the interrupt to be reported in the cause
1667 * register and then force the same interrupt and see
1668 * if one gets posted. If an interrupt was not posted
1669 * to the bus, the test failed.
1670 */
1671 adapter->test_icr = 0;
1672 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1673 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1674 IXGBE_WRITE_FLUSH(&adapter->hw);
1675 usleep_range(10000, 20000);
1676
1677 if (!(adapter->test_icr & mask)) {
1678 *data = 4;
1679 break;
1680 }
1681
1682 if (!shared_int) {
1683 /*
1684 * Disable the other interrupts to be reported in
1685 * the cause register and then force the other
1686 * interrupts and see if any get posted. If
1687 * an interrupt was posted to the bus, the
1688 * test failed.
1689 */
1690 adapter->test_icr = 0;
1691 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1692 ~mask & 0x00007FFF);
1693 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1694 ~mask & 0x00007FFF);
1695 IXGBE_WRITE_FLUSH(&adapter->hw);
1696 usleep_range(10000, 20000);
1697
1698 if (adapter->test_icr) {
1699 *data = 5;
1700 break;
1701 }
1702 }
1703 }
1704
1705 /* Disable all the interrupts */
1706 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1707 IXGBE_WRITE_FLUSH(&adapter->hw);
1708 usleep_range(10000, 20000);
1709
1710 /* Unhook test interrupt handler */
1711 free_irq(irq, netdev);
1712
1713 return *data;
1714}
1715
1716static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1717{
1718 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1719 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1720 struct ixgbe_hw *hw = &adapter->hw;
1721 u32 reg_ctl;
1722
1723 /* shut down the DMA engines now so they can be reinitialized later */
1724
1725 /* first Rx */
1726 hw->mac.ops.disable_rx(hw);
1727 ixgbe_disable_rx_queue(adapter, rx_ring);
1728
1729 /* now Tx */
1730 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1731 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1732 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1733
1734 switch (hw->mac.type) {
1735 case ixgbe_mac_82599EB:
1736 case ixgbe_mac_X540:
1737 case ixgbe_mac_X550:
1738 case ixgbe_mac_X550EM_x:
1739 case ixgbe_mac_x550em_a:
1740 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1741 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1742 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1743 break;
1744 default:
1745 break;
1746 }
1747
1748 ixgbe_reset(adapter);
1749
1750 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1751 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1752}
1753
1754static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1755{
1756 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1757 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1758 struct ixgbe_hw *hw = &adapter->hw;
1759 u32 rctl, reg_data;
1760 int ret_val;
1761 int err;
1762
1763 /* Setup Tx descriptor ring and Tx buffers */
1764 tx_ring->count = IXGBE_DEFAULT_TXD;
1765 tx_ring->queue_index = 0;
1766 tx_ring->dev = &adapter->pdev->dev;
1767 tx_ring->netdev = adapter->netdev;
1768 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1769
1770 err = ixgbe_setup_tx_resources(tx_ring);
1771 if (err)
1772 return 1;
1773
1774 switch (adapter->hw.mac.type) {
1775 case ixgbe_mac_82599EB:
1776 case ixgbe_mac_X540:
1777 case ixgbe_mac_X550:
1778 case ixgbe_mac_X550EM_x:
1779 case ixgbe_mac_x550em_a:
1780 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1781 reg_data |= IXGBE_DMATXCTL_TE;
1782 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1783 break;
1784 default:
1785 break;
1786 }
1787
1788 ixgbe_configure_tx_ring(adapter, tx_ring);
1789
1790 /* Setup Rx Descriptor ring and Rx buffers */
1791 rx_ring->count = IXGBE_DEFAULT_RXD;
1792 rx_ring->queue_index = 0;
1793 rx_ring->dev = &adapter->pdev->dev;
1794 rx_ring->netdev = adapter->netdev;
1795 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1796
1797 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1798 if (err) {
1799 ret_val = 4;
1800 goto err_nomem;
1801 }
1802
1803 hw->mac.ops.disable_rx(hw);
1804
1805 ixgbe_configure_rx_ring(adapter, rx_ring);
1806
1807 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1808 rctl |= IXGBE_RXCTRL_DMBYPS;
1809 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1810
1811 hw->mac.ops.enable_rx(hw);
1812
1813 return 0;
1814
1815err_nomem:
1816 ixgbe_free_desc_rings(adapter);
1817 return ret_val;
1818}
1819
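/*
 * Put the MAC into loopback for the offline self-test: set HLREG0.LPBK,
 * open up FCTRL (accept broadcast, bad packets and all multicast) so the
 * looped frames are not filtered, and force link up via MACC.FLU on
 * X540/X550 class parts or AUTOC.FLU elsewhere. On 82598 the Atlas Tx
 * lanes are additionally powered down so test frames never reach the
 * wire; they are re-enabled by the subsequent reset.
 */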
1820static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1821{
1822 struct ixgbe_hw *hw = &adapter->hw;
1823 u32 reg_data;
1824
1826 /* Setup MAC loopback */
1827 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1828 reg_data |= IXGBE_HLREG0_LPBK;
1829 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1830
1831 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1832 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1833 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1834
1835 /* X540 and X550 class devices need to set the MACC.FLU bit to force link up */
1836 switch (adapter->hw.mac.type) {
1837 case ixgbe_mac_X540:
1838 case ixgbe_mac_X550:
1839 case ixgbe_mac_X550EM_x:
1840 case ixgbe_mac_x550em_a:
1841 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1842 reg_data |= IXGBE_MACC_FLU;
1843 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1844 break;
1845 default:
1846 if (hw->mac.orig_autoc) {
1847 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1848 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1849 } else {
1850 return 10;
1851 }
1852 }
1853 IXGBE_WRITE_FLUSH(hw);
1854 usleep_range(10000, 20000);
1855
1856 /* Disable Atlas Tx lanes; re-enabled in reset path */
1857 if (hw->mac.type == ixgbe_mac_82598EB) {
1858 u8 atlas;
1859
1860 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1861 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1862 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1863
1864 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1865 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1866 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1867
1868 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1869 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1870 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1871
1872 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1873 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1874 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1875 }
1876
1877 return 0;
1878}
1879
1880static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1881{
1882 u32 reg_data;
1883
1884 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1885 reg_data &= ~IXGBE_HLREG0_LPBK;
1886 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1887}
1888
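/*
 * Loopback test frame layout: the buffer is filled with 0xFF, part of
 * the upper half is overwritten with 0xAA, and 0xBE/0xAF marker bytes
 * are planted just past the midpoint. The checker below only samples a
 * few sentinel bytes (an 0xFF byte near the start plus the two markers)
 * rather than comparing the whole frame.
 */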
1889static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1890 unsigned int frame_size)
1891{
1892 memset(skb->data, 0xFF, frame_size);
1893 frame_size >>= 1;
1894 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1895 memset(&skb->data[frame_size + 10], 0xBE, 1);
1896 memset(&skb->data[frame_size + 12], 0xAF, 1);
1897}
1898
1899static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1900 unsigned int frame_size)
1901{
1902 unsigned char *data;
1903 bool match = true;
1904
1905 frame_size >>= 1;
1906
1907 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1908
1909 if (data[3] != 0xFF ||
1910 data[frame_size + 10] != 0xBE ||
1911 data[frame_size + 12] != 0xAF)
1912 match = false;
1913
1914 kunmap(rx_buffer->page);
1915
1916 return match;
1917}
1918
1919static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1920 struct ixgbe_ring *tx_ring,
1921 unsigned int size)
1922{
1923 union ixgbe_adv_rx_desc *rx_desc;
1924 u16 rx_ntc, tx_ntc, count = 0;
1925
1926 /* initialize next to clean and descriptor values */
1927 rx_ntc = rx_ring->next_to_clean;
1928 tx_ntc = tx_ring->next_to_clean;
1929 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1930
1931 while (tx_ntc != tx_ring->next_to_use) {
1932 union ixgbe_adv_tx_desc *tx_desc;
1933 struct ixgbe_tx_buffer *tx_buffer;
1934
1935 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
1936
1937 /* if DD is not set transmit has not completed */
1938 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1939 return count;
1940
1941 /* unmap buffer on Tx side */
1942 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1943
1944 /* Free all the Tx ring sk_buffs */
1945 dev_kfree_skb_any(tx_buffer->skb);
1946
1947 /* unmap skb header data */
1948 dma_unmap_single(tx_ring->dev,
1949 dma_unmap_addr(tx_buffer, dma),
1950 dma_unmap_len(tx_buffer, len),
1951 DMA_TO_DEVICE);
1952 dma_unmap_len_set(tx_buffer, len, 0);
1953
1954 /* increment Tx next to clean counter */
1955 tx_ntc++;
1956 if (tx_ntc == tx_ring->count)
1957 tx_ntc = 0;
1958 }
1959
1960 while (rx_desc->wb.upper.length) {
1961 struct ixgbe_rx_buffer *rx_buffer;
1962
1963 /* check Rx buffer */
1964 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1965
1966 /* sync Rx buffer for CPU read */
1967 dma_sync_single_for_cpu(rx_ring->dev,
1968 rx_buffer->dma,
1969 ixgbe_rx_bufsz(rx_ring),
1970 DMA_FROM_DEVICE);
1971
1972 /* verify contents of skb */
1973 if (ixgbe_check_lbtest_frame(rx_buffer, size))
1974 count++;
1975 else
1976 break;
1977
1978 /* sync Rx buffer for device write */
1979 dma_sync_single_for_device(rx_ring->dev,
1980 rx_buffer->dma,
1981 ixgbe_rx_bufsz(rx_ring),
1982 DMA_FROM_DEVICE);
1983
1984 /* increment Rx next to clean counter */
1985 rx_ntc++;
1986 if (rx_ntc == rx_ring->count)
1987 rx_ntc = 0;
1988
1989 /* fetch next descriptor */
1990 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1991 }
1992
1993 netdev_tx_reset_queue(txring_txq(tx_ring));
1994
1995 /* re-map buffers to ring, store next to clean values */
1996 ixgbe_alloc_rx_buffers(rx_ring, count);
1997 rx_ring->next_to_clean = rx_ntc;
1998 tx_ring->next_to_clean = tx_ntc;
1999
2000 return count;
2001}
2002
2003static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2004{
2005 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2006 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2007 int i, j, lc, good_cnt, ret_val = 0;
2008 unsigned int size = 1024;
2009 netdev_tx_t tx_ret_val;
2010 struct sk_buff *skb;
2011 u32 flags_orig = adapter->flags;
2012
2013 /* DCB can modify the frames on Tx */
2014 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2015
2016 /* allocate test skb */
2017 skb = alloc_skb(size, GFP_KERNEL);
2018 if (!skb)
2019 return 11;
2020
2021 /* place data into test skb */
2022 ixgbe_create_lbtest_frame(skb, size);
2023 skb_put(skb, size);
2024
2025 /*
2026 * Calculate the loop count based on the largest descriptor ring
2027 * The idea is to wrap the largest ring a number of times using 64
2028 * send/receive pairs during each loop
2029 */
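	/*
	 * With the driver's usual 512-descriptor defaults for the test
	 * rings, this works out to lc = (512 / 64) * 2 + 1 = 17 loops of
	 * 64 frames each.
	 */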
2030
2031 if (rx_ring->count <= tx_ring->count)
2032 lc = ((tx_ring->count / 64) * 2) + 1;
2033 else
2034 lc = ((rx_ring->count / 64) * 2) + 1;
2035
2036 for (j = 0; j <= lc; j++) {
2037 /* reset count of good packets */
2038 good_cnt = 0;
2039
2040 /* place 64 packets on the transmit queue*/
2041 for (i = 0; i < 64; i++) {
2042 skb_get(skb);
2043 tx_ret_val = ixgbe_xmit_frame_ring(skb,
2044 adapter,
2045 tx_ring);
2046 if (tx_ret_val == NETDEV_TX_OK)
2047 good_cnt++;
2048 }
2049
2050 if (good_cnt != 64) {
2051 ret_val = 12;
2052 break;
2053 }
2054
2055 /* allow 200 milliseconds for packets to go from Tx to Rx */
2056 msleep(200);
2057
2058 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2059 if (good_cnt != 64) {
2060 ret_val = 13;
2061 break;
2062 }
2063 }
2064
2065 /* free the original skb */
2066 kfree_skb(skb);
2067 adapter->flags = flags_orig;
2068
2069 return ret_val;
2070}
2071
2072static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2073{
2074 *data = ixgbe_setup_desc_rings(adapter);
2075 if (*data)
2076 goto out;
2077 *data = ixgbe_setup_loopback_test(adapter);
2078 if (*data)
2079 goto err_loopback;
2080 *data = ixgbe_run_loopback_test(adapter);
2081 ixgbe_loopback_cleanup(adapter);
2082
2083err_loopback:
2084 ixgbe_free_desc_rings(adapter);
2085out:
2086 return *data;
2087}
2088
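/*
 * ethtool self-test entry point (typically driven by "ethtool -t <dev>"
 * or "ethtool -t <dev> offline"). The result slots are data[0] register
 * test, data[1] EEPROM checksum, data[2] interrupt test, data[3] MAC
 * loopback and data[4] link; online mode only runs the link test and
 * reports the offline slots as passing.
 */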
2089static void ixgbe_diag_test(struct net_device *netdev,
2090 struct ethtool_test *eth_test, u64 *data)
2091{
2092 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2093 bool if_running = netif_running(netdev);
2094
2095 if (ixgbe_removed(adapter->hw.hw_addr)) {
2096 e_err(hw, "Adapter removed - test blocked\n");
2097 data[0] = 1;
2098 data[1] = 1;
2099 data[2] = 1;
2100 data[3] = 1;
2101 data[4] = 1;
2102 eth_test->flags |= ETH_TEST_FL_FAILED;
2103 return;
2104 }
2105 set_bit(__IXGBE_TESTING, &adapter->state);
2106 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2107 struct ixgbe_hw *hw = &adapter->hw;
2108
2109 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2110 int i;
2111 for (i = 0; i < adapter->num_vfs; i++) {
2112 if (adapter->vfinfo[i].clear_to_send) {
2113 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2114 data[0] = 1;
2115 data[1] = 1;
2116 data[2] = 1;
2117 data[3] = 1;
2118 data[4] = 1;
2119 eth_test->flags |= ETH_TEST_FL_FAILED;
2120 clear_bit(__IXGBE_TESTING,
2121 &adapter->state);
2122 goto skip_ol_tests;
2123 }
2124 }
2125 }
2126
2127 /* Offline tests */
2128 e_info(hw, "offline testing starting\n");
2129
2130 /* Link test performed before hardware reset so autoneg doesn't
2131 * interfere with test result
2132 */
2133 if (ixgbe_link_test(adapter, &data[4]))
2134 eth_test->flags |= ETH_TEST_FL_FAILED;
2135
2136 if (if_running)
2137 /* indicate we're in test mode */
2138 ixgbe_close(netdev);
2139 else
2140 ixgbe_reset(adapter);
2141
2142 e_info(hw, "register testing starting\n");
2143 if (ixgbe_reg_test(adapter, &data[0]))
2144 eth_test->flags |= ETH_TEST_FL_FAILED;
2145
2146 ixgbe_reset(adapter);
2147 e_info(hw, "eeprom testing starting\n");
2148 if (ixgbe_eeprom_test(adapter, &data[1]))
2149 eth_test->flags |= ETH_TEST_FL_FAILED;
2150
2151 ixgbe_reset(adapter);
2152 e_info(hw, "interrupt testing starting\n");
2153 if (ixgbe_intr_test(adapter, &data[2]))
2154 eth_test->flags |= ETH_TEST_FL_FAILED;
2155
2156 /* If SRIOV or VMDq is enabled then skip MAC
2157 * loopback diagnostic. */
2158 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2159 IXGBE_FLAG_VMDQ_ENABLED)) {
2160 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2161 data[3] = 0;
2162 goto skip_loopback;
2163 }
2164
2165 ixgbe_reset(adapter);
2166 e_info(hw, "loopback testing starting\n");
2167 if (ixgbe_loopback_test(adapter, &data[3]))
2168 eth_test->flags |= ETH_TEST_FL_FAILED;
2169
2170skip_loopback:
2171 ixgbe_reset(adapter);
2172
2173 /* clear testing bit and return adapter to previous state */
2174 clear_bit(__IXGBE_TESTING, &adapter->state);
2175 if (if_running)
2176 ixgbe_open(netdev);
2177 else if (hw->mac.ops.disable_tx_laser)
2178 hw->mac.ops.disable_tx_laser(hw);
2179 } else {
2180 e_info(hw, "online testing starting\n");
2181
2182 /* Online tests */
2183 if (ixgbe_link_test(adapter, &data[4]))
2184 eth_test->flags |= ETH_TEST_FL_FAILED;
2185
2186 /* Offline tests aren't run; pass by default */
2187 data[0] = 0;
2188 data[1] = 0;
2189 data[2] = 0;
2190 data[3] = 0;
2191
2192 clear_bit(__IXGBE_TESTING, &adapter->state);
2193 }
2194
2195skip_ol_tests:
2196 msleep_interruptible(4 * 1000);
2197}
2198
2199static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2200 struct ethtool_wolinfo *wol)
2201{
2202 struct ixgbe_hw *hw = &adapter->hw;
2203 int retval = 0;
2204
2205 /* WOL not supported for all devices */
2206 if (!ixgbe_wol_supported(adapter, hw->device_id,
2207 hw->subsystem_device_id)) {
2208 retval = 1;
2209 wol->supported = 0;
2210 }
2211
2212 return retval;
2213}
2214
2215static void ixgbe_get_wol(struct net_device *netdev,
2216 struct ethtool_wolinfo *wol)
2217{
2218 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2219
2220 wol->supported = WAKE_UCAST | WAKE_MCAST |
2221 WAKE_BCAST | WAKE_MAGIC;
2222 wol->wolopts = 0;
2223
2224 if (ixgbe_wol_exclusion(adapter, wol) ||
2225 !device_can_wakeup(&adapter->pdev->dev))
2226 return;
2227
2228 if (adapter->wol & IXGBE_WUFC_EX)
2229 wol->wolopts |= WAKE_UCAST;
2230 if (adapter->wol & IXGBE_WUFC_MC)
2231 wol->wolopts |= WAKE_MCAST;
2232 if (adapter->wol & IXGBE_WUFC_BC)
2233 wol->wolopts |= WAKE_BCAST;
2234 if (adapter->wol & IXGBE_WUFC_MAG)
2235 wol->wolopts |= WAKE_MAGIC;
2236}
2237
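/*
 * Update the Wake-on-LAN filter configuration (e.g. "ethtool -s <dev>
 * wol g" for magic-packet wake). Only unicast, multicast, broadcast and
 * magic-packet wake are supported; anything else is rejected.
 */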
2238static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2239{
2240 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2241
2242 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
2243 return -EOPNOTSUPP;
2244
2245 if (ixgbe_wol_exclusion(adapter, wol))
2246 return wol->wolopts ? -EOPNOTSUPP : 0;
2247
2248 adapter->wol = 0;
2249
2250 if (wol->wolopts & WAKE_UCAST)
2251 adapter->wol |= IXGBE_WUFC_EX;
2252 if (wol->wolopts & WAKE_MCAST)
2253 adapter->wol |= IXGBE_WUFC_MC;
2254 if (wol->wolopts & WAKE_BCAST)
2255 adapter->wol |= IXGBE_WUFC_BC;
2256 if (wol->wolopts & WAKE_MAGIC)
2257 adapter->wol |= IXGBE_WUFC_MAG;
2258
2259 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2260
2261 return 0;
2262}
2263
2264static int ixgbe_nway_reset(struct net_device *netdev)
2265{
2266 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2267
2268 if (netif_running(netdev))
2269 ixgbe_reinit_locked(adapter);
2270
2271 return 0;
2272}
2273
2274static int ixgbe_set_phys_id(struct net_device *netdev,
2275 enum ethtool_phys_id_state state)
2276{
2277 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2278 struct ixgbe_hw *hw = &adapter->hw;
2279
2280 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2281 return -EOPNOTSUPP;
2282
2283 switch (state) {
2284 case ETHTOOL_ID_ACTIVE:
2285 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2286 return 2;
2287
2288 case ETHTOOL_ID_ON:
2289 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2290 break;
2291
2292 case ETHTOOL_ID_OFF:
2293 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2294 break;
2295
2296 case ETHTOOL_ID_INACTIVE:
2297 /* Restore LED settings */
2298 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2299 break;
2300 }
2301
2302 return 0;
2303}
2304
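/*
 * Interrupt coalescing via "ethtool -c/-C" (e.g. "ethtool -C <dev>
 * rx-usecs 50"). Stored *_itr_setting values of 0 and 1 are kept
 * verbatim (1 selects the driver's dynamic ITR and is reported as-is
 * here), while larger values hold the requested microseconds shifted
 * left by two so they map onto the EITR interval field.
 */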
2305static int ixgbe_get_coalesce(struct net_device *netdev,
2306 struct ethtool_coalesce *ec)
2307{
2308 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2309
2310 /* only valid if in constant ITR mode */
2311 if (adapter->rx_itr_setting <= 1)
2312 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2313 else
2314 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2315
2316 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2317 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2318 return 0;
2319
2320 /* only valid if in constant ITR mode */
2321 if (adapter->tx_itr_setting <= 1)
2322 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2323 else
2324 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2325
2326 return 0;
2327}
2328
2329/*
2330 * this function must be called before setting the new value of
2331 * rx_itr_setting
2332 */
2333static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2334{
2335 struct net_device *netdev = adapter->netdev;
2336
2337 /* nothing to do if LRO or RSC are not enabled */
2338 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2339 !(netdev->features & NETIF_F_LRO))
2340 return false;
2341
2342 /* check the feature flag value and enable RSC if necessary */
2343 if (adapter->rx_itr_setting == 1 ||
2344 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2345 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2346 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2347 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2348 return true;
2349 }
2350 /* if interrupt rate is too high then disable RSC */
2351 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2352 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2353 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2354 return true;
2355 }
2356 return false;
2357}
2358
2359static int ixgbe_set_coalesce(struct net_device *netdev,
2360 struct ethtool_coalesce *ec)
2361{
2362 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2363 struct ixgbe_q_vector *q_vector;
2364 int i;
2365 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2366 bool need_reset = false;
2367
2368 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2369 /* reject Tx specific changes in case of mixed RxTx vectors */
2370 if (ec->tx_coalesce_usecs)
2371 return -EINVAL;
2372 tx_itr_prev = adapter->rx_itr_setting;
2373 } else {
2374 tx_itr_prev = adapter->tx_itr_setting;
2375 }
2376
2377 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2378 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2379 return -EINVAL;
2380
2381 if (ec->rx_coalesce_usecs > 1)
2382 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2383 else
2384 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2385
2386 if (adapter->rx_itr_setting == 1)
2387 rx_itr_param = IXGBE_20K_ITR;
2388 else
2389 rx_itr_param = adapter->rx_itr_setting;
2390
2391 if (ec->tx_coalesce_usecs > 1)
2392 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2393 else
2394 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2395
2396 if (adapter->tx_itr_setting == 1)
2397 tx_itr_param = IXGBE_12K_ITR;
2398 else
2399 tx_itr_param = adapter->tx_itr_setting;
2400
2401 /* mixed Rx/Tx */
2402 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2403 adapter->tx_itr_setting = adapter->rx_itr_setting;
2404
2405 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2406 if ((adapter->tx_itr_setting != 1) &&
2407 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2408 if ((tx_itr_prev == 1) ||
2409 (tx_itr_prev >= IXGBE_100K_ITR))
2410 need_reset = true;
2411 } else {
2412 if ((tx_itr_prev != 1) &&
2413 (tx_itr_prev < IXGBE_100K_ITR))
2414 need_reset = true;
2415 }
2416
2417 /* check the old value and enable RSC if necessary */
2418 need_reset |= ixgbe_update_rsc(adapter);
2419
2420 for (i = 0; i < adapter->num_q_vectors; i++) {
2421 q_vector = adapter->q_vector[i];
2422 if (q_vector->tx.count && !q_vector->rx.count)
2423 /* tx only */
2424 q_vector->itr = tx_itr_param;
2425 else
2426 /* rx only or mixed */
2427 q_vector->itr = rx_itr_param;
2428 ixgbe_write_eitr(q_vector);
2429 }
2430
2431 /*
2432 * do reset here at the end to make sure EITR==0 case is handled
2433 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2434 * also locks in RSC enable/disable which requires reset
2435 */
2436 if (need_reset)
2437 ixgbe_do_reset(netdev);
2438
2439 return 0;
2440}
2441
2442static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2443 struct ethtool_rxnfc *cmd)
2444{
2445 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2446 struct ethtool_rx_flow_spec *fsp =
2447 (struct ethtool_rx_flow_spec *)&cmd->fs;
2448 struct hlist_node *node2;
2449 struct ixgbe_fdir_filter *rule = NULL;
2450
2451 /* report total rule count */
2452 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2453
2454 hlist_for_each_entry_safe(rule, node2,
2455 &adapter->fdir_filter_list, fdir_node) {
2456 if (fsp->location <= rule->sw_idx)
2457 break;
2458 }
2459
2460 if (!rule || fsp->location != rule->sw_idx)
2461 return -EINVAL;
2462
2463 /* fill out the flow spec entry */
2464
2465 /* set flow type field */
2466 switch (rule->filter.formatted.flow_type) {
2467 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2468 fsp->flow_type = TCP_V4_FLOW;
2469 break;
2470 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2471 fsp->flow_type = UDP_V4_FLOW;
2472 break;
2473 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2474 fsp->flow_type = SCTP_V4_FLOW;
2475 break;
2476 case IXGBE_ATR_FLOW_TYPE_IPV4:
2477 fsp->flow_type = IP_USER_FLOW;
2478 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2479 fsp->h_u.usr_ip4_spec.proto = 0;
2480 fsp->m_u.usr_ip4_spec.proto = 0;
2481 break;
2482 default:
2483 return -EINVAL;
2484 }
2485
2486 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2487 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2488 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2489 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2490 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2491 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2492 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2493 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2494 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2495 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2496 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2497 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2498 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2499 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2500 fsp->flow_type |= FLOW_EXT;
2501
2502 /* record action */
2503 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2504 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2505 else
2506 fsp->ring_cookie = rule->action;
2507
2508 return 0;
2509}
2510
2511static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2512 struct ethtool_rxnfc *cmd,
2513 u32 *rule_locs)
2514{
2515 struct hlist_node *node2;
2516 struct ixgbe_fdir_filter *rule;
2517 int cnt = 0;
2518
2519 /* report total rule count */
2520 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2521
2522 hlist_for_each_entry_safe(rule, node2,
2523 &adapter->fdir_filter_list, fdir_node) {
2524 if (cnt == cmd->rule_cnt)
2525 return -EMSGSIZE;
2526 rule_locs[cnt] = rule->sw_idx;
2527 cnt++;
2528 }
2529
2530 cmd->rule_cnt = cnt;
2531
2532 return 0;
2533}
2534
2535static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2536 struct ethtool_rxnfc *cmd)
2537{
2538 cmd->data = 0;
2539
2540 /* Report default options for RSS on ixgbe */
2541 switch (cmd->flow_type) {
2542 case TCP_V4_FLOW:
2543 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2544 /* fallthrough */
2545 case UDP_V4_FLOW:
2546 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2547 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2548 /* fallthrough */
2549 case SCTP_V4_FLOW:
2550 case AH_ESP_V4_FLOW:
2551 case AH_V4_FLOW:
2552 case ESP_V4_FLOW:
2553 case IPV4_FLOW:
2554 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2555 break;
2556 case TCP_V6_FLOW:
2557 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2558 /* fallthrough */
2559 case UDP_V6_FLOW:
2560 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2561 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2562 /* fallthrough */
2563 case SCTP_V6_FLOW:
2564 case AH_ESP_V6_FLOW:
2565 case AH_V6_FLOW:
2566 case ESP_V6_FLOW:
2567 case IPV6_FLOW:
2568 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2569 break;
2570 default:
2571 return -EINVAL;
2572 }
2573
2574 return 0;
2575}
2576
2577static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2578 u32 *rule_locs)
2579{
2580 struct ixgbe_adapter *adapter = netdev_priv(dev);
2581 int ret = -EOPNOTSUPP;
2582
2583 switch (cmd->cmd) {
2584 case ETHTOOL_GRXRINGS:
2585 cmd->data = adapter->num_rx_queues;
2586 ret = 0;
2587 break;
2588 case ETHTOOL_GRXCLSRLCNT:
2589 cmd->rule_cnt = adapter->fdir_filter_count;
2590 ret = 0;
2591 break;
2592 case ETHTOOL_GRXCLSRULE:
2593 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2594 break;
2595 case ETHTOOL_GRXCLSRLALL:
2596 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2597 break;
2598 case ETHTOOL_GRXFH:
2599 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2600 break;
2601 default:
2602 break;
2603 }
2604
2605 return ret;
2606}
2607
2608int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2609 struct ixgbe_fdir_filter *input,
2610 u16 sw_idx)
2611{
2612 struct ixgbe_hw *hw = &adapter->hw;
2613 struct hlist_node *node2;
2614 struct ixgbe_fdir_filter *rule, *parent;
2615 int err = -EINVAL;
2616
2617 parent = NULL;
2618 rule = NULL;
2619
2620 hlist_for_each_entry_safe(rule, node2,
2621 &adapter->fdir_filter_list, fdir_node) {
2622 /* hash found, or no matching entry */
2623 if (rule->sw_idx >= sw_idx)
2624 break;
2625 parent = rule;
2626 }
2627
2628 /* if there is an old rule occupying our place remove it */
2629 if (rule && (rule->sw_idx == sw_idx)) {
2630 if (!input || (rule->filter.formatted.bkt_hash !=
2631 input->filter.formatted.bkt_hash)) {
2632 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2633 &rule->filter,
2634 sw_idx);
2635 }
2636
2637 hlist_del(&rule->fdir_node);
2638 kfree(rule);
2639 adapter->fdir_filter_count--;
2640 }
2641
2642 /*
2643 * If no input was given this was a delete; err is 0 if a rule was
2644 * successfully found and removed from the list, otherwise -EINVAL.
2645 */
2646 if (!input)
2647 return err;
2648
2649 /* initialize node and set software index */
2650 INIT_HLIST_NODE(&input->fdir_node);
2651
2652 /* add filter to the list */
2653 if (parent)
2654 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2655 else
2656 hlist_add_head(&input->fdir_node,
2657 &adapter->fdir_filter_list);
2658
2659 /* update counts */
2660 adapter->fdir_filter_count++;
2661
2662 return 0;
2663}
2664
2665static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2666 u8 *flow_type)
2667{
2668 switch (fsp->flow_type & ~FLOW_EXT) {
2669 case TCP_V4_FLOW:
2670 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2671 break;
2672 case UDP_V4_FLOW:
2673 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2674 break;
2675 case SCTP_V4_FLOW:
2676 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2677 break;
2678 case IP_USER_FLOW:
2679 switch (fsp->h_u.usr_ip4_spec.proto) {
2680 case IPPROTO_TCP:
2681 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2682 break;
2683 case IPPROTO_UDP:
2684 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2685 break;
2686 case IPPROTO_SCTP:
2687 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2688 break;
2689 case 0:
2690 if (!fsp->m_u.usr_ip4_spec.proto) {
2691 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2692 break;
2693 }
2694 /* fall through */
2695 default:
2696 return 0;
2697 }
2698 break;
2699 default:
2700 return 0;
2701 }
2702
2703 return 1;
2704}
2705
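/*
 * Insert a Flow Director perfect filter from an ethtool ntuple request,
 * e.g. "ethtool -N <dev> flow-type tcp4 dst-port 80 action 6" to steer
 * matching traffic to queue 6, or "action -1" to drop it. All filters
 * on a port must share a single field mask; the mask is programmed into
 * hardware when the first filter is added.
 */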
2706static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2707 struct ethtool_rxnfc *cmd)
2708{
2709 struct ethtool_rx_flow_spec *fsp =
2710 (struct ethtool_rx_flow_spec *)&cmd->fs;
2711 struct ixgbe_hw *hw = &adapter->hw;
2712 struct ixgbe_fdir_filter *input;
2713 union ixgbe_atr_input mask;
2714 u8 queue;
2715 int err;
2716
2717 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2718 return -EOPNOTSUPP;
2719
2720 /* ring_cookie encodes a queue and an ixgbe pool (VF), unless it is the
2721 * special drop index.
2722 */
2723 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2724 queue = IXGBE_FDIR_DROP_QUEUE;
2725 } else {
2726 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2727 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2728
2729 if (!vf && (ring >= adapter->num_rx_queues))
2730 return -EINVAL;
2731 else if (vf &&
2732 ((vf > adapter->num_vfs) ||
2733 ring >= adapter->num_rx_queues_per_pool))
2734 return -EINVAL;
2735
2736 /* Map the ring onto the absolute queue index */
2737 if (!vf)
2738 queue = adapter->rx_ring[ring]->reg_idx;
2739 else
2740 queue = ((vf - 1) *
2741 adapter->num_rx_queues_per_pool) + ring;
2742 }
2743
2744 /* Don't allow indexes to exist outside of available space */
2745 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2746 e_err(drv, "Location out of range\n");
2747 return -EINVAL;
2748 }
2749
2750 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2751 if (!input)
2752 return -ENOMEM;
2753
2754 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2755
2756 /* set SW index */
2757 input->sw_idx = fsp->location;
2758
2759 /* record flow type */
2760 if (!ixgbe_flowspec_to_flow_type(fsp,
2761 &input->filter.formatted.flow_type)) {
2762 e_err(drv, "Unrecognized flow type\n");
2763 goto err_out;
2764 }
2765
2766 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2767 IXGBE_ATR_L4TYPE_MASK;
2768
2769 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2770 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2771
2772 /* Copy input into formatted structures */
2773 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2774 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2775 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2776 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2777 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2778 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2779 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2780 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2781
2782 if (fsp->flow_type & FLOW_EXT) {
2783 input->filter.formatted.vm_pool =
2784 (unsigned char)ntohl(fsp->h_ext.data[1]);
2785 mask.formatted.vm_pool =
2786 (unsigned char)ntohl(fsp->m_ext.data[1]);
2787 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2788 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2789 input->filter.formatted.flex_bytes =
2790 fsp->h_ext.vlan_etype;
2791 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2792 }
2793
2794 /* determine if we need to drop or route the packet */
2795 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2796 input->action = IXGBE_FDIR_DROP_QUEUE;
2797 else
2798 input->action = fsp->ring_cookie;
2799
2800 spin_lock(&adapter->fdir_perfect_lock);
2801
2802 if (hlist_empty(&adapter->fdir_filter_list)) {
2803 /* save mask and program input mask into HW */
2804 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2805 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2806 if (err) {
2807 e_err(drv, "Error writing mask\n");
2808 goto err_out_w_lock;
2809 }
2810 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2811 e_err(drv, "Only one mask supported per port\n");
2812 goto err_out_w_lock;
2813 }
2814
2815 /* apply mask and compute/store hash */
2816 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2817
2818 /* program filters to filter memory */
2819 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2820 &input->filter, input->sw_idx, queue);
2821 if (err)
2822 goto err_out_w_lock;
2823
2824 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2825
2826 spin_unlock(&adapter->fdir_perfect_lock);
2827
2828 return err;
2829err_out_w_lock:
2830 spin_unlock(&adapter->fdir_perfect_lock);
2831err_out:
2832 kfree(input);
2833 return -EINVAL;
2834}
2835
2836static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2837 struct ethtool_rxnfc *cmd)
2838{
2839 struct ethtool_rx_flow_spec *fsp =
2840 (struct ethtool_rx_flow_spec *)&cmd->fs;
2841 int err;
2842
2843 spin_lock(&adapter->fdir_perfect_lock);
2844 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2845 spin_unlock(&adapter->fdir_perfect_lock);
2846
2847 return err;
2848}
2849
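/*
 * RSS hash field configuration ("ethtool -N <dev> rx-flow-hash ...").
 * TCP always hashes on addresses and ports, while hashing UDP ports is
 * optional and controlled by the flags below, e.g.
 * "ethtool -N <dev> rx-flow-hash udp4 sdfn" to include the UDP ports.
 */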
2850#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2851 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2852static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2853 struct ethtool_rxnfc *nfc)
2854{
2855 u32 flags2 = adapter->flags2;
2856
2857 /*
2858 * RSS does not support anything other than hashing
2859 * to queues on src and dst IPs and ports
2860 */
2861 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2862 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2863 return -EINVAL;
2864
2865 switch (nfc->flow_type) {
2866 case TCP_V4_FLOW:
2867 case TCP_V6_FLOW:
2868 if (!(nfc->data & RXH_IP_SRC) ||
2869 !(nfc->data & RXH_IP_DST) ||
2870 !(nfc->data & RXH_L4_B_0_1) ||
2871 !(nfc->data & RXH_L4_B_2_3))
2872 return -EINVAL;
2873 break;
2874 case UDP_V4_FLOW:
2875 if (!(nfc->data & RXH_IP_SRC) ||
2876 !(nfc->data & RXH_IP_DST))
2877 return -EINVAL;
2878 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2879 case 0:
2880 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2881 break;
2882 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2883 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2884 break;
2885 default:
2886 return -EINVAL;
2887 }
2888 break;
2889 case UDP_V6_FLOW:
2890 if (!(nfc->data & RXH_IP_SRC) ||
2891 !(nfc->data & RXH_IP_DST))
2892 return -EINVAL;
2893 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2894 case 0:
2895 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2896 break;
2897 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2898 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2899 break;
2900 default:
2901 return -EINVAL;
2902 }
2903 break;
2904 case AH_ESP_V4_FLOW:
2905 case AH_V4_FLOW:
2906 case ESP_V4_FLOW:
2907 case SCTP_V4_FLOW:
2908 case AH_ESP_V6_FLOW:
2909 case AH_V6_FLOW:
2910 case ESP_V6_FLOW:
2911 case SCTP_V6_FLOW:
2912 if (!(nfc->data & RXH_IP_SRC) ||
2913 !(nfc->data & RXH_IP_DST) ||
2914 (nfc->data & RXH_L4_B_0_1) ||
2915 (nfc->data & RXH_L4_B_2_3))
2916 return -EINVAL;
2917 break;
2918 default:
2919 return -EINVAL;
2920 }
2921
2922 /* if we changed something we need to update flags */
2923 if (flags2 != adapter->flags2) {
2924 struct ixgbe_hw *hw = &adapter->hw;
2925 u32 mrqc;
2926 unsigned int pf_pool = adapter->num_vfs;
2927
2928 if ((hw->mac.type >= ixgbe_mac_X550) &&
2929 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2930 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2931 else
2932 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2933
2934 if ((flags2 & UDP_RSS_FLAGS) &&
2935 !(adapter->flags2 & UDP_RSS_FLAGS))
2936 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2937
2938 adapter->flags2 = flags2;
2939
2940 /* Perform hash on these packet types */
2941 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2942 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2943 | IXGBE_MRQC_RSS_FIELD_IPV6
2944 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2945
2946 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2947 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2948
2949 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2950 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2951
2952 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2953 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2954
2955 if ((hw->mac.type >= ixgbe_mac_X550) &&
2956 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2957 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
2958 else
2959 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2960 }
2961
2962 return 0;
2963}
2964
2965static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2966{
2967 struct ixgbe_adapter *adapter = netdev_priv(dev);
2968 int ret = -EOPNOTSUPP;
2969
2970 switch (cmd->cmd) {
2971 case ETHTOOL_SRXCLSRLINS:
2972 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2973 break;
2974 case ETHTOOL_SRXCLSRLDEL:
2975 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2976 break;
2977 case ETHTOOL_SRXFH:
2978 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2979 break;
2980 default:
2981 break;
2982 }
2983
2984 return ret;
2985}
2986
2987static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
2988{
2989 if (adapter->hw.mac.type < ixgbe_mac_X550)
2990 return 16;
2991 else
2992 return 64;
2993}
2994
2995static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
2996{
2997 return IXGBE_RSS_KEY_SIZE;
2998}
2999
3000static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3001{
3002 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3003
3004 return ixgbe_rss_indir_tbl_entries(adapter);
3005}
3006
3007static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3008{
3009 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3010 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3011
3012 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3013 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3014
3015 for (i = 0; i < reta_size; i++)
3016 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3017}
3018
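/*
 * RSS key and indirection table access for "ethtool -x"/"ethtool -X"
 * (e.g. "ethtool -X <dev> equal 8" to spread flows over the first eight
 * queues). Only the default Toeplitz hash is supported, so requests to
 * change hfunc are rejected.
 */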
3019static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3020 u8 *hfunc)
3021{
3022 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3023
3024 if (hfunc)
3025 *hfunc = ETH_RSS_HASH_TOP;
3026
3027 if (indir)
3028 ixgbe_get_reta(adapter, indir);
3029
3030 if (key)
3031 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3032
3033 return 0;
3034}
3035
3036static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3037 const u8 *key, const u8 hfunc)
3038{
3039 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3040 int i;
3041 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3042
3043 if (hfunc)
3044 return -EINVAL;
3045
3046 /* Fill out the redirection table */
3047 if (indir) {
3048 int max_queues = min_t(int, adapter->num_rx_queues,
3049 ixgbe_rss_indir_tbl_max(adapter));
3050
3051 /* Allow at least 2 queues w/ SR-IOV. */
3052 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3053 (max_queues < 2))
3054 max_queues = 2;
3055
3056 /* Verify user input. */
3057 for (i = 0; i < reta_entries; i++)
3058 if (indir[i] >= max_queues)
3059 return -EINVAL;
3060
3061 for (i = 0; i < reta_entries; i++)
3062 adapter->rss_indir_tbl[i] = indir[i];
3063
3064 ixgbe_store_reta(adapter);
3065 }
3066
3067 /* Fill out the rss hash key */
3068 if (key) {
3069 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3070 ixgbe_store_key(adapter);
3071 }
3072
3073 return 0;
3074}
3075
3076static int ixgbe_get_ts_info(struct net_device *dev,
3077 struct ethtool_ts_info *info)
3078{
3079 struct ixgbe_adapter *adapter = netdev_priv(dev);
3080
3081 /* we always support timestamping disabled */
3082 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3083
3084 switch (adapter->hw.mac.type) {
3085 case ixgbe_mac_X550:
3086 case ixgbe_mac_X550EM_x:
3087 case ixgbe_mac_x550em_a:
3088 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3089 break;
3090 case ixgbe_mac_X540:
3091 case ixgbe_mac_82599EB:
3092 info->rx_filters |=
3093 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3094 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3095 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3096 break;
3097 default:
3098 return ethtool_op_get_ts_info(dev, info);
3099 }
3100
3101 info->so_timestamping =
3102 SOF_TIMESTAMPING_TX_SOFTWARE |
3103 SOF_TIMESTAMPING_RX_SOFTWARE |
3104 SOF_TIMESTAMPING_SOFTWARE |
3105 SOF_TIMESTAMPING_TX_HARDWARE |
3106 SOF_TIMESTAMPING_RX_HARDWARE |
3107 SOF_TIMESTAMPING_RAW_HARDWARE;
3108
3109 if (adapter->ptp_clock)
3110 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3111 else
3112 info->phc_index = -1;
3113
3114 info->tx_types =
3115 BIT(HWTSTAMP_TX_OFF) |
3116 BIT(HWTSTAMP_TX_ON);
3117
3118 return 0;
3119}
3120
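/*
 * Queue channel limits for "ethtool -l/-L" (e.g. "ethtool -L <dev>
 * combined 16"). Only combined Rx/Tx channels are exposed; the upper
 * bound depends on whether MSI-X, SR-IOV, DCB or ATR is in use.
 */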
3121static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3122{
3123 unsigned int max_combined;
3124 u8 tcs = adapter->hw_tcs;
3125
3126 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3127 /* We only support one q_vector without MSI-X */
3128 max_combined = 1;
3129 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3130 /* Limit value based on the queue mask */
3131 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3132 } else if (tcs > 1) {
3133 /* For DCB report channels per traffic class */
3134 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3135 /* 8 TC w/ 4 queues per TC */
3136 max_combined = 4;
3137 } else if (tcs > 4) {
3138 /* 8 TC w/ 8 queues per TC */
3139 max_combined = 8;
3140 } else {
3141 /* 4 TC w/ 16 queues per TC */
3142 max_combined = 16;
3143 }
3144 } else if (adapter->atr_sample_rate) {
3145 /* support up to 64 queues with ATR */
3146 max_combined = IXGBE_MAX_FDIR_INDICES;
3147 } else {
3148 /* support up to 16 queues with RSS */
3149 max_combined = ixgbe_max_rss_indices(adapter);
3150 }
3151
3152 return max_combined;
3153}
3154
3155static void ixgbe_get_channels(struct net_device *dev,
3156 struct ethtool_channels *ch)
3157{
3158 struct ixgbe_adapter *adapter = netdev_priv(dev);
3159
3160 /* report maximum channels */
3161 ch->max_combined = ixgbe_max_channels(adapter);
3162
3163 /* report info for other vector */
3164 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3165 ch->max_other = NON_Q_VECTORS;
3166 ch->other_count = NON_Q_VECTORS;
3167 }
3168
3169 /* record RSS queues */
3170 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3171
3172 /* nothing else to report if RSS is disabled */
3173 if (ch->combined_count == 1)
3174 return;
3175
3176 /* we do not support ATR queueing if SR-IOV is enabled */
3177 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3178 return;
3179
3180 /* same thing goes for being DCB enabled */
3181 if (adapter->hw_tcs > 1)
3182 return;
3183
3184 /* if ATR is disabled we can exit */
3185 if (!adapter->atr_sample_rate)
3186 return;
3187
3188 /* report flow director queues as maximum channels */
3189 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3190}
3191
3192static int ixgbe_set_channels(struct net_device *dev,
3193 struct ethtool_channels *ch)
3194{
3195 struct ixgbe_adapter *adapter = netdev_priv(dev);
3196 unsigned int count = ch->combined_count;
3197 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3198
3199 /* verify they are not requesting separate vectors */
3200 if (!count || ch->rx_count || ch->tx_count)
3201 return -EINVAL;
3202
3203 /* verify other_count has not changed */
3204 if (ch->other_count != NON_Q_VECTORS)
3205 return -EINVAL;
3206
3207 /* verify the number of channels does not exceed hardware limits */
3208 if (count > ixgbe_max_channels(adapter))
3209 return -EINVAL;
3210
3211 /* update feature limits from largest to smallest supported values */
3212 adapter->ring_feature[RING_F_FDIR].limit = count;
3213
3214 /* cap RSS limit */
3215 if (count > max_rss_indices)
3216 count = max_rss_indices;
3217 adapter->ring_feature[RING_F_RSS].limit = count;
3218
3219#ifdef IXGBE_FCOE
3220 /* cap FCoE limit at 8 */
3221 if (count > IXGBE_FCRETA_SIZE)
3222 count = IXGBE_FCRETA_SIZE;
3223 adapter->ring_feature[RING_F_FCOE].limit = count;
3224
3225#endif
3226 /* use setup TC to update any traffic class queue mapping */
3227 return ixgbe_setup_tc(dev, adapter->hw_tcs);
3228}
3229
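/*
 * SFP+ module EEPROM access for "ethtool -m <dev>". Reports SFF-8472
 * data when the module supports it (and does not require a page swap),
 * otherwise falls back to the basic SFF-8079 ID space.
 */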
3230static int ixgbe_get_module_info(struct net_device *dev,
3231 struct ethtool_modinfo *modinfo)
3232{
3233 struct ixgbe_adapter *adapter = netdev_priv(dev);
3234 struct ixgbe_hw *hw = &adapter->hw;
3235 s32 status;
3236 u8 sff8472_rev, addr_mode;
3237 bool page_swap = false;
3238
3239 if (hw->phy.type == ixgbe_phy_fw)
3240 return -ENXIO;
3241
3242 /* Check whether we support SFF-8472 or not */
3243 status = hw->phy.ops.read_i2c_eeprom(hw,
3244 IXGBE_SFF_SFF_8472_COMP,
3245 &sff8472_rev);
3246 if (status)
3247 return -EIO;
3248
3249 /* check for an addressing mode (page swap) we do not support */
3250 status = hw->phy.ops.read_i2c_eeprom(hw,
3251 IXGBE_SFF_SFF_8472_SWAP,
3252 &addr_mode);
3253 if (status)
3254 return -EIO;
3255
3256 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3257 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3258 page_swap = true;
3259 }
3260
3261 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
3262 /* We have a SFP, but it does not support SFF-8472 */
3263 modinfo->type = ETH_MODULE_SFF_8079;
3264 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3265 } else {
3266 /* We have a SFP which supports a revision of SFF-8472. */
3267 modinfo->type = ETH_MODULE_SFF_8472;
3268 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3269 }
3270
3271 return 0;
3272}
3273
3274static int ixgbe_get_module_eeprom(struct net_device *dev,
3275 struct ethtool_eeprom *ee,
3276 u8 *data)
3277{
3278 struct ixgbe_adapter *adapter = netdev_priv(dev);
3279 struct ixgbe_hw *hw = &adapter->hw;
3280 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3281 u8 databyte = 0xFF;
3282 int i = 0;
3283
3284 if (ee->len == 0)
3285 return -EINVAL;
3286
3287 if (hw->phy.type == ixgbe_phy_fw)
3288 return -ENXIO;
3289
3290 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3291 /* I2C reads can take long time */
3292 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3293 return -EBUSY;
3294
3295 if (i < ETH_MODULE_SFF_8079_LEN)
3296 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3297 else
3298 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3299
3300 if (status)
3301 return -EIO;
3302
3303 data[i - ee->offset] = databyte;
3304 }
3305
3306 return 0;
3307}
3308
3309static const struct {
3310 ixgbe_link_speed mac_speed;
3311 u32 supported;
3312} ixgbe_ls_map[] = {
3313 { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3314 { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3315 { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3316 { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
3317 { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
3318};
3319
3320static const struct {
3321 u32 lp_advertised;
3322 u32 mac_speed;
3323} ixgbe_lp_map[] = {
3324 { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3325 { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3326 { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3327 { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3328 { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
3329 { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
3330};
3331
3332static int
3333ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3334{
3335 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3336 struct ixgbe_hw *hw = &adapter->hw;
3337 s32 rc;
3338 u16 i;
3339
3340 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3341 if (rc)
3342 return rc;
3343
3344 edata->lp_advertised = 0;
3345 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3346 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3347 edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3348 }
3349
3350 edata->supported = 0;
3351 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3352 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3353 edata->supported |= ixgbe_ls_map[i].supported;
3354 }
3355
3356 edata->advertised = 0;
3357 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3358 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3359 edata->advertised |= ixgbe_ls_map[i].supported;
3360 }
3361
3362 edata->eee_enabled = !!edata->advertised;
3363 edata->tx_lpi_enabled = edata->eee_enabled;
3364 if (edata->advertised & edata->lp_advertised)
3365 edata->eee_active = true;
3366
3367 return 0;
3368}
3369
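/*
 * Energy Efficient Ethernet status for "ethtool --show-eee". Only
 * adapters flagged EEE-capable with a firmware-managed PHY
 * (ixgbe_phy_fw) can report it through this path; everything else gets
 * -EOPNOTSUPP.
 */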
3370static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3371{
3372 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3373 struct ixgbe_hw *hw = &adapter->hw;
3374
3375 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3376 return -EOPNOTSUPP;
3377
3378 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3379 return ixgbe_get_eee_fw(adapter, edata);
3380
3381 return -EOPNOTSUPP;
3382}
3383
3384static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3385{
3386 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3387 struct ixgbe_hw *hw = &adapter->hw;
3388 struct ethtool_eee eee_data;
3389 s32 ret_val;
3390
3391 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3392 return -EOPNOTSUPP;
3393
3394 memset(&eee_data, 0, sizeof(struct ethtool_eee));
3395
3396 ret_val = ixgbe_get_eee(netdev, &eee_data);
3397 if (ret_val)
3398 return ret_val;
3399
3400 if (eee_data.eee_enabled && !edata->eee_enabled) {
3401 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3402 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3403 return -EINVAL;
3404 }
3405
3406 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3407 e_err(drv,
3408 "Setting EEE Tx LPI timer is not supported\n");
3409 return -EINVAL;
3410 }
3411
3412 if (eee_data.advertised != edata->advertised) {
3413 e_err(drv,
3414 "Setting EEE advertised speeds is not supported\n");
3415 return -EINVAL;
3416 }
3417 }
3418
3419 if (eee_data.eee_enabled != edata->eee_enabled) {
3420 if (edata->eee_enabled) {
3421 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3422 hw->phy.eee_speeds_advertised =
3423 hw->phy.eee_speeds_supported;
3424 } else {
3425 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3426 hw->phy.eee_speeds_advertised = 0;
3427 }
3428
3429 /* reset link */
3430 if (netif_running(netdev))
3431 ixgbe_reinit_locked(adapter);
3432 else
3433 ixgbe_reset(adapter);
3434 }
3435
3436 return 0;
3437}
3438
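/*
 * Driver private flags ("ethtool --show-priv-flags" /
 * "ethtool --set-priv-flags"). Currently the only flag selects the
 * legacy Rx buffer path and triggers a reinit when it changes.
 */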
3439static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3440{
3441 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3442 u32 priv_flags = 0;
3443
3444 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3445 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3446
3447 return priv_flags;
3448}
3449
3450static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3451{
3452 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3453 unsigned int flags2 = adapter->flags2;
3454
3455 flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3456 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3457 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3458
3459 if (flags2 != adapter->flags2) {
3460 adapter->flags2 = flags2;
3461
3462 /* reset interface to repopulate queues */
3463 if (netif_running(netdev))
3464 ixgbe_reinit_locked(adapter);
3465 }
3466
3467 return 0;
3468}
3469
3470static const struct ethtool_ops ixgbe_ethtool_ops = {
3471 .get_drvinfo = ixgbe_get_drvinfo,
3472 .get_regs_len = ixgbe_get_regs_len,
3473 .get_regs = ixgbe_get_regs,
3474 .get_wol = ixgbe_get_wol,
3475 .set_wol = ixgbe_set_wol,
3476 .nway_reset = ixgbe_nway_reset,
3477 .get_link = ethtool_op_get_link,
3478 .get_eeprom_len = ixgbe_get_eeprom_len,
3479 .get_eeprom = ixgbe_get_eeprom,
3480 .set_eeprom = ixgbe_set_eeprom,
3481 .get_ringparam = ixgbe_get_ringparam,
3482 .set_ringparam = ixgbe_set_ringparam,
3483 .get_pauseparam = ixgbe_get_pauseparam,
3484 .set_pauseparam = ixgbe_set_pauseparam,
3485 .get_msglevel = ixgbe_get_msglevel,
3486 .set_msglevel = ixgbe_set_msglevel,
3487 .self_test = ixgbe_diag_test,
3488 .get_strings = ixgbe_get_strings,
3489 .set_phys_id = ixgbe_set_phys_id,
3490 .get_sset_count = ixgbe_get_sset_count,
3491 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3492 .get_coalesce = ixgbe_get_coalesce,
3493 .set_coalesce = ixgbe_set_coalesce,
3494 .get_rxnfc = ixgbe_get_rxnfc,
3495 .set_rxnfc = ixgbe_set_rxnfc,
3496 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3497 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3498 .get_rxfh = ixgbe_get_rxfh,
3499 .set_rxfh = ixgbe_set_rxfh,
3500 .get_eee = ixgbe_get_eee,
3501 .set_eee = ixgbe_set_eee,
3502 .get_channels = ixgbe_get_channels,
3503 .set_channels = ixgbe_set_channels,
3504 .get_priv_flags = ixgbe_get_priv_flags,
3505 .set_priv_flags = ixgbe_set_priv_flags,
3506 .get_ts_info = ixgbe_get_ts_info,
3507 .get_module_info = ixgbe_get_module_info,
3508 .get_module_eeprom = ixgbe_get_module_eeprom,
3509 .get_link_ksettings = ixgbe_get_link_ksettings,
3510 .set_link_ksettings = ixgbe_set_link_ksettings,
3511};
3512
3513void ixgbe_set_ethtool_ops(struct net_device *netdev)
3514{
3515 netdev->ethtool_ops = &ixgbe_ethtool_ops;
3516}