1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* ethtool support for ixgbe */
29
30#include <linux/interrupt.h>
31#include <linux/types.h>
32#include <linux/module.h>
33#include <linux/slab.h>
34#include <linux/pci.h>
35#include <linux/netdevice.h>
36#include <linux/ethtool.h>
37#include <linux/vmalloc.h>
38#include <linux/highmem.h>
39#include <linux/uaccess.h>
40
41#include "ixgbe.h"
42
43
44#define IXGBE_ALL_RAR_ENTRIES 16
45
46enum {NETDEV_STATS, IXGBE_STATS};
47
48struct ixgbe_stats {
49 char stat_string[ETH_GSTRING_LEN];
50 int type;
51 int sizeof_stat;
52 int stat_offset;
53};
54
55#define IXGBE_STAT(m) IXGBE_STATS, \
56 sizeof(((struct ixgbe_adapter *)0)->m), \
57 offsetof(struct ixgbe_adapter, m)
58#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
59 sizeof(((struct rtnl_link_stats64 *)0)->m), \
60 offsetof(struct rtnl_link_stats64, m)
61
62static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
63 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
64 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
65 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
66 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
67 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
68 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
69 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
70 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
71 {"lsc_int", IXGBE_STAT(lsc_int)},
72 {"tx_busy", IXGBE_STAT(tx_busy)},
73 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
74 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
75 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
76 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
77 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
78 {"multicast", IXGBE_NETDEV_STAT(multicast)},
79 {"broadcast", IXGBE_STAT(stats.bprc)},
80 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
81 {"collisions", IXGBE_NETDEV_STAT(collisions)},
82 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
83 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
84 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
85 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
86 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
87 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
88 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
89 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
90 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
91 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
92 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
93 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
94 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
95 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
96 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
97 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
98 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
99 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
100 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
101 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
102 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
103 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
104 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
105 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
106 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
107 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
108 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
109 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
110 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
111 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
112#ifdef IXGBE_FCOE
113 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
114 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
115 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
116 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
117 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
118 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
119 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
120 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
121#endif /* IXGBE_FCOE */
122};
123
124/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
 125 * we define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is
 126 * done because we do not have a good way to get the maximum number of
 127 * Rx queues with CONFIG_RPS disabled.
 128 */
129#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
130
131#define IXGBE_QUEUE_STATS_LEN ( \
132 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
133 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
134#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
135#define IXGBE_PB_STATS_LEN ( \
136 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
137 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
138 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
139 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
140 / sizeof(u64))
141#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
142 IXGBE_PB_STATS_LEN + \
143 IXGBE_QUEUE_STATS_LEN)
144
145static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
146 "Register test (offline)", "Eeprom test (offline)",
147 "Interrupt test (offline)", "Loopback test (offline)",
148 "Link test (on/offline)"
149};
150#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
151
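/* Report link capabilities, advertised modes, port type and the current
 * speed/duplex to ethtool, based on the media type, MAC type and the
 * PHY/SFP module detected by the hardware.
 */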
152static int ixgbe_get_settings(struct net_device *netdev,
153 struct ethtool_cmd *ecmd)
154{
155 struct ixgbe_adapter *adapter = netdev_priv(netdev);
156 struct ixgbe_hw *hw = &adapter->hw;
157 u32 link_speed = 0;
158 bool link_up;
159
160 ecmd->supported = SUPPORTED_10000baseT_Full;
161 ecmd->autoneg = AUTONEG_ENABLE;
162 ecmd->transceiver = XCVR_EXTERNAL;
163 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
164 (hw->phy.multispeed_fiber)) {
165 ecmd->supported |= (SUPPORTED_1000baseT_Full |
166 SUPPORTED_Autoneg);
167
168 switch (hw->mac.type) {
169 case ixgbe_mac_X540:
170 ecmd->supported |= SUPPORTED_100baseT_Full;
171 break;
172 default:
173 break;
174 }
175
176 ecmd->advertising = ADVERTISED_Autoneg;
177 if (hw->phy.autoneg_advertised) {
178 if (hw->phy.autoneg_advertised &
179 IXGBE_LINK_SPEED_100_FULL)
180 ecmd->advertising |= ADVERTISED_100baseT_Full;
181 if (hw->phy.autoneg_advertised &
182 IXGBE_LINK_SPEED_10GB_FULL)
183 ecmd->advertising |= ADVERTISED_10000baseT_Full;
184 if (hw->phy.autoneg_advertised &
185 IXGBE_LINK_SPEED_1GB_FULL)
186 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 } else {
188 /*
189 * Default advertised modes in case
190 * phy.autoneg_advertised isn't set.
191 */
192 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
193 ADVERTISED_1000baseT_Full);
194 if (hw->mac.type == ixgbe_mac_X540)
195 ecmd->advertising |= ADVERTISED_100baseT_Full;
196 }
197
198 if (hw->phy.media_type == ixgbe_media_type_copper) {
199 ecmd->supported |= SUPPORTED_TP;
200 ecmd->advertising |= ADVERTISED_TP;
201 ecmd->port = PORT_TP;
202 } else {
203 ecmd->supported |= SUPPORTED_FIBRE;
204 ecmd->advertising |= ADVERTISED_FIBRE;
205 ecmd->port = PORT_FIBRE;
206 }
207 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
208		/* Set as FIBRE until SERDES is defined in the kernel */
209 if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
210 ecmd->supported = (SUPPORTED_1000baseT_Full |
211 SUPPORTED_FIBRE);
212 ecmd->advertising = (ADVERTISED_1000baseT_Full |
213 ADVERTISED_FIBRE);
214 ecmd->port = PORT_FIBRE;
215 ecmd->autoneg = AUTONEG_DISABLE;
216 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
217 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
218 ecmd->supported |= (SUPPORTED_1000baseT_Full |
219 SUPPORTED_Autoneg |
220 SUPPORTED_FIBRE);
221 ecmd->advertising = (ADVERTISED_10000baseT_Full |
222 ADVERTISED_1000baseT_Full |
223 ADVERTISED_Autoneg |
224 ADVERTISED_FIBRE);
225 ecmd->port = PORT_FIBRE;
226 } else {
227 ecmd->supported |= (SUPPORTED_1000baseT_Full |
228 SUPPORTED_FIBRE);
229 ecmd->advertising = (ADVERTISED_10000baseT_Full |
230 ADVERTISED_1000baseT_Full |
231 ADVERTISED_FIBRE);
232 ecmd->port = PORT_FIBRE;
233 }
234 } else {
235 ecmd->supported |= SUPPORTED_FIBRE;
236 ecmd->advertising = (ADVERTISED_10000baseT_Full |
237 ADVERTISED_FIBRE);
238 ecmd->port = PORT_FIBRE;
239 ecmd->autoneg = AUTONEG_DISABLE;
240 }
241
242 /* Get PHY type */
243 switch (adapter->hw.phy.type) {
244 case ixgbe_phy_tn:
245 case ixgbe_phy_aq:
246 case ixgbe_phy_cu_unknown:
247		/* Copper 10GBASE-T */
248 ecmd->port = PORT_TP;
249 break;
250 case ixgbe_phy_qt:
251 ecmd->port = PORT_FIBRE;
252 break;
253 case ixgbe_phy_nl:
254 case ixgbe_phy_sfp_passive_tyco:
255 case ixgbe_phy_sfp_passive_unknown:
256 case ixgbe_phy_sfp_ftl:
257 case ixgbe_phy_sfp_avago:
258 case ixgbe_phy_sfp_intel:
259 case ixgbe_phy_sfp_unknown:
260 switch (adapter->hw.phy.sfp_type) {
261 /* SFP+ devices, further checking needed */
262 case ixgbe_sfp_type_da_cu:
263 case ixgbe_sfp_type_da_cu_core0:
264 case ixgbe_sfp_type_da_cu_core1:
265 ecmd->port = PORT_DA;
266 break;
267 case ixgbe_sfp_type_sr:
268 case ixgbe_sfp_type_lr:
269 case ixgbe_sfp_type_srlr_core0:
270 case ixgbe_sfp_type_srlr_core1:
271 ecmd->port = PORT_FIBRE;
272 break;
273 case ixgbe_sfp_type_not_present:
274 ecmd->port = PORT_NONE;
275 break;
276 case ixgbe_sfp_type_1g_cu_core0:
277 case ixgbe_sfp_type_1g_cu_core1:
278 ecmd->port = PORT_TP;
279 ecmd->supported = SUPPORTED_TP;
280 ecmd->advertising = (ADVERTISED_1000baseT_Full |
281 ADVERTISED_TP);
282 break;
283 case ixgbe_sfp_type_unknown:
284 default:
285 ecmd->port = PORT_OTHER;
286 break;
287 }
288 break;
289 case ixgbe_phy_xaui:
290 ecmd->port = PORT_NONE;
291 break;
292 case ixgbe_phy_unknown:
293 case ixgbe_phy_generic:
294 case ixgbe_phy_sfp_unsupported:
295 default:
296 ecmd->port = PORT_OTHER;
297 break;
298 }
299
300 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
301 if (link_up) {
302 switch (link_speed) {
303 case IXGBE_LINK_SPEED_10GB_FULL:
304 ethtool_cmd_speed_set(ecmd, SPEED_10000);
305 break;
306 case IXGBE_LINK_SPEED_1GB_FULL:
307 ethtool_cmd_speed_set(ecmd, SPEED_1000);
308 break;
309 case IXGBE_LINK_SPEED_100_FULL:
310 ethtool_cmd_speed_set(ecmd, SPEED_100);
311 break;
312 default:
313 break;
314 }
315 ecmd->duplex = DUPLEX_FULL;
316 } else {
317 ethtool_cmd_speed_set(ecmd, -1);
318 ecmd->duplex = -1;
319 }
320
321 return 0;
322}
323
324static int ixgbe_set_settings(struct net_device *netdev,
325 struct ethtool_cmd *ecmd)
326{
327 struct ixgbe_adapter *adapter = netdev_priv(netdev);
328 struct ixgbe_hw *hw = &adapter->hw;
329 u32 advertised, old;
330 s32 err = 0;
331
332 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
333 (hw->phy.multispeed_fiber)) {
334 /*
335 * this function does not support duplex forcing, but can
336 * limit the advertising of the adapter to the specified speed
337 */
338 if (ecmd->autoneg == AUTONEG_DISABLE)
339 return -EINVAL;
340
341 if (ecmd->advertising & ~ecmd->supported)
342 return -EINVAL;
343
344 old = hw->phy.autoneg_advertised;
345 advertised = 0;
346 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
347 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
348
349 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
350 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
351
352 if (ecmd->advertising & ADVERTISED_100baseT_Full)
353 advertised |= IXGBE_LINK_SPEED_100_FULL;
354
355 if (old == advertised)
356 return err;
357 /* this sets the link speed and restarts auto-neg */
358 hw->mac.autotry_restart = true;
359 err = hw->mac.ops.setup_link(hw, advertised, true, true);
360 if (err) {
361 e_info(probe, "setup link failed with code %d\n", err);
362 hw->mac.ops.setup_link(hw, old, true, true);
363 }
364 } else {
365 /* in this case we currently only support 10Gb/FULL */
366 u32 speed = ethtool_cmd_speed(ecmd);
367 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
368 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
369 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
370 return -EINVAL;
371 }
372
373 return err;
374}
375
376static void ixgbe_get_pauseparam(struct net_device *netdev,
377 struct ethtool_pauseparam *pause)
378{
379 struct ixgbe_adapter *adapter = netdev_priv(netdev);
380 struct ixgbe_hw *hw = &adapter->hw;
381
382 if (hw->fc.disable_fc_autoneg)
383 pause->autoneg = 0;
384 else
385 pause->autoneg = 1;
386
387 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
388 pause->rx_pause = 1;
389 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
390 pause->tx_pause = 1;
391 } else if (hw->fc.current_mode == ixgbe_fc_full) {
392 pause->rx_pause = 1;
393 pause->tx_pause = 1;
394 }
395}
396
397static int ixgbe_set_pauseparam(struct net_device *netdev,
398 struct ethtool_pauseparam *pause)
399{
400 struct ixgbe_adapter *adapter = netdev_priv(netdev);
401 struct ixgbe_hw *hw = &adapter->hw;
402 struct ixgbe_fc_info fc = hw->fc;
403
404	/* 82598 does not support link flow control with DCB enabled */
405 if ((hw->mac.type == ixgbe_mac_82598EB) &&
406 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
407 return -EINVAL;
408
409 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
410
411 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
412 fc.requested_mode = ixgbe_fc_full;
413 else if (pause->rx_pause && !pause->tx_pause)
414 fc.requested_mode = ixgbe_fc_rx_pause;
415 else if (!pause->rx_pause && pause->tx_pause)
416 fc.requested_mode = ixgbe_fc_tx_pause;
417 else
418 fc.requested_mode = ixgbe_fc_none;
419
420	/* if the requested settings changed, update hw->fc and reinitialize the device */
421 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
422 hw->fc = fc;
423 if (netif_running(netdev))
424 ixgbe_reinit_locked(adapter);
425 else
426 ixgbe_reset(adapter);
427 }
428
429 return 0;
430}
431
432static u32 ixgbe_get_msglevel(struct net_device *netdev)
433{
434 struct ixgbe_adapter *adapter = netdev_priv(netdev);
435 return adapter->msg_enable;
436}
437
438static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
439{
440 struct ixgbe_adapter *adapter = netdev_priv(netdev);
441 adapter->msg_enable = data;
442}
443
444static int ixgbe_get_regs_len(struct net_device *netdev)
445{
446#define IXGBE_REGS_LEN 1129
447 return IXGBE_REGS_LEN * sizeof(u32);
448}
449
450#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
451
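/* Dump a fixed-layout snapshot of device registers (general, NVM, interrupt,
 * flow control, Rx/Tx DMA, DCB, statistics, MAC and diagnostic blocks) into
 * the buffer supplied by ethtool; the buffer holds IXGBE_REGS_LEN u32 words.
 */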
452static void ixgbe_get_regs(struct net_device *netdev,
453 struct ethtool_regs *regs, void *p)
454{
455 struct ixgbe_adapter *adapter = netdev_priv(netdev);
456 struct ixgbe_hw *hw = &adapter->hw;
457 u32 *regs_buff = p;
458 u8 i;
459
460 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
461
462 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
463
464 /* General Registers */
465 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
466 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
467 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
468 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
469 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
470 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
471 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
472 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
473
474 /* NVM Register */
475 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
476 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
477 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
478 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
479 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
480 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
481 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
482 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
483 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
484 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
485
486 /* Interrupt */
487 /* don't read EICR because it can clear interrupt causes, instead
488 * read EICS which is a shadow but doesn't clear EICR */
489 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
490 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
491 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
492 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
493 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
494 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
495 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
496 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
497 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
498 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
499 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
500 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
501
502 /* Flow Control */
503 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
504 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
505 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
506 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
507 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
508 for (i = 0; i < 8; i++) {
509 switch (hw->mac.type) {
510 case ixgbe_mac_82598EB:
511 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
512 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
513 break;
514 case ixgbe_mac_82599EB:
515 case ixgbe_mac_X540:
516 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
517 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
518 break;
519 default:
520 break;
521 }
522 }
523 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
524 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
525
526 /* Receive DMA */
527 for (i = 0; i < 64; i++)
528 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
529 for (i = 0; i < 64; i++)
530 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
531 for (i = 0; i < 64; i++)
532 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
533 for (i = 0; i < 64; i++)
534 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
535 for (i = 0; i < 64; i++)
536 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
537 for (i = 0; i < 64; i++)
538 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
539 for (i = 0; i < 16; i++)
540 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
541 for (i = 0; i < 16; i++)
542 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
543 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
544 for (i = 0; i < 8; i++)
545 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
546 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
547 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
548
549 /* Receive */
550 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
551 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
552 for (i = 0; i < 16; i++)
553 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
554 for (i = 0; i < 16; i++)
555 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
556 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
557 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
558 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
559 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
560 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
561 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
562 for (i = 0; i < 8; i++)
563 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
564 for (i = 0; i < 8; i++)
565 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
566 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
567
568 /* Transmit */
569 for (i = 0; i < 32; i++)
570 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
571 for (i = 0; i < 32; i++)
572 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
573 for (i = 0; i < 32; i++)
574 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
575 for (i = 0; i < 32; i++)
576 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
577 for (i = 0; i < 32; i++)
578 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
579 for (i = 0; i < 32; i++)
580 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
581 for (i = 0; i < 32; i++)
582 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
583 for (i = 0; i < 32; i++)
584 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
585 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
586 for (i = 0; i < 16; i++)
587 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
588 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
589 for (i = 0; i < 8; i++)
590 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
591 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
592
593 /* Wake Up */
594 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
595 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
596 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
597 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
598 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
599 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
600 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
601 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
602 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
603
604 /* DCB */
605 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
606 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
607 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
608 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
609 for (i = 0; i < 8; i++)
610 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
611 for (i = 0; i < 8; i++)
612 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
613 for (i = 0; i < 8; i++)
614 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
615 for (i = 0; i < 8; i++)
616 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
617 for (i = 0; i < 8; i++)
618 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
619 for (i = 0; i < 8; i++)
620 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
621
622 /* Statistics */
623 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
624 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
625 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
626 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
627 for (i = 0; i < 8; i++)
628 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
629 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
630 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
631 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
632 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
633 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
634 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
635 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
636 for (i = 0; i < 8; i++)
637 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
638 for (i = 0; i < 8; i++)
639 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
640 for (i = 0; i < 8; i++)
641 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
642 for (i = 0; i < 8; i++)
643 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
644 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
645 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
646 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
647 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
648 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
649 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
650 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
651 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
652 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
653 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
654 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
655 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
656 for (i = 0; i < 8; i++)
657 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
658 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
659 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
660 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
661 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
662 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
663 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
664 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
665 regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
666 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
667 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
668 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
669 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
670 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
671 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
672 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
673 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
674 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
675 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
676 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
677 for (i = 0; i < 16; i++)
678 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
679 for (i = 0; i < 16; i++)
680 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
681 for (i = 0; i < 16; i++)
682 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
683 for (i = 0; i < 16; i++)
684 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
685
686 /* MAC */
687 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
688 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
689 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
690 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
691 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
692 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
693 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
694 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
695 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
696 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
697 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
698 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
699 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
700 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
701 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
702 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
703 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
704 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
705 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
706 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
707 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
708 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
709 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
710 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
711 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
712 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
713 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
714 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
715 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
716 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
717 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
718 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
719 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
720
721 /* Diagnostic */
722 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
723 for (i = 0; i < 8; i++)
724 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
725 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
726 for (i = 0; i < 4; i++)
727 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
728 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
729 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
730 for (i = 0; i < 8; i++)
731 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
732 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
733 for (i = 0; i < 4; i++)
734 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
735 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
736 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
737 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
738 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
739 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
740 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
741 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
742 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
743 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
744 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
745 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
746 for (i = 0; i < 8; i++)
747 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
748 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
749 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
750 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
751 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
752 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
753 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
754 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
755 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
756 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
757
758	/* 82599/X540 specific registers */
759 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
760}
761
762static int ixgbe_get_eeprom_len(struct net_device *netdev)
763{
764 struct ixgbe_adapter *adapter = netdev_priv(netdev);
765 return adapter->hw.eeprom.word_size * 2;
766}
767
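/* Read the requested byte range from the EEPROM.  The device is word
 * addressable, so the range is rounded out to whole little-endian words,
 * read into a temporary buffer, byte-swapped to CPU order, and then the
 * exact byte span requested is copied back to the caller.
 */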
768static int ixgbe_get_eeprom(struct net_device *netdev,
769 struct ethtool_eeprom *eeprom, u8 *bytes)
770{
771 struct ixgbe_adapter *adapter = netdev_priv(netdev);
772 struct ixgbe_hw *hw = &adapter->hw;
773 u16 *eeprom_buff;
774 int first_word, last_word, eeprom_len;
775 int ret_val = 0;
776 u16 i;
777
778 if (eeprom->len == 0)
779 return -EINVAL;
780
781 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
782
783 first_word = eeprom->offset >> 1;
784 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
785 eeprom_len = last_word - first_word + 1;
786
787 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
788 if (!eeprom_buff)
789 return -ENOMEM;
790
791 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
792 eeprom_buff);
793
794 /* Device's eeprom is always little-endian, word addressable */
795 for (i = 0; i < eeprom_len; i++)
796 le16_to_cpus(&eeprom_buff[i]);
797
798 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
799 kfree(eeprom_buff);
800
801 return ret_val;
802}
803
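/* Write the requested byte range to the EEPROM.  Partial words at either
 * end are handled with a read-modify-write, the buffer is converted back
 * to little-endian words before writing, and the EEPROM checksum is
 * updated on success.
 */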
804static int ixgbe_set_eeprom(struct net_device *netdev,
805 struct ethtool_eeprom *eeprom, u8 *bytes)
806{
807 struct ixgbe_adapter *adapter = netdev_priv(netdev);
808 struct ixgbe_hw *hw = &adapter->hw;
809 u16 *eeprom_buff;
810 void *ptr;
811 int max_len, first_word, last_word, ret_val = 0;
812 u16 i;
813
814 if (eeprom->len == 0)
815 return -EINVAL;
816
817 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
818 return -EINVAL;
819
820 max_len = hw->eeprom.word_size * 2;
821
822 first_word = eeprom->offset >> 1;
823 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
824 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
825 if (!eeprom_buff)
826 return -ENOMEM;
827
828 ptr = eeprom_buff;
829
830 if (eeprom->offset & 1) {
831 /*
832 * need read/modify/write of first changed EEPROM word
833 * only the second byte of the word is being modified
834 */
835 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
836 if (ret_val)
837 goto err;
838
839 ptr++;
840 }
841 if ((eeprom->offset + eeprom->len) & 1) {
842 /*
843 * need read/modify/write of last changed EEPROM word
844 * only the first byte of the word is being modified
845 */
846 ret_val = hw->eeprom.ops.read(hw, last_word,
847 &eeprom_buff[last_word - first_word]);
848 if (ret_val)
849 goto err;
850 }
851
852 /* Device's eeprom is always little-endian, word addressable */
853 for (i = 0; i < last_word - first_word + 1; i++)
854 le16_to_cpus(&eeprom_buff[i]);
855
856 memcpy(ptr, bytes, eeprom->len);
857
858 for (i = 0; i < last_word - first_word + 1; i++)
859 cpu_to_le16s(&eeprom_buff[i]);
860
861 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
862 last_word - first_word + 1,
863 eeprom_buff);
864
865 /* Update the checksum */
866 if (ret_val == 0)
867 hw->eeprom.ops.update_checksum(hw);
868
869err:
870 kfree(eeprom_buff);
871 return ret_val;
872}
873
874static void ixgbe_get_drvinfo(struct net_device *netdev,
875 struct ethtool_drvinfo *drvinfo)
876{
877 struct ixgbe_adapter *adapter = netdev_priv(netdev);
878 u32 nvm_track_id;
879
880 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
881 strlcpy(drvinfo->version, ixgbe_driver_version,
882 sizeof(drvinfo->version));
883
884 nvm_track_id = (adapter->eeprom_verh << 16) |
885 adapter->eeprom_verl;
886 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
887 nvm_track_id);
888
889 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
890 sizeof(drvinfo->bus_info));
891 drvinfo->n_stats = IXGBE_STATS_LEN;
892 drvinfo->testinfo_len = IXGBE_TEST_LEN;
893 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
894}
895
896static void ixgbe_get_ringparam(struct net_device *netdev,
897 struct ethtool_ringparam *ring)
898{
899 struct ixgbe_adapter *adapter = netdev_priv(netdev);
900 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
901 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
902
903 ring->rx_max_pending = IXGBE_MAX_RXD;
904 ring->tx_max_pending = IXGBE_MAX_TXD;
905 ring->rx_pending = rx_ring->count;
906 ring->tx_pending = tx_ring->count;
907}
908
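/* Resize the Tx/Rx descriptor rings.  The new counts are clamped and
 * aligned to the hardware limits; if the interface is down only the
 * counts are updated, otherwise temporary rings are allocated, the
 * device is brought down, the new rings are swapped in, and the device
 * is brought back up.
 */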
909static int ixgbe_set_ringparam(struct net_device *netdev,
910 struct ethtool_ringparam *ring)
911{
912 struct ixgbe_adapter *adapter = netdev_priv(netdev);
913 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
914 int i, err = 0;
915 u32 new_rx_count, new_tx_count;
916 bool need_update = false;
917
918 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
919 return -EINVAL;
920
921 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
922 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
923 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
924
925 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
926 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
927 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
928
929 if ((new_tx_count == adapter->tx_ring[0]->count) &&
930 (new_rx_count == adapter->rx_ring[0]->count)) {
931 /* nothing to do */
932 return 0;
933 }
934
935 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
936 usleep_range(1000, 2000);
937
938 if (!netif_running(adapter->netdev)) {
939 for (i = 0; i < adapter->num_tx_queues; i++)
940 adapter->tx_ring[i]->count = new_tx_count;
941 for (i = 0; i < adapter->num_rx_queues; i++)
942 adapter->rx_ring[i]->count = new_rx_count;
943 adapter->tx_ring_count = new_tx_count;
944 adapter->rx_ring_count = new_rx_count;
945 goto clear_reset;
946 }
947
948 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
949 if (!temp_tx_ring) {
950 err = -ENOMEM;
951 goto clear_reset;
952 }
953
954 if (new_tx_count != adapter->tx_ring_count) {
955 for (i = 0; i < adapter->num_tx_queues; i++) {
956 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
957 sizeof(struct ixgbe_ring));
958 temp_tx_ring[i].count = new_tx_count;
959 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
960 if (err) {
961 while (i) {
962 i--;
963 ixgbe_free_tx_resources(&temp_tx_ring[i]);
964 }
965 goto clear_reset;
966 }
967 }
968 need_update = true;
969 }
970
971 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
972 if (!temp_rx_ring) {
973 err = -ENOMEM;
974 goto err_setup;
975 }
976
977 if (new_rx_count != adapter->rx_ring_count) {
978 for (i = 0; i < adapter->num_rx_queues; i++) {
979 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
980 sizeof(struct ixgbe_ring));
981 temp_rx_ring[i].count = new_rx_count;
982 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
983 if (err) {
984 while (i) {
985 i--;
986 ixgbe_free_rx_resources(&temp_rx_ring[i]);
987 }
988 goto err_setup;
989 }
990 }
991 need_update = true;
992 }
993
994 /* if rings need to be updated, here's the place to do it in one shot */
995 if (need_update) {
996 ixgbe_down(adapter);
997
998 /* tx */
999 if (new_tx_count != adapter->tx_ring_count) {
1000 for (i = 0; i < adapter->num_tx_queues; i++) {
1001 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1002 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
1003 sizeof(struct ixgbe_ring));
1004 }
1005 adapter->tx_ring_count = new_tx_count;
1006 }
1007
1008 /* rx */
1009 if (new_rx_count != adapter->rx_ring_count) {
1010 for (i = 0; i < adapter->num_rx_queues; i++) {
1011 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1012 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
1013 sizeof(struct ixgbe_ring));
1014 }
1015 adapter->rx_ring_count = new_rx_count;
1016 }
1017 ixgbe_up(adapter);
1018 }
1019
1020 vfree(temp_rx_ring);
1021err_setup:
1022 vfree(temp_tx_ring);
1023clear_reset:
1024 clear_bit(__IXGBE_RESETTING, &adapter->state);
1025 return err;
1026}
1027
1028static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1029{
1030 switch (sset) {
1031 case ETH_SS_TEST:
1032 return IXGBE_TEST_LEN;
1033 case ETH_SS_STATS:
1034 return IXGBE_STATS_LEN;
1035 default:
1036 return -EOPNOTSUPP;
1037 }
1038}
1039
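/* Collect the statistics advertised by ixgbe_get_strings(): the global
 * adapter/netdev counters first, then per-queue packet/byte counts
 * (sampled consistently via the u64_stats syncp), then per-packet-buffer
 * XON/XOFF counters.
 */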
1040static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1041 struct ethtool_stats *stats, u64 *data)
1042{
1043 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1044 struct rtnl_link_stats64 temp;
1045 const struct rtnl_link_stats64 *net_stats;
1046 unsigned int start;
1047 struct ixgbe_ring *ring;
1048 int i, j;
1049 char *p = NULL;
1050
1051 ixgbe_update_stats(adapter);
1052 net_stats = dev_get_stats(netdev, &temp);
1053 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1054 switch (ixgbe_gstrings_stats[i].type) {
1055 case NETDEV_STATS:
1056 p = (char *) net_stats +
1057 ixgbe_gstrings_stats[i].stat_offset;
1058 break;
1059 case IXGBE_STATS:
1060 p = (char *) adapter +
1061 ixgbe_gstrings_stats[i].stat_offset;
1062 break;
1063 }
1064
1065 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1066 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1067 }
1068 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1069 ring = adapter->tx_ring[j];
1070 if (!ring) {
1071 data[i] = 0;
1072 data[i+1] = 0;
1073 i += 2;
1074 continue;
1075 }
1076
1077 do {
1078 start = u64_stats_fetch_begin_bh(&ring->syncp);
1079 data[i] = ring->stats.packets;
1080 data[i+1] = ring->stats.bytes;
1081 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1082 i += 2;
1083 }
1084 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1085 ring = adapter->rx_ring[j];
1086 if (!ring) {
1087 data[i] = 0;
1088 data[i+1] = 0;
1089 i += 2;
1090 continue;
1091 }
1092
1093 do {
1094 start = u64_stats_fetch_begin_bh(&ring->syncp);
1095 data[i] = ring->stats.packets;
1096 data[i+1] = ring->stats.bytes;
1097 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1098 i += 2;
1099 }
1100
1101 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1102 data[i++] = adapter->stats.pxontxc[j];
1103 data[i++] = adapter->stats.pxofftxc[j];
1104 }
1105 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1106 data[i++] = adapter->stats.pxonrxc[j];
1107 data[i++] = adapter->stats.pxoffrxc[j];
1108 }
1109}
1110
1111static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1112 u8 *data)
1113{
1114 char *p = (char *)data;
1115 int i;
1116
1117 switch (stringset) {
1118 case ETH_SS_TEST:
1119 memcpy(data, *ixgbe_gstrings_test,
1120 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
1121 break;
1122 case ETH_SS_STATS:
1123 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1124 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1125 ETH_GSTRING_LEN);
1126 p += ETH_GSTRING_LEN;
1127 }
1128 for (i = 0; i < netdev->num_tx_queues; i++) {
1129 sprintf(p, "tx_queue_%u_packets", i);
1130 p += ETH_GSTRING_LEN;
1131 sprintf(p, "tx_queue_%u_bytes", i);
1132 p += ETH_GSTRING_LEN;
1133 }
1134 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1135 sprintf(p, "rx_queue_%u_packets", i);
1136 p += ETH_GSTRING_LEN;
1137 sprintf(p, "rx_queue_%u_bytes", i);
1138 p += ETH_GSTRING_LEN;
1139 }
1140 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1141 sprintf(p, "tx_pb_%u_pxon", i);
1142 p += ETH_GSTRING_LEN;
1143 sprintf(p, "tx_pb_%u_pxoff", i);
1144 p += ETH_GSTRING_LEN;
1145 }
1146 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1147 sprintf(p, "rx_pb_%u_pxon", i);
1148 p += ETH_GSTRING_LEN;
1149 sprintf(p, "rx_pb_%u_pxoff", i);
1150 p += ETH_GSTRING_LEN;
1151 }
1152 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1153 break;
1154 }
1155}
1156
1157static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1158{
1159 struct ixgbe_hw *hw = &adapter->hw;
1160 bool link_up;
1161 u32 link_speed = 0;
1162 *data = 0;
1163
1164 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1165 if (link_up)
1166 return *data;
1167 else
1168 *data = 1;
1169 return *data;
1170}
1171
1172/* ethtool register test data */
1173struct ixgbe_reg_test {
1174 u16 reg;
1175 u8 array_len;
1176 u8 test_type;
1177 u32 mask;
1178 u32 write;
1179};
1180
1181/* In the hardware, registers are laid out either singly, in arrays
1182 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1183 * most tests take place on arrays or single registers (handled
1184 * as a single-element array) and special-case the tables.
1185 * Table tests are always pattern tests.
1186 *
1187 * We also make provision for some required setup steps by specifying
1188 * registers to be written without any read-back testing.
1189 */
1190
1191#define PATTERN_TEST 1
1192#define SET_READ_TEST 2
1193#define WRITE_NO_TEST 3
1194#define TABLE32_TEST 4
1195#define TABLE64_TEST_LO 5
1196#define TABLE64_TEST_HI 6
1197
1198/* default 82599 register test */
1199static const struct ixgbe_reg_test reg_test_82599[] = {
1200 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1201 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1202 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1203 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1204 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1205 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1206 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1207 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1208 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1209 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1210 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1211 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1212 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1213 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1214 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1215 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1216 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1217 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1218 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1219 { 0, 0, 0, 0 }
1220};
1221
1222/* default 82598 register test */
1223static const struct ixgbe_reg_test reg_test_82598[] = {
1224 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1225 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1226 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1227 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1228 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1229 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1230 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1231 /* Enable all four RX queues before testing. */
1232 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1233 /* RDH is read-only for 82598, only test RDT. */
1234 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1235 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1236 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1237 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1238 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1239 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1240 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1241 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1242 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1243 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1244 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1245 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1246 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1247 { 0, 0, 0, 0 }
1248};
1249
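/* Write each test pattern (masked by 'write') to the register, read it
 * back, and compare the masked result.  The original register value is
 * restored after every access; on mismatch the failing register offset is
 * recorded in *data and the function returns nonzero.
 */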
1250static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1251 u32 mask, u32 write)
1252{
1253 u32 pat, val, before;
1254 static const u32 test_pattern[] = {
1255 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1256
1257 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1258 before = readl(adapter->hw.hw_addr + reg);
1259 writel((test_pattern[pat] & write),
1260 (adapter->hw.hw_addr + reg));
1261 val = readl(adapter->hw.hw_addr + reg);
1262 if (val != (test_pattern[pat] & write & mask)) {
1263 e_err(drv, "pattern test reg %04X failed: got "
1264 "0x%08X expected 0x%08X\n",
1265 reg, val, (test_pattern[pat] & write & mask));
1266 *data = reg;
1267 writel(before, adapter->hw.hw_addr + reg);
1268 return 1;
1269 }
1270 writel(before, adapter->hw.hw_addr + reg);
1271 }
1272 return 0;
1273}
1274
1275static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1276 u32 mask, u32 write)
1277{
1278 u32 val, before;
1279 before = readl(adapter->hw.hw_addr + reg);
1280 writel((write & mask), (adapter->hw.hw_addr + reg));
1281 val = readl(adapter->hw.hw_addr + reg);
1282 if ((write & mask) != (val & mask)) {
1283 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1284 "expected 0x%08X\n", reg, (val & mask), (write & mask));
1285 *data = reg;
1286 writel(before, (adapter->hw.hw_addr + reg));
1287 return 1;
1288 }
1289 writel(before, (adapter->hw.hw_addr + reg));
1290 return 0;
1291}
1292
1293#define REG_PATTERN_TEST(reg, mask, write) \
1294 do { \
1295 if (reg_pattern_test(adapter, data, reg, mask, write)) \
1296 return 1; \
1297	} while (0)
1298
1299
1300#define REG_SET_AND_CHECK(reg, mask, write) \
1301 do { \
1302 if (reg_set_and_check(adapter, data, reg, mask, write)) \
1303 return 1; \
1304 } while (0) \
1305	} while (0)
1306static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1307{
1308 const struct ixgbe_reg_test *test;
1309 u32 value, before, after;
1310 u32 i, toggle;
1311
1312 switch (adapter->hw.mac.type) {
1313 case ixgbe_mac_82598EB:
1314 toggle = 0x7FFFF3FF;
1315 test = reg_test_82598;
1316 break;
1317 case ixgbe_mac_82599EB:
1318 case ixgbe_mac_X540:
1319 toggle = 0x7FFFF30F;
1320 test = reg_test_82599;
1321 break;
1322 default:
1323 *data = 1;
1324 return 1;
1325 break;
1326 }
1327
1328 /*
1329 * Because the status register is such a special case,
1330 * we handle it separately from the rest of the register
1331 * tests. Some bits are read-only, some toggle, and some
1332 * are writeable on newer MACs.
1333 */
1334 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1335 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1337 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1338 if (value != after) {
1339 e_err(drv, "failed STATUS register test got: 0x%08X "
1340 "expected: 0x%08X\n", after, value);
1341 *data = 1;
1342 return 1;
1343 }
1344 /* restore previous status */
1345 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1346
1347 /*
1348 * Perform the remainder of the register test, looping through
1349 * the test table until we either fail or reach the null entry.
1350 */
1351 while (test->reg) {
1352 for (i = 0; i < test->array_len; i++) {
1353 switch (test->test_type) {
1354 case PATTERN_TEST:
1355 REG_PATTERN_TEST(test->reg + (i * 0x40),
1356 test->mask,
1357 test->write);
1358 break;
1359 case SET_READ_TEST:
1360 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1361 test->mask,
1362 test->write);
1363 break;
1364 case WRITE_NO_TEST:
1365 writel(test->write,
1366 (adapter->hw.hw_addr + test->reg)
1367 + (i * 0x40));
1368 break;
1369 case TABLE32_TEST:
1370 REG_PATTERN_TEST(test->reg + (i * 4),
1371 test->mask,
1372 test->write);
1373 break;
1374 case TABLE64_TEST_LO:
1375 REG_PATTERN_TEST(test->reg + (i * 8),
1376 test->mask,
1377 test->write);
1378 break;
1379 case TABLE64_TEST_HI:
1380 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1381 test->mask,
1382 test->write);
1383 break;
1384 }
1385 }
1386 test++;
1387 }
1388
1389 *data = 0;
1390 return 0;
1391}
1392
1393static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1394{
1395 struct ixgbe_hw *hw = &adapter->hw;
1396 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1397 *data = 1;
1398 else
1399 *data = 0;
1400 return *data;
1401}
1402
1403static irqreturn_t ixgbe_test_intr(int irq, void *data)
1404{
1405 struct net_device *netdev = (struct net_device *) data;
1406 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1407
1408 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1409
1410 return IRQ_HANDLED;
1411}
1412
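/* Exercise the legacy/MSI interrupt path: hook a test handler on the PCI
 * IRQ, then for each cause bit mask/unmask and force the interrupt via
 * EICS, checking whether test_icr records it as expected.  MSI-X
 * configurations are not tested and return immediately.
 */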
1413static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1414{
1415 struct net_device *netdev = adapter->netdev;
1416 u32 mask, i = 0, shared_int = true;
1417 u32 irq = adapter->pdev->irq;
1418
1419 *data = 0;
1420
1421 /* Hook up test interrupt handler just for this test */
1422 if (adapter->msix_entries) {
1423 /* NOTE: we don't test MSI-X interrupts here, yet */
1424 return 0;
1425 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1426 shared_int = false;
1427 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1428 netdev)) {
1429 *data = 1;
1430 return -1;
1431 }
1432 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1433 netdev->name, netdev)) {
1434 shared_int = false;
1435 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1436 netdev->name, netdev)) {
1437 *data = 1;
1438 return -1;
1439 }
1440 e_info(hw, "testing %s interrupt\n", shared_int ?
1441 "shared" : "unshared");
1442
1443 /* Disable all the interrupts */
1444 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1445 IXGBE_WRITE_FLUSH(&adapter->hw);
1446 usleep_range(10000, 20000);
1447
1448 /* Test each interrupt */
1449 for (; i < 10; i++) {
1450 /* Interrupt to test */
1451 mask = 1 << i;
1452
1453 if (!shared_int) {
1454 /*
1455 * Disable the interrupts to be reported in
1456 * the cause register and then force the same
1457 * interrupt and see if one gets posted. If
1458 * an interrupt was posted to the bus, the
1459 * test failed.
1460 */
1461 adapter->test_icr = 0;
1462 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1463 ~mask & 0x00007FFF);
1464 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1465 ~mask & 0x00007FFF);
1466 IXGBE_WRITE_FLUSH(&adapter->hw);
1467 usleep_range(10000, 20000);
1468
1469 if (adapter->test_icr & mask) {
1470 *data = 3;
1471 break;
1472 }
1473 }
1474
1475 /*
1476 * Enable the interrupt to be reported in the cause
1477 * register and then force the same interrupt and see
1478 * if one gets posted. If an interrupt was not posted
1479 * to the bus, the test failed.
1480 */
1481 adapter->test_icr = 0;
1482 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1483 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1484 IXGBE_WRITE_FLUSH(&adapter->hw);
1485 usleep_range(10000, 20000);
1486
1487		if (!(adapter->test_icr & mask)) {
1488 *data = 4;
1489 break;
1490 }
1491
1492 if (!shared_int) {
1493 /*
1494 * Disable the other interrupts to be reported in
1495 * the cause register and then force the other
1496 * interrupts and see if any get posted. If
1497 * an interrupt was posted to the bus, the
1498 * test failed.
1499 */
1500 adapter->test_icr = 0;
1501 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1502 ~mask & 0x00007FFF);
1503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1504 ~mask & 0x00007FFF);
1505 IXGBE_WRITE_FLUSH(&adapter->hw);
1506 usleep_range(10000, 20000);
1507
1508 if (adapter->test_icr) {
1509 *data = 5;
1510 break;
1511 }
1512 }
1513 }
1514
1515 /* Disable all the interrupts */
1516 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1517 IXGBE_WRITE_FLUSH(&adapter->hw);
1518 usleep_range(10000, 20000);
1519
1520 /* Unhook test interrupt handler */
1521 free_irq(irq, netdev);
1522
1523 return *data;
1524}
1525
1526static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1527{
1528 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1529 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1530 struct ixgbe_hw *hw = &adapter->hw;
1531 u32 reg_ctl;
1532
1533 /* shut down the DMA engines now so they can be reinitialized later */
1534
1535 /* first Rx */
1536 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1537 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1538 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1539 ixgbe_disable_rx_queue(adapter, rx_ring);
1540
1541 /* now Tx */
1542 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1543 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1544 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1545
1546 switch (hw->mac.type) {
1547 case ixgbe_mac_82599EB:
1548 case ixgbe_mac_X540:
1549 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1550 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1551 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1552 break;
1553 default:
1554 break;
1555 }
1556
1557 ixgbe_reset(adapter);
1558
1559 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1560 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1561}
1562
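/* Allocate and configure a single Tx and a single Rx descriptor ring
 * (reusing the reg_idx of queue 0) for the loopback test, enabling Tx DMA
 * and Rx as required by the MAC type.
 */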
1563static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1564{
1565 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1566 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1567 u32 rctl, reg_data;
1568 int ret_val;
1569 int err;
1570
1571 /* Setup Tx descriptor ring and Tx buffers */
1572 tx_ring->count = IXGBE_DEFAULT_TXD;
1573 tx_ring->queue_index = 0;
1574 tx_ring->dev = &adapter->pdev->dev;
1575 tx_ring->netdev = adapter->netdev;
1576 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1577
1578 err = ixgbe_setup_tx_resources(tx_ring);
1579 if (err)
1580 return 1;
1581
1582 switch (adapter->hw.mac.type) {
1583 case ixgbe_mac_82599EB:
1584 case ixgbe_mac_X540:
1585 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1586 reg_data |= IXGBE_DMATXCTL_TE;
1587 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1588 break;
1589 default:
1590 break;
1591 }
1592
1593 ixgbe_configure_tx_ring(adapter, tx_ring);
1594
1595 /* Setup Rx Descriptor ring and Rx buffers */
1596 rx_ring->count = IXGBE_DEFAULT_RXD;
1597 rx_ring->queue_index = 0;
1598 rx_ring->dev = &adapter->pdev->dev;
1599 rx_ring->netdev = adapter->netdev;
1600 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1601
1602 err = ixgbe_setup_rx_resources(rx_ring);
1603 if (err) {
1604 ret_val = 4;
1605 goto err_nomem;
1606 }
1607
1608 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1609 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1610
1611 ixgbe_configure_rx_ring(adapter, rx_ring);
1612
1613 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1614 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1615
1616 return 0;
1617
1618err_nomem:
1619 ixgbe_free_desc_rings(adapter);
1620 return ret_val;
1621}
1622
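/* Put the MAC in loopback: force link up (MACC.FLU on X540, AUTOC FLU/LMS
 * otherwise), set HLREG0.LPBK, accept all frames via FCTRL, and power down
 * the Atlas Tx lanes on 82598 (re-enabled by the reset path).
 */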
1623static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1624{
1625 struct ixgbe_hw *hw = &adapter->hw;
1626 u32 reg_data;
1627
1628 /* X540 needs to set the MACC.FLU bit to force link up */
1629 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1630 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1631 reg_data |= IXGBE_MACC_FLU;
1632 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1633 }
1634
1635 /* right now we only support MAC loopback in the driver */
1636 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1637 /* Setup MAC loopback */
1638 reg_data |= IXGBE_HLREG0_LPBK;
1639 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1640
1641 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1642 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1643 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1644
1645 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1646 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1647 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1648 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1649 IXGBE_WRITE_FLUSH(hw);
1650 usleep_range(10000, 20000);
1651
1652 /* Disable Atlas Tx lanes; re-enabled in reset path */
1653 if (hw->mac.type == ixgbe_mac_82598EB) {
1654 u8 atlas;
1655
1656 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1657 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1658 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1659
1660 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1661 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1662 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1663
1664 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1665 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1666 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1667
1668 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1669 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1670 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1671 }
1672
1673 return 0;
1674}
1675
1676static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1677{
1678 u32 reg_data;
1679
1680 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1681 reg_data &= ~IXGBE_HLREG0_LPBK;
1682 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1683}
1684
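/* Fill the test skb with a fixed 0xFF/0xAA pattern plus two marker bytes
 * that ixgbe_check_lbtest_frame() verifies on the receive side.
 */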
1685static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1686 unsigned int frame_size)
1687{
1688 memset(skb->data, 0xFF, frame_size);
1689 frame_size >>= 1;
1690 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1691 memset(&skb->data[frame_size + 10], 0xBE, 1);
1692 memset(&skb->data[frame_size + 12], 0xAF, 1);
1693}
1694
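/* Verify that a looped-back frame still carries the expected pattern;
 * returns true when the marker bytes match.
 */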
1695static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1696 unsigned int frame_size)
1697{
1698 unsigned char *data;
1699 bool match = true;
1700
1701 frame_size >>= 1;
1702
1703 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1704
1705 if (data[3] != 0xFF ||
1706 data[frame_size + 10] != 0xBE ||
1707 data[frame_size + 12] != 0xAF)
1708 match = false;
1709
1710 kunmap(rx_buffer->page);
1711
1712 return match;
1713}
1714
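/* Walk the test rings: verify each received frame, release the matching
 * Tx buffer, refill the Rx ring and return the number of good frames.
 */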
1715static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1716 struct ixgbe_ring *tx_ring,
1717 unsigned int size)
1718{
1719 union ixgbe_adv_rx_desc *rx_desc;
1720 struct ixgbe_rx_buffer *rx_buffer;
1721 struct ixgbe_tx_buffer *tx_buffer;
1722 u16 rx_ntc, tx_ntc, count = 0;
1723
1724 /* initialize next to clean and descriptor values */
1725 rx_ntc = rx_ring->next_to_clean;
1726 tx_ntc = tx_ring->next_to_clean;
1727 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1728
1729 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1730 /* check Rx buffer */
1731 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1732
1733 /* sync Rx buffer for CPU read */
1734 dma_sync_single_for_cpu(rx_ring->dev,
1735 rx_buffer->dma,
1736 ixgbe_rx_bufsz(rx_ring),
1737 DMA_FROM_DEVICE);
1738
1739 /* verify contents of skb */
1740 if (ixgbe_check_lbtest_frame(rx_buffer, size))
1741 count++;
1742
1743 /* sync Rx buffer for device write */
1744 dma_sync_single_for_device(rx_ring->dev,
1745 rx_buffer->dma,
1746 ixgbe_rx_bufsz(rx_ring),
1747 DMA_FROM_DEVICE);
1748
1749 /* unmap buffer on Tx side */
1750 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1751 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1752
1753 /* increment Rx/Tx next to clean counters */
1754 rx_ntc++;
1755 if (rx_ntc == rx_ring->count)
1756 rx_ntc = 0;
1757 tx_ntc++;
1758 if (tx_ntc == tx_ring->count)
1759 tx_ntc = 0;
1760
1761 /* fetch next descriptor */
1762 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1763 }
1764
1765 netdev_tx_reset_queue(txring_txq(tx_ring));
1766
1767 /* re-map buffers to ring, store next to clean values */
1768 ixgbe_alloc_rx_buffers(rx_ring, count);
1769 rx_ring->next_to_clean = rx_ntc;
1770 tx_ring->next_to_clean = tx_ntc;
1771
1772 return count;
1773}
1774
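/* Transmit bursts of 64 copies of the test frame and count how many come
 * back intact; returns 0 on success or a non-zero diagnostic code.
 */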
1775static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1776{
1777 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1778 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1779 int i, j, lc, good_cnt, ret_val = 0;
1780 unsigned int size = 1024;
1781 netdev_tx_t tx_ret_val;
1782 struct sk_buff *skb;
1783
1784 /* allocate test skb */
1785 skb = alloc_skb(size, GFP_KERNEL);
1786 if (!skb)
1787 return 11;
1788
1789 /* place data into test skb */
1790 ixgbe_create_lbtest_frame(skb, size);
1791 skb_put(skb, size);
1792
1793 /*
1794 * Calculate the loop count based on the largest descriptor ring
1795 * The idea is to wrap the largest ring a number of times using 64
1796 * send/receive pairs during each loop
1797 */
1798
1799 if (rx_ring->count <= tx_ring->count)
1800 lc = ((tx_ring->count / 64) * 2) + 1;
1801 else
1802 lc = ((rx_ring->count / 64) * 2) + 1;
1803
1804 for (j = 0; j <= lc; j++) {
1805 /* reset count of good packets */
1806 good_cnt = 0;
1807
 1808 /* place 64 packets on the transmit queue */
1809 for (i = 0; i < 64; i++) {
1810 skb_get(skb);
1811 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1812 adapter,
1813 tx_ring);
1814 if (tx_ret_val == NETDEV_TX_OK)
1815 good_cnt++;
1816 }
1817
1818 if (good_cnt != 64) {
1819 ret_val = 12;
1820 break;
1821 }
1822
1823 /* allow 200 milliseconds for packets to go from Tx to Rx */
1824 msleep(200);
1825
1826 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1827 if (good_cnt != 64) {
1828 ret_val = 13;
1829 break;
1830 }
1831 }
1832
1833 /* free the original skb */
1834 kfree_skb(skb);
1835
1836 return ret_val;
1837}
1838
1839static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1840{
1841 *data = ixgbe_setup_desc_rings(adapter);
1842 if (*data)
1843 goto out;
1844 *data = ixgbe_setup_loopback_test(adapter);
1845 if (*data)
1846 goto err_loopback;
1847 *data = ixgbe_run_loopback_test(adapter);
1848 ixgbe_loopback_cleanup(adapter);
1849
1850err_loopback:
1851 ixgbe_free_desc_rings(adapter);
1852out:
1853 return *data;
1854}
1855
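/* ethtool self-test handler: runs the register, EEPROM, interrupt,
 * loopback and link tests when offline, or only the link test when
 * online, reporting per-test results in data[0..4].
 */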
1856static void ixgbe_diag_test(struct net_device *netdev,
1857 struct ethtool_test *eth_test, u64 *data)
1858{
1859 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1860 bool if_running = netif_running(netdev);
1861
1862 set_bit(__IXGBE_TESTING, &adapter->state);
1863 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1864 /* Offline tests */
1865
1866 e_info(hw, "offline testing starting\n");
1867
1868 /* Link test performed before hardware reset so autoneg doesn't
1869 * interfere with test result */
1870 if (ixgbe_link_test(adapter, &data[4]))
1871 eth_test->flags |= ETH_TEST_FL_FAILED;
1872
1873 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1874 int i;
1875 for (i = 0; i < adapter->num_vfs; i++) {
1876 if (adapter->vfinfo[i].clear_to_send) {
1877 netdev_warn(netdev, "%s",
1878 "offline diagnostic is not "
1879 "supported when VFs are "
1880 "present\n");
1881 data[0] = 1;
1882 data[1] = 1;
1883 data[2] = 1;
1884 data[3] = 1;
1885 eth_test->flags |= ETH_TEST_FL_FAILED;
1886 clear_bit(__IXGBE_TESTING,
1887 &adapter->state);
1888 goto skip_ol_tests;
1889 }
1890 }
1891 }
1892
1893 if (if_running)
1894 /* indicate we're in test mode */
1895 dev_close(netdev);
1896 else
1897 ixgbe_reset(adapter);
1898
1899 e_info(hw, "register testing starting\n");
1900 if (ixgbe_reg_test(adapter, &data[0]))
1901 eth_test->flags |= ETH_TEST_FL_FAILED;
1902
1903 ixgbe_reset(adapter);
1904 e_info(hw, "eeprom testing starting\n");
1905 if (ixgbe_eeprom_test(adapter, &data[1]))
1906 eth_test->flags |= ETH_TEST_FL_FAILED;
1907
1908 ixgbe_reset(adapter);
1909 e_info(hw, "interrupt testing starting\n");
1910 if (ixgbe_intr_test(adapter, &data[2]))
1911 eth_test->flags |= ETH_TEST_FL_FAILED;
1912
1913 /* If SRIOV or VMDq is enabled then skip MAC
1914 * loopback diagnostic. */
1915 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1916 IXGBE_FLAG_VMDQ_ENABLED)) {
1917 e_info(hw, "Skip MAC loopback diagnostic in VT "
1918 "mode\n");
1919 data[3] = 0;
1920 goto skip_loopback;
1921 }
1922
1923 ixgbe_reset(adapter);
1924 e_info(hw, "loopback testing starting\n");
1925 if (ixgbe_loopback_test(adapter, &data[3]))
1926 eth_test->flags |= ETH_TEST_FL_FAILED;
1927
1928skip_loopback:
1929 ixgbe_reset(adapter);
1930
1931 clear_bit(__IXGBE_TESTING, &adapter->state);
1932 if (if_running)
1933 dev_open(netdev);
1934 } else {
1935 e_info(hw, "online testing starting\n");
1936 /* Online tests */
1937 if (ixgbe_link_test(adapter, &data[4]))
1938 eth_test->flags |= ETH_TEST_FL_FAILED;
1939
1940 /* Online tests aren't run; pass by default */
1941 data[0] = 0;
1942 data[1] = 0;
1943 data[2] = 0;
1944 data[3] = 0;
1945
1946 clear_bit(__IXGBE_TESTING, &adapter->state);
1947 }
1948skip_ol_tests:
1949 msleep_interruptible(4 * 1000);
1950}
1951
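/* Returns non-zero and clears wol->supported when this device/subsystem
 * combination cannot wake the system.
 */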
1952static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1953 struct ethtool_wolinfo *wol)
1954{
1955 struct ixgbe_hw *hw = &adapter->hw;
1956 int retval = 0;
1957
1958 /* WOL not supported for all devices */
1959 if (!ixgbe_wol_supported(adapter, hw->device_id,
1960 hw->subsystem_device_id)) {
1961 retval = 1;
1962 wol->supported = 0;
1963 }
1964
1965 return retval;
1966}
1967
1968static void ixgbe_get_wol(struct net_device *netdev,
1969 struct ethtool_wolinfo *wol)
1970{
1971 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1972
1973 wol->supported = WAKE_UCAST | WAKE_MCAST |
1974 WAKE_BCAST | WAKE_MAGIC;
1975 wol->wolopts = 0;
1976
1977 if (ixgbe_wol_exclusion(adapter, wol) ||
1978 !device_can_wakeup(&adapter->pdev->dev))
1979 return;
1980
1981 if (adapter->wol & IXGBE_WUFC_EX)
1982 wol->wolopts |= WAKE_UCAST;
1983 if (adapter->wol & IXGBE_WUFC_MC)
1984 wol->wolopts |= WAKE_MCAST;
1985 if (adapter->wol & IXGBE_WUFC_BC)
1986 wol->wolopts |= WAKE_BCAST;
1987 if (adapter->wol & IXGBE_WUFC_MAG)
1988 wol->wolopts |= WAKE_MAGIC;
1989}
1990
1991static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1992{
1993 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1994
1995 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1996 return -EOPNOTSUPP;
1997
1998 if (ixgbe_wol_exclusion(adapter, wol))
1999 return wol->wolopts ? -EOPNOTSUPP : 0;
2000
2001 adapter->wol = 0;
2002
2003 if (wol->wolopts & WAKE_UCAST)
2004 adapter->wol |= IXGBE_WUFC_EX;
2005 if (wol->wolopts & WAKE_MCAST)
2006 adapter->wol |= IXGBE_WUFC_MC;
2007 if (wol->wolopts & WAKE_BCAST)
2008 adapter->wol |= IXGBE_WUFC_BC;
2009 if (wol->wolopts & WAKE_MAGIC)
2010 adapter->wol |= IXGBE_WUFC_MAG;
2011
2012 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2013
2014 return 0;
2015}
2016
2017static int ixgbe_nway_reset(struct net_device *netdev)
2018{
2019 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2020
2021 if (netif_running(netdev))
2022 ixgbe_reinit_locked(adapter);
2023
2024 return 0;
2025}
2026
2027static int ixgbe_set_phys_id(struct net_device *netdev,
2028 enum ethtool_phys_id_state state)
2029{
2030 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2031 struct ixgbe_hw *hw = &adapter->hw;
2032
2033 switch (state) {
2034 case ETHTOOL_ID_ACTIVE:
2035 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
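		/* ask the ethtool core for 2 blink cycles per second */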
2036 return 2;
2037
2038 case ETHTOOL_ID_ON:
2039 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2040 break;
2041
2042 case ETHTOOL_ID_OFF:
2043 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2044 break;
2045
2046 case ETHTOOL_ID_INACTIVE:
2047 /* Restore LED settings */
2048 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2049 break;
2050 }
2051
2052 return 0;
2053}
2054
2055static int ixgbe_get_coalesce(struct net_device *netdev,
2056 struct ethtool_coalesce *ec)
2057{
2058 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2059
2060 /* only valid if in constant ITR mode */
2061 if (adapter->rx_itr_setting <= 1)
2062 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2063 else
2064 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2065
2066 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2067 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2068 return 0;
2069
2070 /* only valid if in constant ITR mode */
2071 if (adapter->tx_itr_setting <= 1)
2072 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2073 else
2074 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2075
2076 return 0;
2077}
2078
 2079/*
 2080 * check the current rx_itr_setting and enable or disable RSC to match;
 2081 * returns true when the change requires a full adapter reset
 2082 */
2083static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2084{
2085 struct net_device *netdev = adapter->netdev;
2086
2087 /* nothing to do if LRO or RSC are not enabled */
2088 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2089 !(netdev->features & NETIF_F_LRO))
2090 return false;
2091
2092 /* check the feature flag value and enable RSC if necessary */
2093 if (adapter->rx_itr_setting == 1 ||
2094 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2095 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2096 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2097 e_info(probe, "rx-usecs value high enough "
2098 "to re-enable RSC\n");
2099 return true;
2100 }
2101 /* if interrupt rate is too high then disable RSC */
2102 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2103 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2104 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2105 return true;
2106 }
2107 return false;
2108}
2109
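/* ethtool set_coalesce handler: convert rx/tx-usecs into ITR values,
 * write EITR for every queue vector and reset the adapter if the RSC
 * state had to change.
 */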
2110static int ixgbe_set_coalesce(struct net_device *netdev,
2111 struct ethtool_coalesce *ec)
2112{
2113 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2114 struct ixgbe_q_vector *q_vector;
2115 int i;
2116 int num_vectors;
2117 u16 tx_itr_param, rx_itr_param;
2118 bool need_reset = false;
2119
2120 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2121 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
2122 && ec->tx_coalesce_usecs)
2123 return -EINVAL;
2124
2125 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2126 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2127 return -EINVAL;
2128
2129 if (ec->rx_coalesce_usecs > 1)
2130 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2131 else
2132 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2133
2134 if (adapter->rx_itr_setting == 1)
2135 rx_itr_param = IXGBE_20K_ITR;
2136 else
2137 rx_itr_param = adapter->rx_itr_setting;
2138
2139 if (ec->tx_coalesce_usecs > 1)
2140 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2141 else
2142 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2143
2144 if (adapter->tx_itr_setting == 1)
2145 tx_itr_param = IXGBE_10K_ITR;
2146 else
2147 tx_itr_param = adapter->tx_itr_setting;
2148
 2149 /* check whether RSC must be toggled for the new ITR value */
2150 need_reset = ixgbe_update_rsc(adapter);
2151
2152 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2153 num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2154 else
2155 num_vectors = 1;
2156
2157 for (i = 0; i < num_vectors; i++) {
2158 q_vector = adapter->q_vector[i];
2159 if (q_vector->tx.count && !q_vector->rx.count)
2160 /* tx only */
2161 q_vector->itr = tx_itr_param;
2162 else
2163 /* rx only or mixed */
2164 q_vector->itr = rx_itr_param;
2165 ixgbe_write_eitr(q_vector);
2166 }
2167
2168 /*
2169 * do reset here at the end to make sure EITR==0 case is handled
2170 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2171 * also locks in RSC enable/disable which requires reset
2172 */
2173 if (need_reset)
2174 ixgbe_do_reset(netdev);
2175
2176 return 0;
2177}
2178
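/* Translate the Flow Director filter stored at fsp->location back into
 * an ethtool_rx_flow_spec for ETHTOOL_GRXCLSRULE.
 */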
2179static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2180 struct ethtool_rxnfc *cmd)
2181{
2182 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2183 struct ethtool_rx_flow_spec *fsp =
2184 (struct ethtool_rx_flow_spec *)&cmd->fs;
2185 struct hlist_node *node, *node2;
2186 struct ixgbe_fdir_filter *rule = NULL;
2187
2188 /* report total rule count */
2189 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2190
2191 hlist_for_each_entry_safe(rule, node, node2,
2192 &adapter->fdir_filter_list, fdir_node) {
2193 if (fsp->location <= rule->sw_idx)
2194 break;
2195 }
2196
2197 if (!rule || fsp->location != rule->sw_idx)
2198 return -EINVAL;
2199
2200 /* fill out the flow spec entry */
2201
2202 /* set flow type field */
2203 switch (rule->filter.formatted.flow_type) {
2204 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2205 fsp->flow_type = TCP_V4_FLOW;
2206 break;
2207 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2208 fsp->flow_type = UDP_V4_FLOW;
2209 break;
2210 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2211 fsp->flow_type = SCTP_V4_FLOW;
2212 break;
2213 case IXGBE_ATR_FLOW_TYPE_IPV4:
2214 fsp->flow_type = IP_USER_FLOW;
2215 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2216 fsp->h_u.usr_ip4_spec.proto = 0;
2217 fsp->m_u.usr_ip4_spec.proto = 0;
2218 break;
2219 default:
2220 return -EINVAL;
2221 }
2222
2223 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2224 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2225 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2226 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2227 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2228 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2229 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2230 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2231 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2232 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2233 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2234 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2235 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2236 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2237 fsp->flow_type |= FLOW_EXT;
2238
2239 /* record action */
2240 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2241 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2242 else
2243 fsp->ring_cookie = rule->action;
2244
2245 return 0;
2246}
2247
2248static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2249 struct ethtool_rxnfc *cmd,
2250 u32 *rule_locs)
2251{
2252 struct hlist_node *node, *node2;
2253 struct ixgbe_fdir_filter *rule;
2254 int cnt = 0;
2255
2256 /* report total rule count */
2257 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2258
2259 hlist_for_each_entry_safe(rule, node, node2,
2260 &adapter->fdir_filter_list, fdir_node) {
2261 if (cnt == cmd->rule_cnt)
2262 return -EMSGSIZE;
2263 rule_locs[cnt] = rule->sw_idx;
2264 cnt++;
2265 }
2266
2267 cmd->rule_cnt = cnt;
2268
2269 return 0;
2270}
2271
2272static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2273 struct ethtool_rxnfc *cmd)
2274{
2275 cmd->data = 0;
2276
2277 /* if RSS is disabled then report no hashing */
2278 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
2279 return 0;
2280
2281 /* Report default options for RSS on ixgbe */
2282 switch (cmd->flow_type) {
2283 case TCP_V4_FLOW:
2284 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
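		/* fall through */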
2285 case UDP_V4_FLOW:
2286 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2287 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
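		/* fall through */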
2288 case SCTP_V4_FLOW:
2289 case AH_ESP_V4_FLOW:
2290 case AH_V4_FLOW:
2291 case ESP_V4_FLOW:
2292 case IPV4_FLOW:
2293 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2294 break;
2295 case TCP_V6_FLOW:
2296 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
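		/* fall through */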
2297 case UDP_V6_FLOW:
2298 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2299 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
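		/* fall through */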
2300 case SCTP_V6_FLOW:
2301 case AH_ESP_V6_FLOW:
2302 case AH_V6_FLOW:
2303 case ESP_V6_FLOW:
2304 case IPV6_FLOW:
2305 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2306 break;
2307 default:
2308 return -EINVAL;
2309 }
2310
2311 return 0;
2312}
2313
2314static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2315 u32 *rule_locs)
2316{
2317 struct ixgbe_adapter *adapter = netdev_priv(dev);
2318 int ret = -EOPNOTSUPP;
2319
2320 switch (cmd->cmd) {
2321 case ETHTOOL_GRXRINGS:
2322 cmd->data = adapter->num_rx_queues;
2323 ret = 0;
2324 break;
2325 case ETHTOOL_GRXCLSRLCNT:
2326 cmd->rule_cnt = adapter->fdir_filter_count;
2327 ret = 0;
2328 break;
2329 case ETHTOOL_GRXCLSRULE:
2330 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2331 break;
2332 case ETHTOOL_GRXCLSRLALL:
2333 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2334 break;
2335 case ETHTOOL_GRXFH:
2336 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2337 break;
2338 default:
2339 break;
2340 }
2341
2342 return ret;
2343}
2344
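/* Insert, replace or (when input is NULL) delete an entry in the sorted
 * software list of Flow Director filters; the old hardware filter is
 * erased when the entry is removed or replaced by one with a different
 * bucket hash.
 */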
2345static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2346 struct ixgbe_fdir_filter *input,
2347 u16 sw_idx)
2348{
2349 struct ixgbe_hw *hw = &adapter->hw;
2350 struct hlist_node *node, *node2, *parent;
2351 struct ixgbe_fdir_filter *rule;
2352 int err = -EINVAL;
2353
2354 parent = NULL;
2355 rule = NULL;
2356
2357 hlist_for_each_entry_safe(rule, node, node2,
2358 &adapter->fdir_filter_list, fdir_node) {
2359 /* hash found, or no matching entry */
2360 if (rule->sw_idx >= sw_idx)
2361 break;
2362 parent = node;
2363 }
2364
2365 /* if there is an old rule occupying our place remove it */
2366 if (rule && (rule->sw_idx == sw_idx)) {
2367 if (!input || (rule->filter.formatted.bkt_hash !=
2368 input->filter.formatted.bkt_hash)) {
2369 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2370 &rule->filter,
2371 sw_idx);
2372 }
2373
2374 hlist_del(&rule->fdir_node);
2375 kfree(rule);
2376 adapter->fdir_filter_count--;
2377 }
2378
2379 /*
2380 * If no input this was a delete, err should be 0 if a rule was
2381 * successfully found and removed from the list else -EINVAL
2382 */
2383 if (!input)
2384 return err;
2385
2386 /* initialize node and set software index */
2387 INIT_HLIST_NODE(&input->fdir_node);
2388
2389 /* add filter to the list */
2390 if (parent)
2391 hlist_add_after(parent, &input->fdir_node);
2392 else
2393 hlist_add_head(&input->fdir_node,
2394 &adapter->fdir_filter_list);
2395
2396 /* update counts */
2397 adapter->fdir_filter_count++;
2398
2399 return 0;
2400}
2401
2402static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2403 u8 *flow_type)
2404{
2405 switch (fsp->flow_type & ~FLOW_EXT) {
2406 case TCP_V4_FLOW:
2407 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2408 break;
2409 case UDP_V4_FLOW:
2410 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2411 break;
2412 case SCTP_V4_FLOW:
2413 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2414 break;
2415 case IP_USER_FLOW:
2416 switch (fsp->h_u.usr_ip4_spec.proto) {
2417 case IPPROTO_TCP:
2418 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2419 break;
2420 case IPPROTO_UDP:
2421 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2422 break;
2423 case IPPROTO_SCTP:
2424 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2425 break;
2426 case 0:
2427 if (!fsp->m_u.usr_ip4_spec.proto) {
2428 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2429 break;
2430 }
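			/* fall through when a protocol mask is set */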
2431 default:
2432 return 0;
2433 }
2434 break;
2435 default:
2436 return 0;
2437 }
2438
2439 return 1;
2440}
2441
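/* ETHTOOL_SRXCLSRLINS handler: validate the flow spec, program the
 * per-port input mask on first use, write the perfect filter to hardware
 * and track it in the software filter list.
 */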
2442static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2443 struct ethtool_rxnfc *cmd)
2444{
2445 struct ethtool_rx_flow_spec *fsp =
2446 (struct ethtool_rx_flow_spec *)&cmd->fs;
2447 struct ixgbe_hw *hw = &adapter->hw;
2448 struct ixgbe_fdir_filter *input;
2449 union ixgbe_atr_input mask;
2450 int err;
2451
2452 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2453 return -EOPNOTSUPP;
2454
2455 /*
2456 * Don't allow programming if the action is a queue greater than
2457 * the number of online Rx queues.
2458 */
2459 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2460 (fsp->ring_cookie >= adapter->num_rx_queues))
2461 return -EINVAL;
2462
2463 /* Don't allow indexes to exist outside of available space */
2464 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2465 e_err(drv, "Location out of range\n");
2466 return -EINVAL;
2467 }
2468
2469 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2470 if (!input)
2471 return -ENOMEM;
2472
2473 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2474
2475 /* set SW index */
2476 input->sw_idx = fsp->location;
2477
2478 /* record flow type */
2479 if (!ixgbe_flowspec_to_flow_type(fsp,
2480 &input->filter.formatted.flow_type)) {
2481 e_err(drv, "Unrecognized flow type\n");
2482 goto err_out;
2483 }
2484
2485 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2486 IXGBE_ATR_L4TYPE_MASK;
2487
2488 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2489 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2490
2491 /* Copy input into formatted structures */
2492 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2493 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2494 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2495 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2496 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2497 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2498 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2499 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2500
2501 if (fsp->flow_type & FLOW_EXT) {
2502 input->filter.formatted.vm_pool =
2503 (unsigned char)ntohl(fsp->h_ext.data[1]);
2504 mask.formatted.vm_pool =
2505 (unsigned char)ntohl(fsp->m_ext.data[1]);
2506 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2507 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2508 input->filter.formatted.flex_bytes =
2509 fsp->h_ext.vlan_etype;
2510 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2511 }
2512
2513 /* determine if we need to drop or route the packet */
2514 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2515 input->action = IXGBE_FDIR_DROP_QUEUE;
2516 else
2517 input->action = fsp->ring_cookie;
2518
2519 spin_lock(&adapter->fdir_perfect_lock);
2520
2521 if (hlist_empty(&adapter->fdir_filter_list)) {
2522 /* save mask and program input mask into HW */
2523 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2524 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2525 if (err) {
2526 e_err(drv, "Error writing mask\n");
2527 goto err_out_w_lock;
2528 }
2529 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2530 e_err(drv, "Only one mask supported per port\n");
2531 goto err_out_w_lock;
2532 }
2533
2534 /* apply mask and compute/store hash */
2535 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2536
2537 /* program filters to filter memory */
2538 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2539 &input->filter, input->sw_idx,
2540 (input->action == IXGBE_FDIR_DROP_QUEUE) ?
2541 IXGBE_FDIR_DROP_QUEUE :
2542 adapter->rx_ring[input->action]->reg_idx);
2543 if (err)
2544 goto err_out_w_lock;
2545
2546 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2547
2548 spin_unlock(&adapter->fdir_perfect_lock);
2549
2550 return err;
2551err_out_w_lock:
2552 spin_unlock(&adapter->fdir_perfect_lock);
2553err_out:
2554 kfree(input);
2555 return -EINVAL;
2556}
2557
2558static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2559 struct ethtool_rxnfc *cmd)
2560{
2561 struct ethtool_rx_flow_spec *fsp =
2562 (struct ethtool_rx_flow_spec *)&cmd->fs;
2563 int err;
2564
2565 spin_lock(&adapter->fdir_perfect_lock);
2566 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2567 spin_unlock(&adapter->fdir_perfect_lock);
2568
2569 return err;
2570}
2571
2572#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2573 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
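/* ETHTOOL_SRXFH handler: only the UDP 4-tuple hash is configurable;
 * every other flow type must keep the fixed RSS field selection.
 */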
2574static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2575 struct ethtool_rxnfc *nfc)
2576{
2577 u32 flags2 = adapter->flags2;
2578
2579 /*
2580 * RSS does not support anything other than hashing
2581 * to queues on src and dst IPs and ports
2582 */
2583 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2584 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2585 return -EINVAL;
2586
2587 switch (nfc->flow_type) {
2588 case TCP_V4_FLOW:
2589 case TCP_V6_FLOW:
2590 if (!(nfc->data & RXH_IP_SRC) ||
2591 !(nfc->data & RXH_IP_DST) ||
2592 !(nfc->data & RXH_L4_B_0_1) ||
2593 !(nfc->data & RXH_L4_B_2_3))
2594 return -EINVAL;
2595 break;
2596 case UDP_V4_FLOW:
2597 if (!(nfc->data & RXH_IP_SRC) ||
2598 !(nfc->data & RXH_IP_DST))
2599 return -EINVAL;
2600 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2601 case 0:
2602 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2603 break;
2604 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2605 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2606 break;
2607 default:
2608 return -EINVAL;
2609 }
2610 break;
2611 case UDP_V6_FLOW:
2612 if (!(nfc->data & RXH_IP_SRC) ||
2613 !(nfc->data & RXH_IP_DST))
2614 return -EINVAL;
2615 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2616 case 0:
2617 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2618 break;
2619 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2620 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2621 break;
2622 default:
2623 return -EINVAL;
2624 }
2625 break;
2626 case AH_ESP_V4_FLOW:
2627 case AH_V4_FLOW:
2628 case ESP_V4_FLOW:
2629 case SCTP_V4_FLOW:
2630 case AH_ESP_V6_FLOW:
2631 case AH_V6_FLOW:
2632 case ESP_V6_FLOW:
2633 case SCTP_V6_FLOW:
2634 if (!(nfc->data & RXH_IP_SRC) ||
2635 !(nfc->data & RXH_IP_DST) ||
2636 (nfc->data & RXH_L4_B_0_1) ||
2637 (nfc->data & RXH_L4_B_2_3))
2638 return -EINVAL;
2639 break;
2640 default:
2641 return -EINVAL;
2642 }
2643
2644 /* if we changed something we need to update flags */
2645 if (flags2 != adapter->flags2) {
2646 struct ixgbe_hw *hw = &adapter->hw;
2647 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2648
2649 if ((flags2 & UDP_RSS_FLAGS) &&
2650 !(adapter->flags2 & UDP_RSS_FLAGS))
2651 e_warn(drv, "enabling UDP RSS: fragmented packets"
2652 " may arrive out of order to the stack above\n");
2653
2654 adapter->flags2 = flags2;
2655
2656 /* Perform hash on these packet types */
2657 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2658 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2659 | IXGBE_MRQC_RSS_FIELD_IPV6
2660 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2661
2662 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2663 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2664
2665 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2666 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2667
2668 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2669 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2670
2671 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2672 }
2673
2674 return 0;
2675}
2676
2677static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2678{
2679 struct ixgbe_adapter *adapter = netdev_priv(dev);
2680 int ret = -EOPNOTSUPP;
2681
2682 switch (cmd->cmd) {
2683 case ETHTOOL_SRXCLSRLINS:
2684 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2685 break;
2686 case ETHTOOL_SRXCLSRLDEL:
2687 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2688 break;
2689 case ETHTOOL_SRXFH:
2690 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2691 break;
2692 default:
2693 break;
2694 }
2695
2696 return ret;
2697}
2698
2699static int ixgbe_get_ts_info(struct net_device *dev,
2700 struct ethtool_ts_info *info)
2701{
2702 struct ixgbe_adapter *adapter = netdev_priv(dev);
2703
2704 switch (adapter->hw.mac.type) {
2705#ifdef CONFIG_IXGBE_PTP
2706 case ixgbe_mac_X540:
2707 case ixgbe_mac_82599EB:
2708 info->so_timestamping =
2709 SOF_TIMESTAMPING_TX_HARDWARE |
2710 SOF_TIMESTAMPING_RX_HARDWARE |
2711 SOF_TIMESTAMPING_RAW_HARDWARE;
2712
2713 if (adapter->ptp_clock)
2714 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2715 else
2716 info->phc_index = -1;
2717
2718 info->tx_types =
2719 (1 << HWTSTAMP_TX_OFF) |
2720 (1 << HWTSTAMP_TX_ON);
2721
2722 info->rx_filters =
2723 (1 << HWTSTAMP_FILTER_NONE) |
2724 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2725 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2726 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
2727 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2728 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
2729 (1 << HWTSTAMP_FILTER_SOME);
2730 break;
2731#endif /* CONFIG_IXGBE_PTP */
2732 default:
 2733 return ethtool_op_get_ts_info(dev, info);
2735 }
2736 return 0;
2737}
2738
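/* These handlers back the standard ethtool commands, for example
 * (interface name illustrative):
 *   ethtool -t eth0 offline      -> ixgbe_diag_test
 *   ethtool -C eth0 rx-usecs 100 -> ixgbe_set_coalesce
 *   ethtool -s eth0 wol g        -> ixgbe_set_wol
 *   ethtool -N eth0 flow-type tcp4 ... action 1 -> ixgbe_set_rxnfc
 *   ethtool -p eth0 5            -> ixgbe_set_phys_id
 */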
2739static const struct ethtool_ops ixgbe_ethtool_ops = {
2740 .get_settings = ixgbe_get_settings,
2741 .set_settings = ixgbe_set_settings,
2742 .get_drvinfo = ixgbe_get_drvinfo,
2743 .get_regs_len = ixgbe_get_regs_len,
2744 .get_regs = ixgbe_get_regs,
2745 .get_wol = ixgbe_get_wol,
2746 .set_wol = ixgbe_set_wol,
2747 .nway_reset = ixgbe_nway_reset,
2748 .get_link = ethtool_op_get_link,
2749 .get_eeprom_len = ixgbe_get_eeprom_len,
2750 .get_eeprom = ixgbe_get_eeprom,
2751 .set_eeprom = ixgbe_set_eeprom,
2752 .get_ringparam = ixgbe_get_ringparam,
2753 .set_ringparam = ixgbe_set_ringparam,
2754 .get_pauseparam = ixgbe_get_pauseparam,
2755 .set_pauseparam = ixgbe_set_pauseparam,
2756 .get_msglevel = ixgbe_get_msglevel,
2757 .set_msglevel = ixgbe_set_msglevel,
2758 .self_test = ixgbe_diag_test,
2759 .get_strings = ixgbe_get_strings,
2760 .set_phys_id = ixgbe_set_phys_id,
2761 .get_sset_count = ixgbe_get_sset_count,
2762 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2763 .get_coalesce = ixgbe_get_coalesce,
2764 .set_coalesce = ixgbe_set_coalesce,
2765 .get_rxnfc = ixgbe_get_rxnfc,
2766 .set_rxnfc = ixgbe_set_rxnfc,
2767 .get_ts_info = ixgbe_get_ts_info,
2768};
2769
2770void ixgbe_set_ethtool_ops(struct net_device *netdev)
2771{
2772 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
2773}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 1999 - 2018 Intel Corporation. */
3
4/* ethtool support for ixgbe */
5
6#include <linux/interrupt.h>
7#include <linux/types.h>
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/vmalloc.h>
14#include <linux/highmem.h>
15#include <linux/uaccess.h>
16
17#include "ixgbe.h"
18#include "ixgbe_phy.h"
19
20
21#define IXGBE_ALL_RAR_ENTRIES 16
22
23enum {NETDEV_STATS, IXGBE_STATS};
24
25struct ixgbe_stats {
26 char stat_string[ETH_GSTRING_LEN];
27 int type;
28 int sizeof_stat;
29 int stat_offset;
30};
31
32#define IXGBE_STAT(m) IXGBE_STATS, \
33 sizeof(((struct ixgbe_adapter *)0)->m), \
34 offsetof(struct ixgbe_adapter, m)
35#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
36 sizeof(((struct rtnl_link_stats64 *)0)->m), \
37 offsetof(struct rtnl_link_stats64, m)
38
39static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
40 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
41 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
42 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
43 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
44 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
45 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
46 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
47 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
48 {"lsc_int", IXGBE_STAT(lsc_int)},
49 {"tx_busy", IXGBE_STAT(tx_busy)},
50 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
51 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
52 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
53 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
54 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
55 {"multicast", IXGBE_NETDEV_STAT(multicast)},
56 {"broadcast", IXGBE_STAT(stats.bprc)},
57 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
58 {"collisions", IXGBE_NETDEV_STAT(collisions)},
59 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
60 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
61 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
62 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
63 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
64 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
65 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
66 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
67 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
68 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
69 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
70 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
71 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
72 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
73 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
74 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
75 {"rx_length_errors", IXGBE_STAT(stats.rlec)},
76 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
77 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
78 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
79 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
80 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
81 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
82 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
83 {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
84 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
85 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
86 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
87 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
88 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
89 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
90 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
91 {"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
92 {"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
93 {"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
94 {"tx_ipsec", IXGBE_STAT(tx_ipsec)},
95 {"rx_ipsec", IXGBE_STAT(rx_ipsec)},
96#ifdef IXGBE_FCOE
97 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
98 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
99 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
100 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
101 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
102 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
103 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
104 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
105#endif /* IXGBE_FCOE */
106};
107
108/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
109 * we set the num_rx_queues to evaluate to num_tx_queues. This is
110 * used because we do not have a good way to get the max number of
111 * rx queues with CONFIG_RPS disabled.
112 */
113#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
114
115#define IXGBE_QUEUE_STATS_LEN ( \
116 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
117 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
118#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
119#define IXGBE_PB_STATS_LEN ( \
120 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
121 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
122 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
123 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
124 / sizeof(u64))
125#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
126 IXGBE_PB_STATS_LEN + \
127 IXGBE_QUEUE_STATS_LEN)
128
129static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
130 "Register test (offline)", "Eeprom test (offline)",
131 "Interrupt test (offline)", "Loopback test (offline)",
132 "Link test (on/offline)"
133};
134#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
135
136static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
137#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0)
138 "legacy-rx",
139#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
140 "vf-ipsec",
141};
142
143#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
144
145#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
146
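/* Report the 10G link modes the device supports: 10GbaseT for
 * non-backplane media, otherwise KX4 and/or KR depending on device ID.
 */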
147static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
148 struct ethtool_link_ksettings *cmd)
149{
150 if (!ixgbe_isbackplane(hw->phy.media_type)) {
151 ethtool_link_ksettings_add_link_mode(cmd, supported,
152 10000baseT_Full);
153 return;
154 }
155
156 switch (hw->device_id) {
157 case IXGBE_DEV_ID_82598:
158 case IXGBE_DEV_ID_82599_KX4:
159 case IXGBE_DEV_ID_82599_KX4_MEZZ:
160 case IXGBE_DEV_ID_X550EM_X_KX4:
161 ethtool_link_ksettings_add_link_mode
162 (cmd, supported, 10000baseKX4_Full);
163 break;
164 case IXGBE_DEV_ID_82598_BX:
165 case IXGBE_DEV_ID_82599_KR:
166 case IXGBE_DEV_ID_X550EM_X_KR:
167 case IXGBE_DEV_ID_X550EM_X_XFI:
168 ethtool_link_ksettings_add_link_mode
169 (cmd, supported, 10000baseKR_Full);
170 break;
171 default:
172 ethtool_link_ksettings_add_link_mode
173 (cmd, supported, 10000baseKX4_Full);
174 ethtool_link_ksettings_add_link_mode
175 (cmd, supported, 10000baseKR_Full);
176 break;
177 }
178}
179
180static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
181 struct ethtool_link_ksettings *cmd)
182{
183 if (!ixgbe_isbackplane(hw->phy.media_type)) {
184 ethtool_link_ksettings_add_link_mode(cmd, advertising,
185 10000baseT_Full);
186 return;
187 }
188
189 switch (hw->device_id) {
190 case IXGBE_DEV_ID_82598:
191 case IXGBE_DEV_ID_82599_KX4:
192 case IXGBE_DEV_ID_82599_KX4_MEZZ:
193 case IXGBE_DEV_ID_X550EM_X_KX4:
194 ethtool_link_ksettings_add_link_mode
195 (cmd, advertising, 10000baseKX4_Full);
196 break;
197 case IXGBE_DEV_ID_82598_BX:
198 case IXGBE_DEV_ID_82599_KR:
199 case IXGBE_DEV_ID_X550EM_X_KR:
200 case IXGBE_DEV_ID_X550EM_X_XFI:
201 ethtool_link_ksettings_add_link_mode
202 (cmd, advertising, 10000baseKR_Full);
203 break;
204 default:
205 ethtool_link_ksettings_add_link_mode
206 (cmd, advertising, 10000baseKX4_Full);
207 ethtool_link_ksettings_add_link_mode
208 (cmd, advertising, 10000baseKR_Full);
209 break;
210 }
211}
212
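/* ethtool get_link_ksettings handler: build the supported/advertised
 * link modes from the MAC/PHY capabilities and report the current
 * speed, duplex, port type and pause advertisement.
 */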
213static int ixgbe_get_link_ksettings(struct net_device *netdev,
214 struct ethtool_link_ksettings *cmd)
215{
216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
217 struct ixgbe_hw *hw = &adapter->hw;
218 ixgbe_link_speed supported_link;
219 bool autoneg = false;
220
221 ethtool_link_ksettings_zero_link_mode(cmd, supported);
222 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
223
224 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
225
226 /* set the supported link speeds */
227 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
228 ixgbe_set_supported_10gtypes(hw, cmd);
229 ixgbe_set_advertising_10gtypes(hw, cmd);
230 }
231 if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
232 ethtool_link_ksettings_add_link_mode(cmd, supported,
233 5000baseT_Full);
234
235 if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
236 ethtool_link_ksettings_add_link_mode(cmd, supported,
237 2500baseT_Full);
238
239 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
240 if (ixgbe_isbackplane(hw->phy.media_type)) {
241 ethtool_link_ksettings_add_link_mode(cmd, supported,
242 1000baseKX_Full);
243 ethtool_link_ksettings_add_link_mode(cmd, advertising,
244 1000baseKX_Full);
245 } else {
246 ethtool_link_ksettings_add_link_mode(cmd, supported,
247 1000baseT_Full);
248 ethtool_link_ksettings_add_link_mode(cmd, advertising,
249 1000baseT_Full);
250 }
251 }
252 if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
253 ethtool_link_ksettings_add_link_mode(cmd, supported,
254 100baseT_Full);
255 ethtool_link_ksettings_add_link_mode(cmd, advertising,
256 100baseT_Full);
257 }
258 if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
259 ethtool_link_ksettings_add_link_mode(cmd, supported,
260 10baseT_Full);
261 ethtool_link_ksettings_add_link_mode(cmd, advertising,
262 10baseT_Full);
263 }
264
265 /* set the advertised speeds */
266 if (hw->phy.autoneg_advertised) {
267 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
268 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
269 ethtool_link_ksettings_add_link_mode(cmd, advertising,
270 10baseT_Full);
271 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
272 ethtool_link_ksettings_add_link_mode(cmd, advertising,
273 100baseT_Full);
274 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
275 ixgbe_set_advertising_10gtypes(hw, cmd);
276 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
277 if (ethtool_link_ksettings_test_link_mode
278 (cmd, supported, 1000baseKX_Full))
279 ethtool_link_ksettings_add_link_mode
280 (cmd, advertising, 1000baseKX_Full);
281 else
282 ethtool_link_ksettings_add_link_mode
283 (cmd, advertising, 1000baseT_Full);
284 }
285 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
286 ethtool_link_ksettings_add_link_mode(cmd, advertising,
287 5000baseT_Full);
288 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
289 ethtool_link_ksettings_add_link_mode(cmd, advertising,
290 2500baseT_Full);
291 } else {
292 if (hw->phy.multispeed_fiber && !autoneg) {
293 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
294 ethtool_link_ksettings_add_link_mode
295 (cmd, advertising, 10000baseT_Full);
296 }
297 }
298
299 if (autoneg) {
300 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
301 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
302 cmd->base.autoneg = AUTONEG_ENABLE;
303 } else
304 cmd->base.autoneg = AUTONEG_DISABLE;
305
306 /* Determine the remaining settings based on the PHY type. */
307 switch (adapter->hw.phy.type) {
308 case ixgbe_phy_tn:
309 case ixgbe_phy_aq:
310 case ixgbe_phy_x550em_ext_t:
311 case ixgbe_phy_fw:
312 case ixgbe_phy_cu_unknown:
313 ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
314 ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
315 cmd->base.port = PORT_TP;
316 break;
317 case ixgbe_phy_qt:
318 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
319 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
320 cmd->base.port = PORT_FIBRE;
321 break;
322 case ixgbe_phy_nl:
323 case ixgbe_phy_sfp_passive_tyco:
324 case ixgbe_phy_sfp_passive_unknown:
325 case ixgbe_phy_sfp_ftl:
326 case ixgbe_phy_sfp_avago:
327 case ixgbe_phy_sfp_intel:
328 case ixgbe_phy_sfp_unknown:
329 case ixgbe_phy_qsfp_passive_unknown:
330 case ixgbe_phy_qsfp_active_unknown:
331 case ixgbe_phy_qsfp_intel:
332 case ixgbe_phy_qsfp_unknown:
333 /* SFP+ devices, further checking needed */
334 switch (adapter->hw.phy.sfp_type) {
335 case ixgbe_sfp_type_da_cu:
336 case ixgbe_sfp_type_da_cu_core0:
337 case ixgbe_sfp_type_da_cu_core1:
338 ethtool_link_ksettings_add_link_mode(cmd, supported,
339 FIBRE);
340 ethtool_link_ksettings_add_link_mode(cmd, advertising,
341 FIBRE);
342 cmd->base.port = PORT_DA;
343 break;
344 case ixgbe_sfp_type_sr:
345 case ixgbe_sfp_type_lr:
346 case ixgbe_sfp_type_srlr_core0:
347 case ixgbe_sfp_type_srlr_core1:
348 case ixgbe_sfp_type_1g_sx_core0:
349 case ixgbe_sfp_type_1g_sx_core1:
350 case ixgbe_sfp_type_1g_lx_core0:
351 case ixgbe_sfp_type_1g_lx_core1:
352 ethtool_link_ksettings_add_link_mode(cmd, supported,
353 FIBRE);
354 ethtool_link_ksettings_add_link_mode(cmd, advertising,
355 FIBRE);
356 cmd->base.port = PORT_FIBRE;
357 break;
358 case ixgbe_sfp_type_not_present:
359 ethtool_link_ksettings_add_link_mode(cmd, supported,
360 FIBRE);
361 ethtool_link_ksettings_add_link_mode(cmd, advertising,
362 FIBRE);
363 cmd->base.port = PORT_NONE;
364 break;
365 case ixgbe_sfp_type_1g_cu_core0:
366 case ixgbe_sfp_type_1g_cu_core1:
367 ethtool_link_ksettings_add_link_mode(cmd, supported,
368 TP);
369 ethtool_link_ksettings_add_link_mode(cmd, advertising,
370 TP);
371 cmd->base.port = PORT_TP;
372 break;
373 case ixgbe_sfp_type_unknown:
374 default:
375 ethtool_link_ksettings_add_link_mode(cmd, supported,
376 FIBRE);
377 ethtool_link_ksettings_add_link_mode(cmd, advertising,
378 FIBRE);
379 cmd->base.port = PORT_OTHER;
380 break;
381 }
382 break;
383 case ixgbe_phy_xaui:
384 ethtool_link_ksettings_add_link_mode(cmd, supported,
385 FIBRE);
386 ethtool_link_ksettings_add_link_mode(cmd, advertising,
387 FIBRE);
388 cmd->base.port = PORT_NONE;
389 break;
390 case ixgbe_phy_unknown:
391 case ixgbe_phy_generic:
392 case ixgbe_phy_sfp_unsupported:
393 default:
394 ethtool_link_ksettings_add_link_mode(cmd, supported,
395 FIBRE);
396 ethtool_link_ksettings_add_link_mode(cmd, advertising,
397 FIBRE);
398 cmd->base.port = PORT_OTHER;
399 break;
400 }
401
402 /* Indicate pause support */
403 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
404
405 switch (hw->fc.requested_mode) {
406 case ixgbe_fc_full:
407 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
408 break;
409 case ixgbe_fc_rx_pause:
410 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
411 ethtool_link_ksettings_add_link_mode(cmd, advertising,
412 Asym_Pause);
413 break;
414 case ixgbe_fc_tx_pause:
415 ethtool_link_ksettings_add_link_mode(cmd, advertising,
416 Asym_Pause);
417 break;
418 default:
419 ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
420 ethtool_link_ksettings_del_link_mode(cmd, advertising,
421 Asym_Pause);
422 }
423
424 if (netif_carrier_ok(netdev)) {
425 switch (adapter->link_speed) {
426 case IXGBE_LINK_SPEED_10GB_FULL:
427 cmd->base.speed = SPEED_10000;
428 break;
429 case IXGBE_LINK_SPEED_5GB_FULL:
430 cmd->base.speed = SPEED_5000;
431 break;
432 case IXGBE_LINK_SPEED_2_5GB_FULL:
433 cmd->base.speed = SPEED_2500;
434 break;
435 case IXGBE_LINK_SPEED_1GB_FULL:
436 cmd->base.speed = SPEED_1000;
437 break;
438 case IXGBE_LINK_SPEED_100_FULL:
439 cmd->base.speed = SPEED_100;
440 break;
441 case IXGBE_LINK_SPEED_10_FULL:
442 cmd->base.speed = SPEED_10;
443 break;
444 default:
445 break;
446 }
447 cmd->base.duplex = DUPLEX_FULL;
448 } else {
449 cmd->base.speed = SPEED_UNKNOWN;
450 cmd->base.duplex = DUPLEX_UNKNOWN;
451 }
452
453 return 0;
454}
455
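/* ethtool set_link_ksettings handler: for copper and multispeed-fiber
 * ports restrict the advertised speeds and restart link setup; other
 * media only accept a forced 10G/full request.
 */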
456static int ixgbe_set_link_ksettings(struct net_device *netdev,
457 const struct ethtool_link_ksettings *cmd)
458{
459 struct ixgbe_adapter *adapter = netdev_priv(netdev);
460 struct ixgbe_hw *hw = &adapter->hw;
461 u32 advertised, old;
462 s32 err = 0;
463
464 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
465 (hw->phy.multispeed_fiber)) {
466 /*
467 * this function does not support duplex forcing, but can
468 * limit the advertising of the adapter to the specified speed
469 */
470 if (!bitmap_subset(cmd->link_modes.advertising,
471 cmd->link_modes.supported,
472 __ETHTOOL_LINK_MODE_MASK_NBITS))
473 return -EINVAL;
474
475 /* only allow one speed at a time if no autoneg */
476 if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
477 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
478 10000baseT_Full) &&
479 ethtool_link_ksettings_test_link_mode(cmd, advertising,
480 1000baseT_Full))
481 return -EINVAL;
482 }
483
484 old = hw->phy.autoneg_advertised;
485 advertised = 0;
486 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
487 10000baseT_Full))
488 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
489 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
490 5000baseT_Full))
491 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
492 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
493 2500baseT_Full))
494 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
495 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
496 1000baseT_Full))
497 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
498
499 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
500 100baseT_Full))
501 advertised |= IXGBE_LINK_SPEED_100_FULL;
502
503 if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
504 10baseT_Full))
505 advertised |= IXGBE_LINK_SPEED_10_FULL;
506
507 if (old == advertised)
508 return err;
509 /* this sets the link speed and restarts auto-neg */
510 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
511 usleep_range(1000, 2000);
512
513 hw->mac.autotry_restart = true;
514 err = hw->mac.ops.setup_link(hw, advertised, true);
515 if (err) {
516 e_info(probe, "setup link failed with code %d\n", err);
517 hw->mac.ops.setup_link(hw, old, true);
518 }
519 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
520 } else {
521 /* in this case we currently only support 10Gb/FULL */
522 u32 speed = cmd->base.speed;
523
524 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
525 (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
526 10000baseT_Full)) ||
527 (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
528 return -EINVAL;
529 }
530
531 return err;
532}
533
534static void ixgbe_get_pause_stats(struct net_device *netdev,
535 struct ethtool_pause_stats *stats)
536{
537 struct ixgbe_adapter *adapter = netdev_priv(netdev);
538 struct ixgbe_hw_stats *hwstats = &adapter->stats;
539
540 stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
541 stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
542}
543
544static void ixgbe_get_pauseparam(struct net_device *netdev,
545 struct ethtool_pauseparam *pause)
546{
547 struct ixgbe_adapter *adapter = netdev_priv(netdev);
548 struct ixgbe_hw *hw = &adapter->hw;
549
550 if (ixgbe_device_supports_autoneg_fc(hw) &&
551 !hw->fc.disable_fc_autoneg)
552 pause->autoneg = 1;
553 else
554 pause->autoneg = 0;
555
556 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
557 pause->rx_pause = 1;
558 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
559 pause->tx_pause = 1;
560 } else if (hw->fc.current_mode == ixgbe_fc_full) {
561 pause->rx_pause = 1;
562 pause->tx_pause = 1;
563 }
564}
565
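/* ethtool set_pauseparam handler: update the requested flow-control
 * mode and reinitialize the link if anything changed.
 */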
566static int ixgbe_set_pauseparam(struct net_device *netdev,
567 struct ethtool_pauseparam *pause)
568{
569 struct ixgbe_adapter *adapter = netdev_priv(netdev);
570 struct ixgbe_hw *hw = &adapter->hw;
571 struct ixgbe_fc_info fc = hw->fc;
572
 573 /* 82598 does not support link flow control with DCB enabled */
574 if ((hw->mac.type == ixgbe_mac_82598EB) &&
575 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
576 return -EINVAL;
577
578 /* some devices do not support autoneg of link flow control */
579 if ((pause->autoneg == AUTONEG_ENABLE) &&
580 !ixgbe_device_supports_autoneg_fc(hw))
581 return -EINVAL;
582
583 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
584
585 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
586 fc.requested_mode = ixgbe_fc_full;
587 else if (pause->rx_pause && !pause->tx_pause)
588 fc.requested_mode = ixgbe_fc_rx_pause;
589 else if (!pause->rx_pause && pause->tx_pause)
590 fc.requested_mode = ixgbe_fc_tx_pause;
591 else
592 fc.requested_mode = ixgbe_fc_none;
593
 594 /* if the requested mode changed, apply it and reconfigure the link */
595 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
596 hw->fc = fc;
597 if (netif_running(netdev))
598 ixgbe_reinit_locked(adapter);
599 else
600 ixgbe_reset(adapter);
601 }
602
603 return 0;
604}
605
606static u32 ixgbe_get_msglevel(struct net_device *netdev)
607{
608 struct ixgbe_adapter *adapter = netdev_priv(netdev);
609 return adapter->msg_enable;
610}
611
612static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
613{
614 struct ixgbe_adapter *adapter = netdev_priv(netdev);
615 adapter->msg_enable = data;
616}
617
618static int ixgbe_get_regs_len(struct net_device *netdev)
619{
620#define IXGBE_REGS_LEN 1145
621 return IXGBE_REGS_LEN * sizeof(u32);
622}
623
624#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
625
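/* ethtool register-dump handler: snapshot the general, NVM, interrupt,
 * flow control, Rx/Tx DMA and wake-up register blocks into the caller's
 * buffer.
 */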
626static void ixgbe_get_regs(struct net_device *netdev,
627 struct ethtool_regs *regs, void *p)
628{
629 struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 struct ixgbe_hw *hw = &adapter->hw;
631 u32 *regs_buff = p;
632 u8 i;
633
634 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
635
636 regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
637 hw->device_id;
638
639 /* General Registers */
640 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
641 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
642 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
643 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
644 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
645 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
646 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
647 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
648
649 /* NVM Register */
650 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
651 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
652 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
653 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
654 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
655 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
656 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
657 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
658 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
659 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
660
661 /* Interrupt */
662 /* don't read EICR because it can clear interrupt causes, instead
663 * read EICS which is a shadow but doesn't clear EICR */
664 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
665 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
666 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
667 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
668 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
669 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
670 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
671 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
672 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
673 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
674 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
675 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
676
677 /* Flow Control */
678 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
679 for (i = 0; i < 4; i++)
680 regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
681 for (i = 0; i < 8; i++) {
682 switch (hw->mac.type) {
683 case ixgbe_mac_82598EB:
684 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
685 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
686 break;
687 case ixgbe_mac_82599EB:
688 case ixgbe_mac_X540:
689 case ixgbe_mac_X550:
690 case ixgbe_mac_X550EM_x:
691 case ixgbe_mac_x550em_a:
692 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
693 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
694 break;
695 default:
696 break;
697 }
698 }
699 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
700 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
701
702 /* Receive DMA */
703 for (i = 0; i < 64; i++)
704 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
705 for (i = 0; i < 64; i++)
706 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
707 for (i = 0; i < 64; i++)
708 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
709 for (i = 0; i < 64; i++)
710 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
711 for (i = 0; i < 64; i++)
712 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
713 for (i = 0; i < 64; i++)
714 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
715 for (i = 0; i < 16; i++)
716 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
717 for (i = 0; i < 16; i++)
718 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
719 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
720 for (i = 0; i < 8; i++)
721 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
722 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
723 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
724
725 /* Receive */
726 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
727 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
728 for (i = 0; i < 16; i++)
729 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
730 for (i = 0; i < 16; i++)
731 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
732 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
733 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
734 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
735 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
736 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
737 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
738 for (i = 0; i < 8; i++)
739 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
740 for (i = 0; i < 8; i++)
741 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
742 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
743
744 /* Transmit */
745 for (i = 0; i < 32; i++)
746 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
747 for (i = 0; i < 32; i++)
748 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
749 for (i = 0; i < 32; i++)
750 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
751 for (i = 0; i < 32; i++)
752 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
753 for (i = 0; i < 32; i++)
754 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
755 for (i = 0; i < 32; i++)
756 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
757 for (i = 0; i < 32; i++)
758 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
759 for (i = 0; i < 32; i++)
760 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
761 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
762 for (i = 0; i < 16; i++)
763 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
764 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
765 for (i = 0; i < 8; i++)
766 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
767 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
768
769 /* Wake Up */
770 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
771 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
772 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
773 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
774 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
775 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
776 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
777 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
778 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
779
780 /* DCB */
781 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
782 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
783
784 switch (hw->mac.type) {
785 case ixgbe_mac_82598EB:
786 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
787 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
788 for (i = 0; i < 8; i++)
789 regs_buff[833 + i] =
790 IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
791 for (i = 0; i < 8; i++)
792 regs_buff[841 + i] =
793 IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
794 for (i = 0; i < 8; i++)
795 regs_buff[849 + i] =
796 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
797 for (i = 0; i < 8; i++)
798 regs_buff[857 + i] =
799 IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
800 break;
801 case ixgbe_mac_82599EB:
802 case ixgbe_mac_X540:
803 case ixgbe_mac_X550:
804 case ixgbe_mac_X550EM_x:
805 case ixgbe_mac_x550em_a:
806 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
807 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
808 for (i = 0; i < 8; i++)
809 regs_buff[833 + i] =
810 IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
811 for (i = 0; i < 8; i++)
812 regs_buff[841 + i] =
813 IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
814 for (i = 0; i < 8; i++)
815 regs_buff[849 + i] =
816 IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
817 for (i = 0; i < 8; i++)
818 regs_buff[857 + i] =
819 IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
820 break;
821 default:
822 break;
823 }
824
825 for (i = 0; i < 8; i++)
826 regs_buff[865 + i] =
827 IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
828 for (i = 0; i < 8; i++)
829 regs_buff[873 + i] =
830 IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
831
832 /* Statistics */
833 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
834 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
835 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
836 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
837 for (i = 0; i < 8; i++)
838 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
839 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
840 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
841 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
842 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
843 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
844 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
845 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
846 for (i = 0; i < 8; i++)
847 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
848 for (i = 0; i < 8; i++)
849 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
850 for (i = 0; i < 8; i++)
851 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
852 for (i = 0; i < 8; i++)
853 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
854 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
855 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
856 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
857 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
858 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
859 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
860 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
861 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
862 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
863 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
864 regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
865 regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
866 regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
867 regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
868 for (i = 0; i < 8; i++)
869 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
870 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
871 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
872 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
873 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
874 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
875 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
876 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
877 regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
878 regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
879 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
880 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
881 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
882 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
883 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
884 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
885 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
886 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
887 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
888 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
889 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
890 for (i = 0; i < 16; i++)
891 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
892 for (i = 0; i < 16; i++)
893 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
894 for (i = 0; i < 16; i++)
895 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
896 for (i = 0; i < 16; i++)
897 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
898
899 /* MAC */
900 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
901 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
902 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
903 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
904 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
905 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
906 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
907 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
908 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
909 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
910 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
911 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
912 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
913 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
914 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
915 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
916 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
917 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
918 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
919 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
920 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
921 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
922 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
923 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
924 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
925 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
926 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
927 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
928 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
929 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
930 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
931 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
932 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
933
934 /* Diagnostic */
935 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
936 for (i = 0; i < 8; i++)
937 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
938 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
939 for (i = 0; i < 4; i++)
940 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
941 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
942 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
943 for (i = 0; i < 8; i++)
944 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
945 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
946 for (i = 0; i < 4; i++)
947 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
948 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
949 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
950 for (i = 0; i < 4; i++)
951 regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
952 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
953 for (i = 0; i < 4; i++)
954 regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
955 for (i = 0; i < 8; i++)
956 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
957 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
958 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
959 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
960 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
961 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
962 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
963 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
964 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
965 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
966
967 /* 82599 X540 specific registers */
968 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
969
970 /* 82599 X540 specific DCB registers */
971 regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
972 regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
973 for (i = 0; i < 4; i++)
974 regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
975 regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
976 /* same as RTTQCNRM */
977 regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
978 /* same as RTTQCNRR */
979
980 /* X540 specific DCB registers */
981 regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
982 regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
983
984 /* Security config registers */
985 regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
986 regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
987 regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
988 regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
989 regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
990 regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
991}
992
993static int ixgbe_get_eeprom_len(struct net_device *netdev)
994{
995 struct ixgbe_adapter *adapter = netdev_priv(netdev);
996 return adapter->hw.eeprom.word_size * 2;
997}
998
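/* ixgbe_get_eeprom - read NVM contents for ethtool -e
 * ethtool offsets and lengths are in bytes while the EEPROM is word
 * addressable, so the requested range is widened to whole words, read
 * via hw->eeprom.ops.read_buffer(), converted to CPU order and copied
 * out again at the original byte offset.
 */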
999static int ixgbe_get_eeprom(struct net_device *netdev,
1000 struct ethtool_eeprom *eeprom, u8 *bytes)
1001{
1002 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1003 struct ixgbe_hw *hw = &adapter->hw;
1004 u16 *eeprom_buff;
1005 int first_word, last_word, eeprom_len;
1006 int ret_val = 0;
1007 u16 i;
1008
1009 if (eeprom->len == 0)
1010 return -EINVAL;
1011
1012 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1013
1014 first_word = eeprom->offset >> 1;
1015 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1016 eeprom_len = last_word - first_word + 1;
1017
1018 eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
1019 if (!eeprom_buff)
1020 return -ENOMEM;
1021
1022 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
1023 eeprom_buff);
1024
1025 /* Device's eeprom is always little-endian, word addressable */
1026 for (i = 0; i < eeprom_len; i++)
1027 le16_to_cpus(&eeprom_buff[i]);
1028
1029 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1030 kfree(eeprom_buff);
1031
1032 return ret_val;
1033}
1034
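/* ixgbe_set_eeprom - write NVM contents for ethtool -E
 * An unaligned first or last byte requires a read-modify-write of the
 * containing EEPROM word.  The affected words are converted to CPU
 * order, patched with the new bytes, converted back to little-endian,
 * written with write_buffer(), and the NVM checksum is updated on
 * success.
 */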
1035static int ixgbe_set_eeprom(struct net_device *netdev,
1036 struct ethtool_eeprom *eeprom, u8 *bytes)
1037{
1038 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1039 struct ixgbe_hw *hw = &adapter->hw;
1040 u16 *eeprom_buff;
1041 void *ptr;
1042 int max_len, first_word, last_word, ret_val = 0;
1043 u16 i;
1044
1045 if (eeprom->len == 0)
1046 return -EINVAL;
1047
1048 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
1049 return -EINVAL;
1050
1051 max_len = hw->eeprom.word_size * 2;
1052
1053 first_word = eeprom->offset >> 1;
1054 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1055 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1056 if (!eeprom_buff)
1057 return -ENOMEM;
1058
1059 ptr = eeprom_buff;
1060
1061 if (eeprom->offset & 1) {
1062 /*
1063 * need read/modify/write of first changed EEPROM word
1064 * only the second byte of the word is being modified
1065 */
1066 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
1067 if (ret_val)
1068 goto err;
1069
1070 ptr++;
1071 }
1072 if ((eeprom->offset + eeprom->len) & 1) {
1073 /*
1074 * need read/modify/write of last changed EEPROM word
1075 * only the first byte of the word is being modified
1076 */
1077 ret_val = hw->eeprom.ops.read(hw, last_word,
1078 &eeprom_buff[last_word - first_word]);
1079 if (ret_val)
1080 goto err;
1081 }
1082
1083 /* Device's eeprom is always little-endian, word addressable */
1084 for (i = 0; i < last_word - first_word + 1; i++)
1085 le16_to_cpus(&eeprom_buff[i]);
1086
1087 memcpy(ptr, bytes, eeprom->len);
1088
1089 for (i = 0; i < last_word - first_word + 1; i++)
1090 cpu_to_le16s(&eeprom_buff[i]);
1091
1092 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
1093 last_word - first_word + 1,
1094 eeprom_buff);
1095
1096 /* Update the checksum */
1097 if (ret_val == 0)
1098 hw->eeprom.ops.update_checksum(hw);
1099
1100err:
1101 kfree(eeprom_buff);
1102 return ret_val;
1103}
1104
1105static void ixgbe_get_drvinfo(struct net_device *netdev,
1106 struct ethtool_drvinfo *drvinfo)
1107{
1108 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1109
1110 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
1111
1112 strlcpy(drvinfo->fw_version, adapter->eeprom_id,
1113 sizeof(drvinfo->fw_version));
1114
1115 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
1116 sizeof(drvinfo->bus_info));
1117
1118 drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
1119}
1120
1121static void ixgbe_get_ringparam(struct net_device *netdev,
1122 struct ethtool_ringparam *ring)
1123{
1124 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1125 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
1126 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
1127
1128 ring->rx_max_pending = IXGBE_MAX_RXD;
1129 ring->tx_max_pending = IXGBE_MAX_TXD;
1130 ring->rx_pending = rx_ring->count;
1131 ring->tx_pending = tx_ring->count;
1132}
1133
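/* ixgbe_set_ringparam - resize the Tx/XDP/Rx descriptor rings
 * Requested counts are clamped to IXGBE_MIN/MAX_*XD and aligned to the
 * required descriptor multiple.  While the interface is up, new ring
 * resources are built in a temporary array first so the old rings stay
 * usable if any allocation fails; XDP rings always share the Tx count.
 */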
1134static int ixgbe_set_ringparam(struct net_device *netdev,
1135 struct ethtool_ringparam *ring)
1136{
1137 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1138 struct ixgbe_ring *temp_ring;
1139 int i, j, err = 0;
1140 u32 new_rx_count, new_tx_count;
1141
1142 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1143 return -EINVAL;
1144
1145 new_tx_count = clamp_t(u32, ring->tx_pending,
1146 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
1147 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
1148
1149 new_rx_count = clamp_t(u32, ring->rx_pending,
1150 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
1151 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
1152
1153 if ((new_tx_count == adapter->tx_ring_count) &&
1154 (new_rx_count == adapter->rx_ring_count)) {
1155 /* nothing to do */
1156 return 0;
1157 }
1158
1159 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1160 usleep_range(1000, 2000);
1161
1162 if (!netif_running(adapter->netdev)) {
1163 for (i = 0; i < adapter->num_tx_queues; i++)
1164 adapter->tx_ring[i]->count = new_tx_count;
1165 for (i = 0; i < adapter->num_xdp_queues; i++)
1166 adapter->xdp_ring[i]->count = new_tx_count;
1167 for (i = 0; i < adapter->num_rx_queues; i++)
1168 adapter->rx_ring[i]->count = new_rx_count;
1169 adapter->tx_ring_count = new_tx_count;
1170 adapter->xdp_ring_count = new_tx_count;
1171 adapter->rx_ring_count = new_rx_count;
1172 goto clear_reset;
1173 }
1174
1175 /* allocate temporary buffer to store rings in */
1176 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1177 adapter->num_rx_queues);
1178 temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
1179
1180 if (!temp_ring) {
1181 err = -ENOMEM;
1182 goto clear_reset;
1183 }
1184
1185 ixgbe_down(adapter);
1186
1187 /*
1188 * Setup new Tx resources and free the old Tx resources in that order.
1189 * We can then assign the new resources to the rings via a memcpy.
1190 * The advantage to this approach is that we are guaranteed to still
1191 * have resources even in the case of an allocation failure.
1192 */
1193 if (new_tx_count != adapter->tx_ring_count) {
1194 for (i = 0; i < adapter->num_tx_queues; i++) {
1195 memcpy(&temp_ring[i], adapter->tx_ring[i],
1196 sizeof(struct ixgbe_ring));
1197
1198 temp_ring[i].count = new_tx_count;
1199 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1200 if (err) {
1201 while (i) {
1202 i--;
1203 ixgbe_free_tx_resources(&temp_ring[i]);
1204 }
1205 goto err_setup;
1206 }
1207 }
1208
1209 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1210 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1211 sizeof(struct ixgbe_ring));
1212
1213 temp_ring[i].count = new_tx_count;
1214 err = ixgbe_setup_tx_resources(&temp_ring[i]);
1215 if (err) {
1216 while (i) {
1217 i--;
1218 ixgbe_free_tx_resources(&temp_ring[i]);
1219 }
1220 goto err_setup;
1221 }
1222 }
1223
1224 for (i = 0; i < adapter->num_tx_queues; i++) {
1225 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1226
1227 memcpy(adapter->tx_ring[i], &temp_ring[i],
1228 sizeof(struct ixgbe_ring));
1229 }
1230 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1231 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1232
1233 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1234 sizeof(struct ixgbe_ring));
1235 }
1236
1237 adapter->tx_ring_count = new_tx_count;
1238 }
1239
1240 /* Repeat the process for the Rx rings if needed */
1241 if (new_rx_count != adapter->rx_ring_count) {
1242 for (i = 0; i < adapter->num_rx_queues; i++) {
1243 memcpy(&temp_ring[i], adapter->rx_ring[i],
1244 sizeof(struct ixgbe_ring));
1245
1246 /* Clear copied XDP RX-queue info */
1247 memset(&temp_ring[i].xdp_rxq, 0,
1248 sizeof(temp_ring[i].xdp_rxq));
1249
1250 temp_ring[i].count = new_rx_count;
1251 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
1252 if (err) {
1253 while (i) {
1254 i--;
1255 ixgbe_free_rx_resources(&temp_ring[i]);
1256 }
1257 goto err_setup;
1258 }
1259
1260 }
1261
1262 for (i = 0; i < adapter->num_rx_queues; i++) {
1263 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1264
1265 memcpy(adapter->rx_ring[i], &temp_ring[i],
1266 sizeof(struct ixgbe_ring));
1267 }
1268
1269 adapter->rx_ring_count = new_rx_count;
1270 }
1271
1272err_setup:
1273 ixgbe_up(adapter);
1274 vfree(temp_ring);
1275clear_reset:
1276 clear_bit(__IXGBE_RESETTING, &adapter->state);
1277 return err;
1278}
1279
1280static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1281{
1282 switch (sset) {
1283 case ETH_SS_TEST:
1284 return IXGBE_TEST_LEN;
1285 case ETH_SS_STATS:
1286 return IXGBE_STATS_LEN;
1287 case ETH_SS_PRIV_FLAGS:
1288 return IXGBE_PRIV_FLAGS_STR_LEN;
1289 default:
1290 return -EOPNOTSUPP;
1291 }
1292}
1293
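/* ixgbe_get_ethtool_stats - fill the u64 stats array for ethtool -S
 * The output order must match ixgbe_get_strings(): global stats first,
 * then per-queue Tx/Rx packet and byte counters (read under the
 * u64_stats sequence lock), then per-packet-buffer PFC XON/XOFF
 * counters.
 */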
1294static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1295 struct ethtool_stats *stats, u64 *data)
1296{
1297 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1298 struct rtnl_link_stats64 temp;
1299 const struct rtnl_link_stats64 *net_stats;
1300 unsigned int start;
1301 struct ixgbe_ring *ring;
1302 int i, j;
1303 char *p = NULL;
1304
1305 ixgbe_update_stats(adapter);
1306 net_stats = dev_get_stats(netdev, &temp);
1307 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1308 switch (ixgbe_gstrings_stats[i].type) {
1309 case NETDEV_STATS:
1310 p = (char *) net_stats +
1311 ixgbe_gstrings_stats[i].stat_offset;
1312 break;
1313 case IXGBE_STATS:
1314 p = (char *) adapter +
1315 ixgbe_gstrings_stats[i].stat_offset;
1316 break;
1317 default:
1318 data[i] = 0;
1319 continue;
1320 }
1321
1322 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1323 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1324 }
1325 for (j = 0; j < netdev->num_tx_queues; j++) {
1326 ring = adapter->tx_ring[j];
1327 if (!ring) {
1328 data[i] = 0;
1329 data[i+1] = 0;
1330 i += 2;
1331 continue;
1332 }
1333
1334 do {
1335 start = u64_stats_fetch_begin_irq(&ring->syncp);
1336 data[i] = ring->stats.packets;
1337 data[i+1] = ring->stats.bytes;
1338 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1339 i += 2;
1340 }
1341 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1342 ring = adapter->rx_ring[j];
1343 if (!ring) {
1344 data[i] = 0;
1345 data[i+1] = 0;
1346 i += 2;
1347 continue;
1348 }
1349
1350 do {
1351 start = u64_stats_fetch_begin_irq(&ring->syncp);
1352 data[i] = ring->stats.packets;
1353 data[i+1] = ring->stats.bytes;
1354 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1355 i += 2;
1356 }
1357
1358 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1359 data[i++] = adapter->stats.pxontxc[j];
1360 data[i++] = adapter->stats.pxofftxc[j];
1361 }
1362 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1363 data[i++] = adapter->stats.pxonrxc[j];
1364 data[i++] = adapter->stats.pxoffrxc[j];
1365 }
1366}
1367
1368static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1369 u8 *data)
1370{
1371 unsigned int i;
1372 u8 *p = data;
1373
1374 switch (stringset) {
1375 case ETH_SS_TEST:
1376 for (i = 0; i < IXGBE_TEST_LEN; i++)
1377 ethtool_sprintf(&p, ixgbe_gstrings_test[i]);
1378 break;
1379 case ETH_SS_STATS:
1380 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
1381 ethtool_sprintf(&p,
1382 ixgbe_gstrings_stats[i].stat_string);
1383 for (i = 0; i < netdev->num_tx_queues; i++) {
1384 ethtool_sprintf(&p, "tx_queue_%u_packets", i);
1385 ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
1386 }
1387 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1388 ethtool_sprintf(&p, "rx_queue_%u_packets", i);
1389 ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
1390 }
1391 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1392 ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
1393 ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
1394 }
1395 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1396 ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
1397 ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
1398 }
1399 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1400 break;
1401 case ETH_SS_PRIV_FLAGS:
1402 memcpy(data, ixgbe_priv_flags_strings,
1403 IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
1404 }
1405}
1406
1407static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1408{
1409 struct ixgbe_hw *hw = &adapter->hw;
1410 bool link_up;
1411 u32 link_speed = 0;
1412
1413 if (ixgbe_removed(hw->hw_addr)) {
1414 *data = 1;
1415 return 1;
1416 }
1417 *data = 0;
1418
1419 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1420 if (link_up)
1421 return *data;
1422 else
1423 *data = 1;
1424 return *data;
1425}
1426
1427/* ethtool register test data */
1428struct ixgbe_reg_test {
1429 u16 reg;
1430 u8 array_len;
1431 u8 test_type;
1432 u32 mask;
1433 u32 write;
1434};
1435
1436/* In the hardware, registers are laid out either singly, in arrays
1437 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1438 * most tests take place on arrays or single registers (handled
1439 * as a single-element array) and special-case the tables.
1440 * Table tests are always pattern tests.
1441 *
1442 * We also make provision for some required setup steps by specifying
1443 * registers to be written without any read-back testing.
1444 */
1445
1446#define PATTERN_TEST 1
1447#define SET_READ_TEST 2
1448#define WRITE_NO_TEST 3
1449#define TABLE32_TEST 4
1450#define TABLE64_TEST_LO 5
1451#define TABLE64_TEST_HI 6
1452
1453/* default 82599 register test */
1454static const struct ixgbe_reg_test reg_test_82599[] = {
1455 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1456 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1457 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1458 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1459 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1460 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1461 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1462 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1463 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1464 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1465 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1466 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1467 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1468 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1469 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1470 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1471 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1472 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1473 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1474 { .reg = 0 }
1475};
1476
1477/* default 82598 register test */
1478static const struct ixgbe_reg_test reg_test_82598[] = {
1479 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1480 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1481 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1482 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1483 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1484 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1485 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1486 /* Enable all four RX queues before testing. */
1487 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1488	/* RDH is read-only for 82598; only test RDT. */
1489 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1490 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1491 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1492 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1493 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1494 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1495 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1496 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1497 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1498 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1499 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1500 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1501 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1502 { .reg = 0 }
1503};
1504
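/* reg_pattern_test - write each test pattern (masked by 'write') to a
 * register, read it back and compare against 'mask', restoring the
 * original value afterwards.  On mismatch the failing register offset
 * is stored in *data and true is returned.
 */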
1505static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1506 u32 mask, u32 write)
1507{
1508 u32 pat, val, before;
1509 static const u32 test_pattern[] = {
1510 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1511
1512 if (ixgbe_removed(adapter->hw.hw_addr)) {
1513 *data = 1;
1514 return true;
1515 }
1516 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1517 before = ixgbe_read_reg(&adapter->hw, reg);
1518 ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
1519 val = ixgbe_read_reg(&adapter->hw, reg);
1520 if (val != (test_pattern[pat] & write & mask)) {
1521 e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1522 reg, val, (test_pattern[pat] & write & mask));
1523 *data = reg;
1524 ixgbe_write_reg(&adapter->hw, reg, before);
1525 return true;
1526 }
1527 ixgbe_write_reg(&adapter->hw, reg, before);
1528 }
1529 return false;
1530}
1531
1532static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1533 u32 mask, u32 write)
1534{
1535 u32 val, before;
1536
1537 if (ixgbe_removed(adapter->hw.hw_addr)) {
1538 *data = 1;
1539 return true;
1540 }
1541 before = ixgbe_read_reg(&adapter->hw, reg);
1542 ixgbe_write_reg(&adapter->hw, reg, write & mask);
1543 val = ixgbe_read_reg(&adapter->hw, reg);
1544 if ((write & mask) != (val & mask)) {
1545 e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
1546 reg, (val & mask), (write & mask));
1547 *data = reg;
1548 ixgbe_write_reg(&adapter->hw, reg, before);
1549 return true;
1550 }
1551 ixgbe_write_reg(&adapter->hw, reg, before);
1552 return false;
1553}
1554
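/* ixgbe_reg_test - run the MAC-specific register test table
 * STATUS is handled separately (only the 'toggle' bits are compared
 * after writing them back), then every table entry above is exercised
 * with the pattern, set/read, write-only or table test variants until
 * the terminating zero entry or the first failure.
 */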
1555static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1556{
1557 const struct ixgbe_reg_test *test;
1558 u32 value, before, after;
1559 u32 i, toggle;
1560
1561 if (ixgbe_removed(adapter->hw.hw_addr)) {
1562 e_err(drv, "Adapter removed - register test blocked\n");
1563 *data = 1;
1564 return 1;
1565 }
1566 switch (adapter->hw.mac.type) {
1567 case ixgbe_mac_82598EB:
1568 toggle = 0x7FFFF3FF;
1569 test = reg_test_82598;
1570 break;
1571 case ixgbe_mac_82599EB:
1572 case ixgbe_mac_X540:
1573 case ixgbe_mac_X550:
1574 case ixgbe_mac_X550EM_x:
1575 case ixgbe_mac_x550em_a:
1576 toggle = 0x7FFFF30F;
1577 test = reg_test_82599;
1578 break;
1579 default:
1580 *data = 1;
1581 return 1;
1582 }
1583
1584 /*
1585 * Because the status register is such a special case,
1586 * we handle it separately from the rest of the register
1587 * tests. Some bits are read-only, some toggle, and some
1588 * are writeable on newer MACs.
1589 */
1590 before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
1591 value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
1592 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
1593 after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
1594 if (value != after) {
1595 e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1596 after, value);
1597 *data = 1;
1598 return 1;
1599 }
1600 /* restore previous status */
1601 ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
1602
1603 /*
1604 * Perform the remainder of the register test, looping through
1605 * the test table until we either fail or reach the null entry.
1606 */
1607 while (test->reg) {
1608 for (i = 0; i < test->array_len; i++) {
1609 bool b = false;
1610
1611 switch (test->test_type) {
1612 case PATTERN_TEST:
1613 b = reg_pattern_test(adapter, data,
1614 test->reg + (i * 0x40),
1615 test->mask,
1616 test->write);
1617 break;
1618 case SET_READ_TEST:
1619 b = reg_set_and_check(adapter, data,
1620 test->reg + (i * 0x40),
1621 test->mask,
1622 test->write);
1623 break;
1624 case WRITE_NO_TEST:
1625 ixgbe_write_reg(&adapter->hw,
1626 test->reg + (i * 0x40),
1627 test->write);
1628 break;
1629 case TABLE32_TEST:
1630 b = reg_pattern_test(adapter, data,
1631 test->reg + (i * 4),
1632 test->mask,
1633 test->write);
1634 break;
1635 case TABLE64_TEST_LO:
1636 b = reg_pattern_test(adapter, data,
1637 test->reg + (i * 8),
1638 test->mask,
1639 test->write);
1640 break;
1641 case TABLE64_TEST_HI:
1642 b = reg_pattern_test(adapter, data,
1643 (test->reg + 4) + (i * 8),
1644 test->mask,
1645 test->write);
1646 break;
1647 }
1648 if (b)
1649 return 1;
1650 }
1651 test++;
1652 }
1653
1654 *data = 0;
1655 return 0;
1656}
1657
1658static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1659{
1660 struct ixgbe_hw *hw = &adapter->hw;
1661 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1662 *data = 1;
1663 else
1664 *data = 0;
1665 return *data;
1666}
1667
1668static irqreturn_t ixgbe_test_intr(int irq, void *data)
1669{
1670 struct net_device *netdev = (struct net_device *) data;
1671 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1672
1673 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1674
1675 return IRQ_HANDLED;
1676}
1677
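/* ixgbe_intr_test - legacy/MSI interrupt self-test
 * MSI-X configurations are skipped.  For each of the first 10 cause
 * bits the test masks or unmasks the bit through EIMC/EIMS, forces it
 * via EICS and checks whether test_icr (set by ixgbe_test_intr) matches
 * the expected result.
 */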
1678static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1679{
1680 struct net_device *netdev = adapter->netdev;
1681 u32 mask, i = 0, shared_int = true;
1682 u32 irq = adapter->pdev->irq;
1683
1684 *data = 0;
1685
1686 /* Hook up test interrupt handler just for this test */
1687 if (adapter->msix_entries) {
1688 /* NOTE: we don't test MSI-X interrupts here, yet */
1689 return 0;
1690 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1691 shared_int = false;
1692 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1693 netdev)) {
1694 *data = 1;
1695 return -1;
1696 }
1697 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1698 netdev->name, netdev)) {
1699 shared_int = false;
1700 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1701 netdev->name, netdev)) {
1702 *data = 1;
1703 return -1;
1704 }
1705 e_info(hw, "testing %s interrupt\n", shared_int ?
1706 "shared" : "unshared");
1707
1708 /* Disable all the interrupts */
1709 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1710 IXGBE_WRITE_FLUSH(&adapter->hw);
1711 usleep_range(10000, 20000);
1712
1713 /* Test each interrupt */
1714 for (; i < 10; i++) {
1715 /* Interrupt to test */
1716 mask = BIT(i);
1717
1718 if (!shared_int) {
1719 /*
1720 * Disable the interrupts to be reported in
1721 * the cause register and then force the same
1722 * interrupt and see if one gets posted. If
1723 * an interrupt was posted to the bus, the
1724 * test failed.
1725 */
1726 adapter->test_icr = 0;
1727 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1728 ~mask & 0x00007FFF);
1729 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1730 ~mask & 0x00007FFF);
1731 IXGBE_WRITE_FLUSH(&adapter->hw);
1732 usleep_range(10000, 20000);
1733
1734 if (adapter->test_icr & mask) {
1735 *data = 3;
1736 break;
1737 }
1738 }
1739
1740 /*
1741 * Enable the interrupt to be reported in the cause
1742 * register and then force the same interrupt and see
1743 * if one gets posted. If an interrupt was not posted
1744 * to the bus, the test failed.
1745 */
1746 adapter->test_icr = 0;
1747 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1748 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1749 IXGBE_WRITE_FLUSH(&adapter->hw);
1750 usleep_range(10000, 20000);
1751
1752 if (!(adapter->test_icr & mask)) {
1753 *data = 4;
1754 break;
1755 }
1756
1757 if (!shared_int) {
1758 /*
1759 * Disable the other interrupts to be reported in
1760 * the cause register and then force the other
1761 * interrupts and see if any get posted. If
1762 * an interrupt was posted to the bus, the
1763 * test failed.
1764 */
1765 adapter->test_icr = 0;
1766 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1767 ~mask & 0x00007FFF);
1768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1769 ~mask & 0x00007FFF);
1770 IXGBE_WRITE_FLUSH(&adapter->hw);
1771 usleep_range(10000, 20000);
1772
1773 if (adapter->test_icr) {
1774 *data = 5;
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* Disable all the interrupts */
1781 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1782 IXGBE_WRITE_FLUSH(&adapter->hw);
1783 usleep_range(10000, 20000);
1784
1785 /* Unhook test interrupt handler */
1786 free_irq(irq, netdev);
1787
1788 return *data;
1789}
1790
1791static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1792{
1793	/* Shut down the DMA engines now so they can be reinitialized later.
1794	 * Since the test rings and the normally used rings overlap on
1795	 * queue 0, the standard disable Rx/Tx calls take care of disabling
1796	 * the test rings for us.
1797	 */
1798
1799 /* first Rx */
1800 ixgbe_disable_rx(adapter);
1801
1802 /* now Tx */
1803 ixgbe_disable_tx(adapter);
1804
1805 ixgbe_reset(adapter);
1806
1807 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1808 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1809}
1810
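/* ixgbe_setup_desc_rings - allocate dedicated Tx/Rx rings for the MAC
 * loopback test.  Both rings reuse the register indices of queue 0;
 * Tx DMA is enabled on 82599 and later MACs, and IXGBE_RXCTRL_DMBYPS is
 * set in RXCTRL before Rx is re-enabled.
 */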
1811static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1812{
1813 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1814 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1815 struct ixgbe_hw *hw = &adapter->hw;
1816 u32 rctl, reg_data;
1817 int ret_val;
1818 int err;
1819
1820 /* Setup Tx descriptor ring and Tx buffers */
1821 tx_ring->count = IXGBE_DEFAULT_TXD;
1822 tx_ring->queue_index = 0;
1823 tx_ring->dev = &adapter->pdev->dev;
1824 tx_ring->netdev = adapter->netdev;
1825 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1826
1827 err = ixgbe_setup_tx_resources(tx_ring);
1828 if (err)
1829 return 1;
1830
1831 switch (adapter->hw.mac.type) {
1832 case ixgbe_mac_82599EB:
1833 case ixgbe_mac_X540:
1834 case ixgbe_mac_X550:
1835 case ixgbe_mac_X550EM_x:
1836 case ixgbe_mac_x550em_a:
1837 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1838 reg_data |= IXGBE_DMATXCTL_TE;
1839 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1840 break;
1841 default:
1842 break;
1843 }
1844
1845 ixgbe_configure_tx_ring(adapter, tx_ring);
1846
1847 /* Setup Rx Descriptor ring and Rx buffers */
1848 rx_ring->count = IXGBE_DEFAULT_RXD;
1849 rx_ring->queue_index = 0;
1850 rx_ring->dev = &adapter->pdev->dev;
1851 rx_ring->netdev = adapter->netdev;
1852 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1853
1854 err = ixgbe_setup_rx_resources(adapter, rx_ring);
1855 if (err) {
1856 ret_val = 4;
1857 goto err_nomem;
1858 }
1859
1860 hw->mac.ops.disable_rx(hw);
1861
1862 ixgbe_configure_rx_ring(adapter, rx_ring);
1863
1864 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1865 rctl |= IXGBE_RXCTRL_DMBYPS;
1866 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1867
1868 hw->mac.ops.enable_rx(hw);
1869
1870 return 0;
1871
1872err_nomem:
1873 ixgbe_free_desc_rings(adapter);
1874 return ret_val;
1875}
1876
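/* ixgbe_setup_loopback_test - put the MAC into local loopback
 * Sets HLREG0.LPBK plus permissive FCTRL bits (BAM/SBP/MPE), forces
 * link up (MACC.FLU on X540/X550, AUTOC.FLU otherwise) and powers down
 * the Atlas Tx lanes on 82598.
 */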
1877static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1878{
1879 struct ixgbe_hw *hw = &adapter->hw;
1880 u32 reg_data;
1881
1883 /* Setup MAC loopback */
1884 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1885 reg_data |= IXGBE_HLREG0_LPBK;
1886 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1887
1888 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1889 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1890 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1891
1892	/* X540 and X550 need to set the MACC.FLU bit to force link up */
1893 switch (adapter->hw.mac.type) {
1894 case ixgbe_mac_X540:
1895 case ixgbe_mac_X550:
1896 case ixgbe_mac_X550EM_x:
1897 case ixgbe_mac_x550em_a:
1898 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1899 reg_data |= IXGBE_MACC_FLU;
1900 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1901 break;
1902 default:
1903 if (hw->mac.orig_autoc) {
1904 reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
1905 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1906 } else {
1907 return 10;
1908 }
1909 }
1910 IXGBE_WRITE_FLUSH(hw);
1911 usleep_range(10000, 20000);
1912
1913 /* Disable Atlas Tx lanes; re-enabled in reset path */
1914 if (hw->mac.type == ixgbe_mac_82598EB) {
1915 u8 atlas;
1916
1917 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1918 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1919 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1920
1921 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1922 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1923 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1924
1925 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1926 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1927 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1928
1929 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1930 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1931 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1932 }
1933
1934 return 0;
1935}
1936
1937static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1938{
1939 u32 reg_data;
1940
1941 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1942 reg_data &= ~IXGBE_HLREG0_LPBK;
1943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1944}
1945
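/* Loopback test frames are filled with 0xFF in the first half and 0xAA
 * over part of the second half, with marker bytes 0xBE/0xAF at fixed
 * offsets; ixgbe_check_lbtest_frame() verifies those markers on the
 * receive side.
 */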
1946static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1947 unsigned int frame_size)
1948{
1949 memset(skb->data, 0xFF, frame_size);
1950 frame_size >>= 1;
1951 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1952 skb->data[frame_size + 10] = 0xBE;
1953 skb->data[frame_size + 12] = 0xAF;
1954}
1955
1956static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1957 unsigned int frame_size)
1958{
1959 unsigned char *data;
1960 bool match = true;
1961
1962 frame_size >>= 1;
1963
1964 data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1965
1966 if (data[3] != 0xFF ||
1967 data[frame_size + 10] != 0xBE ||
1968 data[frame_size + 12] != 0xAF)
1969 match = false;
1970
1971 kunmap(rx_buffer->page);
1972
1973 return match;
1974}
1975
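/* ixgbe_clean_test_rings - reclaim completed loopback test descriptors
 * Walks the Tx ring up to next_to_use, freeing skbs and unmapping
 * buffers (stopping early if a descriptor has not completed), then
 * counts received frames that pass ixgbe_check_lbtest_frame() and
 * refills the Rx ring.  Returns the number of good frames received.
 */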
1976static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1977 struct ixgbe_ring *tx_ring,
1978 unsigned int size)
1979{
1980 union ixgbe_adv_rx_desc *rx_desc;
1981 u16 rx_ntc, tx_ntc, count = 0;
1982
1983 /* initialize next to clean and descriptor values */
1984 rx_ntc = rx_ring->next_to_clean;
1985 tx_ntc = tx_ring->next_to_clean;
1986 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1987
1988 while (tx_ntc != tx_ring->next_to_use) {
1989 union ixgbe_adv_tx_desc *tx_desc;
1990 struct ixgbe_tx_buffer *tx_buffer;
1991
1992 tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
1993
1994 /* if DD is not set transmit has not completed */
1995 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
1996 return count;
1997
1998 /* unmap buffer on Tx side */
1999 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
2000
2001 /* Free all the Tx ring sk_buffs */
2002 dev_kfree_skb_any(tx_buffer->skb);
2003
2004 /* unmap skb header data */
2005 dma_unmap_single(tx_ring->dev,
2006 dma_unmap_addr(tx_buffer, dma),
2007 dma_unmap_len(tx_buffer, len),
2008 DMA_TO_DEVICE);
2009 dma_unmap_len_set(tx_buffer, len, 0);
2010
2011 /* increment Tx next to clean counter */
2012 tx_ntc++;
2013 if (tx_ntc == tx_ring->count)
2014 tx_ntc = 0;
2015 }
2016
2017 while (rx_desc->wb.upper.length) {
2018 struct ixgbe_rx_buffer *rx_buffer;
2019
2020 /* check Rx buffer */
2021 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
2022
2023 /* sync Rx buffer for CPU read */
2024 dma_sync_single_for_cpu(rx_ring->dev,
2025 rx_buffer->dma,
2026 ixgbe_rx_bufsz(rx_ring),
2027 DMA_FROM_DEVICE);
2028
2029 /* verify contents of skb */
2030 if (ixgbe_check_lbtest_frame(rx_buffer, size))
2031 count++;
2032 else
2033 break;
2034
2035 /* sync Rx buffer for device write */
2036 dma_sync_single_for_device(rx_ring->dev,
2037 rx_buffer->dma,
2038 ixgbe_rx_bufsz(rx_ring),
2039 DMA_FROM_DEVICE);
2040
2041 /* increment Rx next to clean counter */
2042 rx_ntc++;
2043 if (rx_ntc == rx_ring->count)
2044 rx_ntc = 0;
2045
2046 /* fetch next descriptor */
2047 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
2048 }
2049
2050 netdev_tx_reset_queue(txring_txq(tx_ring));
2051
2052 /* re-map buffers to ring, store next to clean values */
2053 ixgbe_alloc_rx_buffers(rx_ring, count);
2054 rx_ring->next_to_clean = rx_ntc;
2055 tx_ring->next_to_clean = tx_ntc;
2056
2057 return count;
2058}
2059
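/* ixgbe_run_loopback_test - transmit and verify looped-back frames
 * DCB is temporarily disabled since it can modify frames on Tx.  Each
 * iteration queues 64 references to a single 1024-byte test skb, waits
 * 200 ms, then checks that all 64 frames came back intact; the loop
 * count is sized to wrap the larger of the two test rings.
 */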
2060static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
2061{
2062 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
2063 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
2064 int i, j, lc, good_cnt, ret_val = 0;
2065 unsigned int size = 1024;
2066 netdev_tx_t tx_ret_val;
2067 struct sk_buff *skb;
2068 u32 flags_orig = adapter->flags;
2069
2070 /* DCB can modify the frames on Tx */
2071 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2072
2073 /* allocate test skb */
2074 skb = alloc_skb(size, GFP_KERNEL);
2075 if (!skb)
2076 return 11;
2077
2078 /* place data into test skb */
2079 ixgbe_create_lbtest_frame(skb, size);
2080 skb_put(skb, size);
2081
2082	/*
2083	 * Calculate the loop count based on the largest descriptor ring.
2084	 * The idea is to wrap the largest ring a number of times using 64
2085	 * send/receive pairs during each loop.
2086	 */
2087
2088 if (rx_ring->count <= tx_ring->count)
2089 lc = ((tx_ring->count / 64) * 2) + 1;
2090 else
2091 lc = ((rx_ring->count / 64) * 2) + 1;
2092
2093 for (j = 0; j <= lc; j++) {
2094 /* reset count of good packets */
2095 good_cnt = 0;
2096
2097		/* place 64 packets on the transmit queue */
2098 for (i = 0; i < 64; i++) {
2099 skb_get(skb);
2100 tx_ret_val = ixgbe_xmit_frame_ring(skb,
2101 adapter,
2102 tx_ring);
2103 if (tx_ret_val == NETDEV_TX_OK)
2104 good_cnt++;
2105 }
2106
2107 if (good_cnt != 64) {
2108 ret_val = 12;
2109 break;
2110 }
2111
2112 /* allow 200 milliseconds for packets to go from Tx to Rx */
2113 msleep(200);
2114
2115 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
2116 if (good_cnt != 64) {
2117 ret_val = 13;
2118 break;
2119 }
2120 }
2121
2122 /* free the original skb */
2123 kfree_skb(skb);
2124 adapter->flags = flags_orig;
2125
2126 return ret_val;
2127}
2128
2129static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
2130{
2131 *data = ixgbe_setup_desc_rings(adapter);
2132 if (*data)
2133 goto out;
2134 *data = ixgbe_setup_loopback_test(adapter);
2135 if (*data)
2136 goto err_loopback;
2137 *data = ixgbe_run_loopback_test(adapter);
2138 ixgbe_loopback_cleanup(adapter);
2139
2140err_loopback:
2141 ixgbe_free_desc_rings(adapter);
2142out:
2143 return *data;
2144}
2145
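/* ixgbe_diag_test - ethtool self-test entry point
 * Online mode runs only the link test.  Offline mode closes a running
 * interface, then runs the register, EEPROM, interrupt and MAC loopback
 * tests with resets in between; it is refused while active VFs are
 * present, and loopback is skipped when SR-IOV or VMDq is enabled.
 */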
2146static void ixgbe_diag_test(struct net_device *netdev,
2147 struct ethtool_test *eth_test, u64 *data)
2148{
2149 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2150 bool if_running = netif_running(netdev);
2151
2152 if (ixgbe_removed(adapter->hw.hw_addr)) {
2153 e_err(hw, "Adapter removed - test blocked\n");
2154 data[0] = 1;
2155 data[1] = 1;
2156 data[2] = 1;
2157 data[3] = 1;
2158 data[4] = 1;
2159 eth_test->flags |= ETH_TEST_FL_FAILED;
2160 return;
2161 }
2162 set_bit(__IXGBE_TESTING, &adapter->state);
2163 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
2164 struct ixgbe_hw *hw = &adapter->hw;
2165
2166 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
2167 int i;
2168 for (i = 0; i < adapter->num_vfs; i++) {
2169 if (adapter->vfinfo[i].clear_to_send) {
2170 netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
2171 data[0] = 1;
2172 data[1] = 1;
2173 data[2] = 1;
2174 data[3] = 1;
2175 data[4] = 1;
2176 eth_test->flags |= ETH_TEST_FL_FAILED;
2177 clear_bit(__IXGBE_TESTING,
2178 &adapter->state);
2179 return;
2180 }
2181 }
2182 }
2183
2184 /* Offline tests */
2185 e_info(hw, "offline testing starting\n");
2186
2187		/* Link test is performed before the hardware reset so autoneg
2188		 * doesn't interfere with the test result.
2189		 */
2190 if (ixgbe_link_test(adapter, &data[4]))
2191 eth_test->flags |= ETH_TEST_FL_FAILED;
2192
2193 if (if_running)
2194 /* indicate we're in test mode */
2195 ixgbe_close(netdev);
2196 else
2197 ixgbe_reset(adapter);
2198
2199 e_info(hw, "register testing starting\n");
2200 if (ixgbe_reg_test(adapter, &data[0]))
2201 eth_test->flags |= ETH_TEST_FL_FAILED;
2202
2203 ixgbe_reset(adapter);
2204 e_info(hw, "eeprom testing starting\n");
2205 if (ixgbe_eeprom_test(adapter, &data[1]))
2206 eth_test->flags |= ETH_TEST_FL_FAILED;
2207
2208 ixgbe_reset(adapter);
2209 e_info(hw, "interrupt testing starting\n");
2210 if (ixgbe_intr_test(adapter, &data[2]))
2211 eth_test->flags |= ETH_TEST_FL_FAILED;
2212
2213 /* If SRIOV or VMDq is enabled then skip MAC
2214 * loopback diagnostic. */
2215 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
2216 IXGBE_FLAG_VMDQ_ENABLED)) {
2217 e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
2218 data[3] = 0;
2219 goto skip_loopback;
2220 }
2221
2222 ixgbe_reset(adapter);
2223 e_info(hw, "loopback testing starting\n");
2224 if (ixgbe_loopback_test(adapter, &data[3]))
2225 eth_test->flags |= ETH_TEST_FL_FAILED;
2226
2227skip_loopback:
2228 ixgbe_reset(adapter);
2229
2230 /* clear testing bit and return adapter to previous state */
2231 clear_bit(__IXGBE_TESTING, &adapter->state);
2232 if (if_running)
2233 ixgbe_open(netdev);
2234 else if (hw->mac.ops.disable_tx_laser)
2235 hw->mac.ops.disable_tx_laser(hw);
2236 } else {
2237 e_info(hw, "online testing starting\n");
2238
2239 /* Online tests */
2240 if (ixgbe_link_test(adapter, &data[4]))
2241 eth_test->flags |= ETH_TEST_FL_FAILED;
2242
2243 /* Offline tests aren't run; pass by default */
2244 data[0] = 0;
2245 data[1] = 0;
2246 data[2] = 0;
2247 data[3] = 0;
2248
2249 clear_bit(__IXGBE_TESTING, &adapter->state);
2250 }
2251}
2252
2253static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
2254 struct ethtool_wolinfo *wol)
2255{
2256 struct ixgbe_hw *hw = &adapter->hw;
2257 int retval = 0;
2258
2259 /* WOL not supported for all devices */
2260 if (!ixgbe_wol_supported(adapter, hw->device_id,
2261 hw->subsystem_device_id)) {
2262 retval = 1;
2263 wol->supported = 0;
2264 }
2265
2266 return retval;
2267}
2268
2269static void ixgbe_get_wol(struct net_device *netdev,
2270 struct ethtool_wolinfo *wol)
2271{
2272 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2273
2274 wol->supported = WAKE_UCAST | WAKE_MCAST |
2275 WAKE_BCAST | WAKE_MAGIC;
2276 wol->wolopts = 0;
2277
2278 if (ixgbe_wol_exclusion(adapter, wol) ||
2279 !device_can_wakeup(&adapter->pdev->dev))
2280 return;
2281
2282 if (adapter->wol & IXGBE_WUFC_EX)
2283 wol->wolopts |= WAKE_UCAST;
2284 if (adapter->wol & IXGBE_WUFC_MC)
2285 wol->wolopts |= WAKE_MCAST;
2286 if (adapter->wol & IXGBE_WUFC_BC)
2287 wol->wolopts |= WAKE_BCAST;
2288 if (adapter->wol & IXGBE_WUFC_MAG)
2289 wol->wolopts |= WAKE_MAGIC;
2290}
2291
2292static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2293{
2294 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2295
2296 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
2297 WAKE_FILTER))
2298 return -EOPNOTSUPP;
2299
2300 if (ixgbe_wol_exclusion(adapter, wol))
2301 return wol->wolopts ? -EOPNOTSUPP : 0;
2302
2303 adapter->wol = 0;
2304
2305 if (wol->wolopts & WAKE_UCAST)
2306 adapter->wol |= IXGBE_WUFC_EX;
2307 if (wol->wolopts & WAKE_MCAST)
2308 adapter->wol |= IXGBE_WUFC_MC;
2309 if (wol->wolopts & WAKE_BCAST)
2310 adapter->wol |= IXGBE_WUFC_BC;
2311 if (wol->wolopts & WAKE_MAGIC)
2312 adapter->wol |= IXGBE_WUFC_MAG;
2313
2314 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2315
2316 return 0;
2317}
2318
2319static int ixgbe_nway_reset(struct net_device *netdev)
2320{
2321 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2322
2323 if (netif_running(netdev))
2324 ixgbe_reinit_locked(adapter);
2325
2326 return 0;
2327}
2328
2329static int ixgbe_set_phys_id(struct net_device *netdev,
2330 enum ethtool_phys_id_state state)
2331{
2332 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2333 struct ixgbe_hw *hw = &adapter->hw;
2334
2335 if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
2336 return -EOPNOTSUPP;
2337
2338 switch (state) {
2339 case ETHTOOL_ID_ACTIVE:
2340 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2341 return 2;
2342
2343 case ETHTOOL_ID_ON:
2344 hw->mac.ops.led_on(hw, hw->mac.led_link_act);
2345 break;
2346
2347 case ETHTOOL_ID_OFF:
2348 hw->mac.ops.led_off(hw, hw->mac.led_link_act);
2349 break;
2350
2351 case ETHTOOL_ID_INACTIVE:
2352 /* Restore LED settings */
2353 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2354 break;
2355 }
2356
2357 return 0;
2358}
2359
2360static int ixgbe_get_coalesce(struct net_device *netdev,
2361 struct ethtool_coalesce *ec)
2362{
2363 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2364
2365 /* only valid if in constant ITR mode */
2366 if (adapter->rx_itr_setting <= 1)
2367 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2368 else
2369 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2370
2371 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2372 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2373 return 0;
2374
2375 /* only valid if in constant ITR mode */
2376 if (adapter->tx_itr_setting <= 1)
2377 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2378 else
2379 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2380
2381 return 0;
2382}
2383
2384/*
2385 * This function must be called before setting the new value of
2386 * rx_itr_setting.
2387 */
2388static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2389{
2390 struct net_device *netdev = adapter->netdev;
2391
2392 /* nothing to do if LRO or RSC are not enabled */
2393 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2394 !(netdev->features & NETIF_F_LRO))
2395 return false;
2396
2397 /* check the feature flag value and enable RSC if necessary */
2398 if (adapter->rx_itr_setting == 1 ||
2399 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2400 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2401 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2402 e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
2403 return true;
2404 }
2405 /* if interrupt rate is too high then disable RSC */
2406 } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2407 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2408 e_info(probe, "rx-usecs set too low, disabling RSC\n");
2409 return true;
2410 }
2411 return false;
2412}
2413
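/* ixgbe_set_coalesce - apply ethtool interrupt coalescing settings
 * ITR settings are stored as usecs << 2 (values of 0 and 1 are kept
 * as-is).  With mixed Rx/Tx vectors, Tx-specific changes are rejected
 * and the Rx value is applied to both.  A reset is triggered when the
 * change crosses the IXGBE_100K_ITR boundary (which affects
 * TXDCTL.WTHRESH) or when RSC must be toggled by ixgbe_update_rsc().
 */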
2414static int ixgbe_set_coalesce(struct net_device *netdev,
2415 struct ethtool_coalesce *ec)
2416{
2417 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2418 struct ixgbe_q_vector *q_vector;
2419 int i;
2420 u16 tx_itr_param, rx_itr_param, tx_itr_prev;
2421 bool need_reset = false;
2422
2423 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
2424 /* reject Tx specific changes in case of mixed RxTx vectors */
2425 if (ec->tx_coalesce_usecs)
2426 return -EINVAL;
2427 tx_itr_prev = adapter->rx_itr_setting;
2428 } else {
2429 tx_itr_prev = adapter->tx_itr_setting;
2430 }
2431
2432 if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2433 (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2434 return -EINVAL;
2435
2436 if (ec->rx_coalesce_usecs > 1)
2437 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2438 else
2439 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2440
2441 if (adapter->rx_itr_setting == 1)
2442 rx_itr_param = IXGBE_20K_ITR;
2443 else
2444 rx_itr_param = adapter->rx_itr_setting;
2445
2446 if (ec->tx_coalesce_usecs > 1)
2447 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2448 else
2449 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2450
2451 if (adapter->tx_itr_setting == 1)
2452 tx_itr_param = IXGBE_12K_ITR;
2453 else
2454 tx_itr_param = adapter->tx_itr_setting;
2455
2456 /* mixed Rx/Tx */
2457 if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2458 adapter->tx_itr_setting = adapter->rx_itr_setting;
2459
2460 /* detect ITR changes that require update of TXDCTL.WTHRESH */
2461 if ((adapter->tx_itr_setting != 1) &&
2462 (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
2463 if ((tx_itr_prev == 1) ||
2464 (tx_itr_prev >= IXGBE_100K_ITR))
2465 need_reset = true;
2466 } else {
2467 if ((tx_itr_prev != 1) &&
2468 (tx_itr_prev < IXGBE_100K_ITR))
2469 need_reset = true;
2470 }
2471
2472 /* check the old value and enable RSC if necessary */
2473 need_reset |= ixgbe_update_rsc(adapter);
2474
2475 for (i = 0; i < adapter->num_q_vectors; i++) {
2476 q_vector = adapter->q_vector[i];
2477 if (q_vector->tx.count && !q_vector->rx.count)
2478 /* tx only */
2479 q_vector->itr = tx_itr_param;
2480 else
2481 /* rx only or mixed */
2482 q_vector->itr = rx_itr_param;
2483 ixgbe_write_eitr(q_vector);
2484 }
2485
2486	/*
2487	 * Do the reset at the end so the EITR==0 case is handled correctly with
2488	 * respect to stopping Tx and changing TXDCTL.WTHRESH; the reset also
2489	 * locks in any RSC enable/disable change, which requires a reset.
2490	 */
2491 if (need_reset)
2492 ixgbe_do_reset(netdev);
2493
2494 return 0;
2495}
2496
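/*
 * Translate the Flow Director filter stored at fsp->location back into an
 * ethtool_rx_flow_spec for ETHTOOL_GRXCLSRULE.
 */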
2497static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2498 struct ethtool_rxnfc *cmd)
2499{
2500 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2501 struct ethtool_rx_flow_spec *fsp =
2502 (struct ethtool_rx_flow_spec *)&cmd->fs;
2503 struct hlist_node *node2;
2504 struct ixgbe_fdir_filter *rule = NULL;
2505
2506 /* report total rule count */
2507 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2508
2509 hlist_for_each_entry_safe(rule, node2,
2510 &adapter->fdir_filter_list, fdir_node) {
2511 if (fsp->location <= rule->sw_idx)
2512 break;
2513 }
2514
2515 if (!rule || fsp->location != rule->sw_idx)
2516 return -EINVAL;
2517
2518 /* fill out the flow spec entry */
2519
2520 /* set flow type field */
2521 switch (rule->filter.formatted.flow_type) {
2522 case IXGBE_ATR_FLOW_TYPE_TCPV4:
2523 fsp->flow_type = TCP_V4_FLOW;
2524 break;
2525 case IXGBE_ATR_FLOW_TYPE_UDPV4:
2526 fsp->flow_type = UDP_V4_FLOW;
2527 break;
2528 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2529 fsp->flow_type = SCTP_V4_FLOW;
2530 break;
2531 case IXGBE_ATR_FLOW_TYPE_IPV4:
2532 fsp->flow_type = IP_USER_FLOW;
2533 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2534 fsp->h_u.usr_ip4_spec.proto = 0;
2535 fsp->m_u.usr_ip4_spec.proto = 0;
2536 break;
2537 default:
2538 return -EINVAL;
2539 }
2540
2541 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2542 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2543 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2544 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2545 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2546 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2547 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2548 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2549 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2550 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2551 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2552 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2553 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2554 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2555 fsp->flow_type |= FLOW_EXT;
2556
2557 /* record action */
2558 if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2559 fsp->ring_cookie = RX_CLS_FLOW_DISC;
2560 else
2561 fsp->ring_cookie = rule->action;
2562
2563 return 0;
2564}
2565
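/*
 * Report the software indices of all Flow Director filters for
 * ETHTOOL_GRXCLSRLALL; returns -EMSGSIZE if the caller's buffer is too
 * small for the full list.
 */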
2566static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2567 struct ethtool_rxnfc *cmd,
2568 u32 *rule_locs)
2569{
2570 struct hlist_node *node2;
2571 struct ixgbe_fdir_filter *rule;
2572 int cnt = 0;
2573
2574 /* report total rule count */
2575 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2576
2577 hlist_for_each_entry_safe(rule, node2,
2578 &adapter->fdir_filter_list, fdir_node) {
2579 if (cnt == cmd->rule_cnt)
2580 return -EMSGSIZE;
2581 rule_locs[cnt] = rule->sw_idx;
2582 cnt++;
2583 }
2584
2585 cmd->rule_cnt = cnt;
2586
2587 return 0;
2588}
2589
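/* Report which header fields are hashed for RSS for the given flow type */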
2590static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2591 struct ethtool_rxnfc *cmd)
2592{
2593 cmd->data = 0;
2594
2595 /* Report default options for RSS on ixgbe */
2596 switch (cmd->flow_type) {
2597 case TCP_V4_FLOW:
2598 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2599 fallthrough;
2600 case UDP_V4_FLOW:
2601 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2602 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2603 fallthrough;
2604 case SCTP_V4_FLOW:
2605 case AH_ESP_V4_FLOW:
2606 case AH_V4_FLOW:
2607 case ESP_V4_FLOW:
2608 case IPV4_FLOW:
2609 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2610 break;
2611 case TCP_V6_FLOW:
2612 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2613 fallthrough;
2614 case UDP_V6_FLOW:
2615 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2616 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2617 fallthrough;
2618 case SCTP_V6_FLOW:
2619 case AH_ESP_V6_FLOW:
2620 case AH_V6_FLOW:
2621 case ESP_V6_FLOW:
2622 case IPV6_FLOW:
2623 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2624 break;
2625 default:
2626 return -EINVAL;
2627 }
2628
2629 return 0;
2630}
2631
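/* Dispatch ETHTOOL_GRX* requests to the helpers above */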
2632static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2633 u32 *rule_locs)
2634{
2635 struct ixgbe_adapter *adapter = netdev_priv(dev);
2636 int ret = -EOPNOTSUPP;
2637
2638 switch (cmd->cmd) {
2639 case ETHTOOL_GRXRINGS:
2640 cmd->data = adapter->num_rx_queues;
2641 ret = 0;
2642 break;
2643 case ETHTOOL_GRXCLSRLCNT:
2644 cmd->rule_cnt = adapter->fdir_filter_count;
2645 ret = 0;
2646 break;
2647 case ETHTOOL_GRXCLSRULE:
2648 ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2649 break;
2650 case ETHTOOL_GRXCLSRLALL:
2651 ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2652 break;
2653 case ETHTOOL_GRXFH:
2654 ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2655 break;
2656 default:
2657 break;
2658 }
2659
2660 return ret;
2661}
2662
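/*
 * Insert, replace or (when input is NULL) delete the Flow Director filter
 * at sw_idx in the driver's sorted filter list, keeping fdir_filter_count
 * in sync.  Callers hold fdir_perfect_lock.
 */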
2663int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2664 struct ixgbe_fdir_filter *input,
2665 u16 sw_idx)
2666{
2667 struct ixgbe_hw *hw = &adapter->hw;
2668 struct hlist_node *node2;
2669 struct ixgbe_fdir_filter *rule, *parent;
2670 int err = -EINVAL;
2671
2672 parent = NULL;
2673 rule = NULL;
2674
2675 hlist_for_each_entry_safe(rule, node2,
2676 &adapter->fdir_filter_list, fdir_node) {
2677 /* hash found, or no matching entry */
2678 if (rule->sw_idx >= sw_idx)
2679 break;
2680 parent = rule;
2681 }
2682
2683	/* if there is an old rule occupying our place, remove it */
2684 if (rule && (rule->sw_idx == sw_idx)) {
2685 if (!input || (rule->filter.formatted.bkt_hash !=
2686 input->filter.formatted.bkt_hash)) {
2687 err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2688 &rule->filter,
2689 sw_idx);
2690 }
2691
2692 hlist_del(&rule->fdir_node);
2693 kfree(rule);
2694 adapter->fdir_filter_count--;
2695 }
2696
2697	/*
2698	 * If no input was given this was a delete: err is 0 if a rule was
2699	 * found and removed from the list, and -EINVAL otherwise.
2700	 */
2701 if (!input)
2702 return err;
2703
2704 /* initialize node and set software index */
2705 INIT_HLIST_NODE(&input->fdir_node);
2706
2707 /* add filter to the list */
2708 if (parent)
2709 hlist_add_behind(&input->fdir_node, &parent->fdir_node);
2710 else
2711 hlist_add_head(&input->fdir_node,
2712 &adapter->fdir_filter_list);
2713
2714 /* update counts */
2715 adapter->fdir_filter_count++;
2716
2717 return 0;
2718}
2719
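/*
 * Map an ethtool flow spec to the matching IXGBE_ATR_FLOW_TYPE_* value.
 * Returns 1 on success and 0 for unsupported flow types.
 */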
2720static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2721 u8 *flow_type)
2722{
2723 switch (fsp->flow_type & ~FLOW_EXT) {
2724 case TCP_V4_FLOW:
2725 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2726 break;
2727 case UDP_V4_FLOW:
2728 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2729 break;
2730 case SCTP_V4_FLOW:
2731 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2732 break;
2733 case IP_USER_FLOW:
2734 switch (fsp->h_u.usr_ip4_spec.proto) {
2735 case IPPROTO_TCP:
2736 *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2737 break;
2738 case IPPROTO_UDP:
2739 *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2740 break;
2741 case IPPROTO_SCTP:
2742 *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2743 break;
2744 case 0:
2745 if (!fsp->m_u.usr_ip4_spec.proto) {
2746 *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2747 break;
2748 }
2749 fallthrough;
2750 default:
2751 return 0;
2752 }
2753 break;
2754 default:
2755 return 0;
2756 }
2757
2758 return 1;
2759}
2760
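/*
 * Handle ETHTOOL_SRXCLSRLINS: validate the flow spec, build a Flow Director
 * filter from it, program it (and, for the first rule, the input mask) into
 * hardware and add it to the software filter list.
 */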
2761static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2762 struct ethtool_rxnfc *cmd)
2763{
2764 struct ethtool_rx_flow_spec *fsp =
2765 (struct ethtool_rx_flow_spec *)&cmd->fs;
2766 struct ixgbe_hw *hw = &adapter->hw;
2767 struct ixgbe_fdir_filter *input;
2768 union ixgbe_atr_input mask;
2769 u8 queue;
2770 int err;
2771
2772 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2773 return -EOPNOTSUPP;
2774
2775	/* ring_cookie either holds the drop index or encodes the queue (and,
2776	 * optionally, the VF pool) that the flow should be steered to.
2777	 */
2778 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
2779 queue = IXGBE_FDIR_DROP_QUEUE;
2780 } else {
2781 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
2782 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
2783
2784 if (!vf && (ring >= adapter->num_rx_queues))
2785 return -EINVAL;
2786 else if (vf &&
2787 ((vf > adapter->num_vfs) ||
2788 ring >= adapter->num_rx_queues_per_pool))
2789 return -EINVAL;
2790
2791 /* Map the ring onto the absolute queue index */
2792 if (!vf)
2793 queue = adapter->rx_ring[ring]->reg_idx;
2794 else
2795 queue = ((vf - 1) *
2796 adapter->num_rx_queues_per_pool) + ring;
2797 }
2798
2799 /* Don't allow indexes to exist outside of available space */
2800 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2801 e_err(drv, "Location out of range\n");
2802 return -EINVAL;
2803 }
2804
2805 input = kzalloc(sizeof(*input), GFP_ATOMIC);
2806 if (!input)
2807 return -ENOMEM;
2808
2809 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2810
2811 /* set SW index */
2812 input->sw_idx = fsp->location;
2813
2814 /* record flow type */
2815 if (!ixgbe_flowspec_to_flow_type(fsp,
2816 &input->filter.formatted.flow_type)) {
2817 e_err(drv, "Unrecognized flow type\n");
2818 goto err_out;
2819 }
2820
2821 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2822 IXGBE_ATR_L4TYPE_MASK;
2823
2824 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2825 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2826
2827 /* Copy input into formatted structures */
2828 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2829 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2830 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2831 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2832 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2833 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2834 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2835 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2836
2837 if (fsp->flow_type & FLOW_EXT) {
2838 input->filter.formatted.vm_pool =
2839 (unsigned char)ntohl(fsp->h_ext.data[1]);
2840 mask.formatted.vm_pool =
2841 (unsigned char)ntohl(fsp->m_ext.data[1]);
2842 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2843 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2844 input->filter.formatted.flex_bytes =
2845 fsp->h_ext.vlan_etype;
2846 mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2847 }
2848
2849 /* determine if we need to drop or route the packet */
2850 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2851 input->action = IXGBE_FDIR_DROP_QUEUE;
2852 else
2853 input->action = fsp->ring_cookie;
2854
2855 spin_lock(&adapter->fdir_perfect_lock);
2856
2857 if (hlist_empty(&adapter->fdir_filter_list)) {
2858 /* save mask and program input mask into HW */
2859 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2860 err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2861 if (err) {
2862 e_err(drv, "Error writing mask\n");
2863 goto err_out_w_lock;
2864 }
2865 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2866 e_err(drv, "Only one mask supported per port\n");
2867 goto err_out_w_lock;
2868 }
2869
2870 /* apply mask and compute/store hash */
2871 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2872
2873 /* program filters to filter memory */
2874 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2875 &input->filter, input->sw_idx, queue);
2876 if (err)
2877 goto err_out_w_lock;
2878
2879 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2880
2881 spin_unlock(&adapter->fdir_perfect_lock);
2882
2883 return err;
2884err_out_w_lock:
2885 spin_unlock(&adapter->fdir_perfect_lock);
2886err_out:
2887 kfree(input);
2888 return -EINVAL;
2889}
2890
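/* Handle ETHTOOL_SRXCLSRLDEL by removing the filter at fsp->location */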
2891static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2892 struct ethtool_rxnfc *cmd)
2893{
2894 struct ethtool_rx_flow_spec *fsp =
2895 (struct ethtool_rx_flow_spec *)&cmd->fs;
2896 int err;
2897
2898 spin_lock(&adapter->fdir_perfect_lock);
2899 err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2900 spin_unlock(&adapter->fdir_perfect_lock);
2901
2902 return err;
2903}
2904
2905#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2906 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
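/*
 * Handle ETHTOOL_SRXFH.  Beyond the fixed TCP/IP field selection, only
 * hashing of UDP ports for IPv4/IPv6 can be toggled; an accepted change is
 * written back to the MRQC (or per-pool PFVFMRQC) register.
 */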
2907static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2908 struct ethtool_rxnfc *nfc)
2909{
2910 u32 flags2 = adapter->flags2;
2911
2912 /*
2913 * RSS does not support anything other than hashing
2914 * to queues on src and dst IPs and ports
2915 */
2916 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2917 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2918 return -EINVAL;
2919
2920 switch (nfc->flow_type) {
2921 case TCP_V4_FLOW:
2922 case TCP_V6_FLOW:
2923 if (!(nfc->data & RXH_IP_SRC) ||
2924 !(nfc->data & RXH_IP_DST) ||
2925 !(nfc->data & RXH_L4_B_0_1) ||
2926 !(nfc->data & RXH_L4_B_2_3))
2927 return -EINVAL;
2928 break;
2929 case UDP_V4_FLOW:
2930 if (!(nfc->data & RXH_IP_SRC) ||
2931 !(nfc->data & RXH_IP_DST))
2932 return -EINVAL;
2933 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2934 case 0:
2935 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2936 break;
2937 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2938 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2939 break;
2940 default:
2941 return -EINVAL;
2942 }
2943 break;
2944 case UDP_V6_FLOW:
2945 if (!(nfc->data & RXH_IP_SRC) ||
2946 !(nfc->data & RXH_IP_DST))
2947 return -EINVAL;
2948 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2949 case 0:
2950 flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2951 break;
2952 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2953 flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2954 break;
2955 default:
2956 return -EINVAL;
2957 }
2958 break;
2959 case AH_ESP_V4_FLOW:
2960 case AH_V4_FLOW:
2961 case ESP_V4_FLOW:
2962 case SCTP_V4_FLOW:
2963 case AH_ESP_V6_FLOW:
2964 case AH_V6_FLOW:
2965 case ESP_V6_FLOW:
2966 case SCTP_V6_FLOW:
2967 if (!(nfc->data & RXH_IP_SRC) ||
2968 !(nfc->data & RXH_IP_DST) ||
2969 (nfc->data & RXH_L4_B_0_1) ||
2970 (nfc->data & RXH_L4_B_2_3))
2971 return -EINVAL;
2972 break;
2973 default:
2974 return -EINVAL;
2975 }
2976
2977 /* if we changed something we need to update flags */
2978 if (flags2 != adapter->flags2) {
2979 struct ixgbe_hw *hw = &adapter->hw;
2980 u32 mrqc;
2981 unsigned int pf_pool = adapter->num_vfs;
2982
2983 if ((hw->mac.type >= ixgbe_mac_X550) &&
2984 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2985 mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
2986 else
2987 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2988
2989 if ((flags2 & UDP_RSS_FLAGS) &&
2990 !(adapter->flags2 & UDP_RSS_FLAGS))
2991 e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2992
2993 adapter->flags2 = flags2;
2994
2995 /* Perform hash on these packet types */
2996 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2997 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2998 | IXGBE_MRQC_RSS_FIELD_IPV6
2999 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
3000
3001 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
3002 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
3003
3004 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
3005 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
3006
3007 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
3008 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
3009
3010 if ((hw->mac.type >= ixgbe_mac_X550) &&
3011 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3012 IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
3013 else
3014 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
3015 }
3016
3017 return 0;
3018}
3019
3020static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
3021{
3022 struct ixgbe_adapter *adapter = netdev_priv(dev);
3023 int ret = -EOPNOTSUPP;
3024
3025 switch (cmd->cmd) {
3026 case ETHTOOL_SRXCLSRLINS:
3027 ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
3028 break;
3029 case ETHTOOL_SRXCLSRLDEL:
3030 ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
3031 break;
3032 case ETHTOOL_SRXFH:
3033 ret = ixgbe_set_rss_hash_opt(adapter, cmd);
3034 break;
3035 default:
3036 break;
3037 }
3038
3039 return ret;
3040}
3041
3042static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
3043{
3044 if (adapter->hw.mac.type < ixgbe_mac_X550)
3045 return 16;
3046 else
3047 return 64;
3048}
3049
3050static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
3051{
3052 return IXGBE_RSS_KEY_SIZE;
3053}
3054
3055static u32 ixgbe_rss_indir_size(struct net_device *netdev)
3056{
3057 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3058
3059 return ixgbe_rss_indir_tbl_entries(adapter);
3060}
3061
3062static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
3063{
3064 int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
3065 u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
3066
3067 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3068 rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
3069
3070 for (i = 0; i < reta_size; i++)
3071 indir[i] = adapter->rss_indir_tbl[i] & rss_m;
3072}
3073
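/* Report the RSS hash function, redirection table and hash key */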
3074static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
3075 u8 *hfunc)
3076{
3077 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3078
3079 if (hfunc)
3080 *hfunc = ETH_RSS_HASH_TOP;
3081
3082 if (indir)
3083 ixgbe_get_reta(adapter, indir);
3084
3085 if (key)
3086 memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));
3087
3088 return 0;
3089}
3090
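/*
 * Update the RSS redirection table and/or hash key.  Requests to change the
 * hash function are rejected, and every redirection entry is validated
 * against the number of usable Rx queues before being stored.
 */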
3091static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
3092 const u8 *key, const u8 hfunc)
3093{
3094 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3095 int i;
3096 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
3097
3098 if (hfunc)
3099 return -EINVAL;
3100
3101 /* Fill out the redirection table */
3102 if (indir) {
3103 int max_queues = min_t(int, adapter->num_rx_queues,
3104 ixgbe_rss_indir_tbl_max(adapter));
3105
3106		/* Allow at least 2 queues w/ SR-IOV. */
3107 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3108 (max_queues < 2))
3109 max_queues = 2;
3110
3111 /* Verify user input. */
3112 for (i = 0; i < reta_entries; i++)
3113 if (indir[i] >= max_queues)
3114 return -EINVAL;
3115
3116 for (i = 0; i < reta_entries; i++)
3117 adapter->rss_indir_tbl[i] = indir[i];
3118
3119 ixgbe_store_reta(adapter);
3120 }
3121
3122 /* Fill out the rss hash key */
3123 if (key) {
3124 memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
3125 ixgbe_store_key(adapter);
3126 }
3127
3128 return 0;
3129}
3130
3131static int ixgbe_get_ts_info(struct net_device *dev,
3132 struct ethtool_ts_info *info)
3133{
3134 struct ixgbe_adapter *adapter = netdev_priv(dev);
3135
3136	/* having timestamping disabled (HWTSTAMP_FILTER_NONE) is always supported */
3137 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
3138
3139 switch (adapter->hw.mac.type) {
3140 case ixgbe_mac_X550:
3141 case ixgbe_mac_X550EM_x:
3142 case ixgbe_mac_x550em_a:
3143 info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
3144 break;
3145 case ixgbe_mac_X540:
3146 case ixgbe_mac_82599EB:
3147 info->rx_filters |=
3148 BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
3149 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
3150 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
3151 break;
3152 default:
3153 return ethtool_op_get_ts_info(dev, info);
3154 }
3155
3156 info->so_timestamping =
3157 SOF_TIMESTAMPING_TX_SOFTWARE |
3158 SOF_TIMESTAMPING_RX_SOFTWARE |
3159 SOF_TIMESTAMPING_SOFTWARE |
3160 SOF_TIMESTAMPING_TX_HARDWARE |
3161 SOF_TIMESTAMPING_RX_HARDWARE |
3162 SOF_TIMESTAMPING_RAW_HARDWARE;
3163
3164 if (adapter->ptp_clock)
3165 info->phc_index = ptp_clock_index(adapter->ptp_clock);
3166 else
3167 info->phc_index = -1;
3168
3169 info->tx_types =
3170 BIT(HWTSTAMP_TX_OFF) |
3171 BIT(HWTSTAMP_TX_ON);
3172
3173 return 0;
3174}
3175
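/*
 * Return the maximum number of combined channels supported by the current
 * configuration (MSI-X, SR-IOV, DCB, ATR/RSS), capped at the number of
 * online CPUs.
 */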
3176static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
3177{
3178 unsigned int max_combined;
3179 u8 tcs = adapter->hw_tcs;
3180
3181 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
3182 /* We only support one q_vector without MSI-X */
3183 max_combined = 1;
3184 } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3185 /* Limit value based on the queue mask */
3186 max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
3187 } else if (tcs > 1) {
3188 /* For DCB report channels per traffic class */
3189 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3190 /* 8 TC w/ 4 queues per TC */
3191 max_combined = 4;
3192 } else if (tcs > 4) {
3193 /* 8 TC w/ 8 queues per TC */
3194 max_combined = 8;
3195 } else {
3196 /* 4 TC w/ 16 queues per TC */
3197 max_combined = 16;
3198 }
3199 } else if (adapter->atr_sample_rate) {
3200 /* support up to 64 queues with ATR */
3201 max_combined = IXGBE_MAX_FDIR_INDICES;
3202 } else {
3203 /* support up to 16 queues with RSS */
3204 max_combined = ixgbe_max_rss_indices(adapter);
3205 }
3206
3207 return min_t(int, max_combined, num_online_cpus());
3208}
3209
3210static void ixgbe_get_channels(struct net_device *dev,
3211 struct ethtool_channels *ch)
3212{
3213 struct ixgbe_adapter *adapter = netdev_priv(dev);
3214
3215 /* report maximum channels */
3216 ch->max_combined = ixgbe_max_channels(adapter);
3217
3218 /* report info for other vector */
3219 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3220 ch->max_other = NON_Q_VECTORS;
3221 ch->other_count = NON_Q_VECTORS;
3222 }
3223
3224 /* record RSS queues */
3225 ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
3226
3227 /* nothing else to report if RSS is disabled */
3228 if (ch->combined_count == 1)
3229 return;
3230
3231 /* we do not support ATR queueing if SR-IOV is enabled */
3232 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3233 return;
3234
3235 /* same thing goes for being DCB enabled */
3236 if (adapter->hw_tcs > 1)
3237 return;
3238
3239 /* if ATR is disabled we can exit */
3240 if (!adapter->atr_sample_rate)
3241 return;
3242
3243 /* report flow director queues as maximum channels */
3244 ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
3245}
3246
3247static int ixgbe_set_channels(struct net_device *dev,
3248 struct ethtool_channels *ch)
3249{
3250 struct ixgbe_adapter *adapter = netdev_priv(dev);
3251 unsigned int count = ch->combined_count;
3252 u8 max_rss_indices = ixgbe_max_rss_indices(adapter);
3253
3254 /* verify they are not requesting separate vectors */
3255 if (!count || ch->rx_count || ch->tx_count)
3256 return -EINVAL;
3257
3258 /* verify other_count has not changed */
3259 if (ch->other_count != NON_Q_VECTORS)
3260 return -EINVAL;
3261
3262 /* verify the number of channels does not exceed hardware limits */
3263 if (count > ixgbe_max_channels(adapter))
3264 return -EINVAL;
3265
3266 /* update feature limits from largest to smallest supported values */
3267 adapter->ring_feature[RING_F_FDIR].limit = count;
3268
3269 /* cap RSS limit */
3270 if (count > max_rss_indices)
3271 count = max_rss_indices;
3272 adapter->ring_feature[RING_F_RSS].limit = count;
3273
3274#ifdef IXGBE_FCOE
3275 /* cap FCoE limit at 8 */
3276 if (count > IXGBE_FCRETA_SIZE)
3277 count = IXGBE_FCRETA_SIZE;
3278 adapter->ring_feature[RING_F_FCOE].limit = count;
3279
3280#endif
3281 /* use setup TC to update any traffic class queue mapping */
3282 return ixgbe_setup_tc(dev, adapter->hw_tcs);
3283}
3284
3285static int ixgbe_get_module_info(struct net_device *dev,
3286 struct ethtool_modinfo *modinfo)
3287{
3288 struct ixgbe_adapter *adapter = netdev_priv(dev);
3289 struct ixgbe_hw *hw = &adapter->hw;
3290 s32 status;
3291 u8 sff8472_rev, addr_mode;
3292 bool page_swap = false;
3293
3294 if (hw->phy.type == ixgbe_phy_fw)
3295 return -ENXIO;
3296
3297 /* Check whether we support SFF-8472 or not */
3298 status = hw->phy.ops.read_i2c_eeprom(hw,
3299 IXGBE_SFF_SFF_8472_COMP,
3300 &sff8472_rev);
3301 if (status)
3302 return -EIO;
3303
3304	/* check whether the unsupported page-switch addressing mode is required */
3305 status = hw->phy.ops.read_i2c_eeprom(hw,
3306 IXGBE_SFF_SFF_8472_SWAP,
3307 &addr_mode);
3308 if (status)
3309 return -EIO;
3310
3311 if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
3312 e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
3313 page_swap = true;
3314 }
3315
3316 if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
3317 !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
3318		/* We have an SFP, but it does not support SFF-8472 */
3319 modinfo->type = ETH_MODULE_SFF_8079;
3320 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
3321 } else {
3322		/* We have an SFP which supports a revision of SFF-8472. */
3323 modinfo->type = ETH_MODULE_SFF_8472;
3324 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3325 }
3326
3327 return 0;
3328}
3329
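/*
 * Read the requested range of the SFP module EEPROM over I2C, using the
 * SFF-8472 diagnostic area for offsets beyond the SFF-8079 region.
 */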
3330static int ixgbe_get_module_eeprom(struct net_device *dev,
3331 struct ethtool_eeprom *ee,
3332 u8 *data)
3333{
3334 struct ixgbe_adapter *adapter = netdev_priv(dev);
3335 struct ixgbe_hw *hw = &adapter->hw;
3336 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
3337 u8 databyte = 0xFF;
3338 int i = 0;
3339
3340 if (ee->len == 0)
3341 return -EINVAL;
3342
3343 if (hw->phy.type == ixgbe_phy_fw)
3344 return -ENXIO;
3345
3346 for (i = ee->offset; i < ee->offset + ee->len; i++) {
3347		/* I2C reads can take a long time */
3348 if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
3349 return -EBUSY;
3350
3351 if (i < ETH_MODULE_SFF_8079_LEN)
3352 status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
3353 else
3354 status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
3355
3356 if (status)
3357 return -EIO;
3358
3359 data[i - ee->offset] = databyte;
3360 }
3361
3362 return 0;
3363}
3364
3365static const struct {
3366 ixgbe_link_speed mac_speed;
3367 u32 supported;
3368} ixgbe_ls_map[] = {
3369 { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
3370 { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
3371 { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
3372 { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
3373 { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
3374};
3375
3376static const struct {
3377 u32 lp_advertised;
3378 u32 mac_speed;
3379} ixgbe_lp_map[] = {
3380 { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
3381 { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
3382 { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
3383 { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
3384 { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
3385 { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
3386};
3387
3388static int
3389ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
3390{
3391 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
3392 struct ixgbe_hw *hw = &adapter->hw;
3393 s32 rc;
3394 u16 i;
3395
3396 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
3397 if (rc)
3398 return rc;
3399
3400 edata->lp_advertised = 0;
3401 for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
3402 if (info[0] & ixgbe_lp_map[i].lp_advertised)
3403 edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
3404 }
3405
3406 edata->supported = 0;
3407 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3408 if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
3409 edata->supported |= ixgbe_ls_map[i].supported;
3410 }
3411
3412 edata->advertised = 0;
3413 for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
3414 if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
3415 edata->advertised |= ixgbe_ls_map[i].supported;
3416 }
3417
3418 edata->eee_enabled = !!edata->advertised;
3419 edata->tx_lpi_enabled = edata->eee_enabled;
3420 if (edata->advertised & edata->lp_advertised)
3421 edata->eee_active = true;
3422
3423 return 0;
3424}
3425
3426static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
3427{
3428 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3429 struct ixgbe_hw *hw = &adapter->hw;
3430
3431 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3432 return -EOPNOTSUPP;
3433
3434 if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
3435 return ixgbe_get_eee_fw(adapter, edata);
3436
3437 return -EOPNOTSUPP;
3438}
3439
3440static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
3441{
3442 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3443 struct ixgbe_hw *hw = &adapter->hw;
3444 struct ethtool_eee eee_data;
3445 s32 ret_val;
3446
3447 if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
3448 return -EOPNOTSUPP;
3449
3450 memset(&eee_data, 0, sizeof(struct ethtool_eee));
3451
3452 ret_val = ixgbe_get_eee(netdev, &eee_data);
3453 if (ret_val)
3454 return ret_val;
3455
3456 if (eee_data.eee_enabled && !edata->eee_enabled) {
3457 if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
3458 e_err(drv, "Setting EEE tx-lpi is not supported\n");
3459 return -EINVAL;
3460 }
3461
3462 if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
3463 e_err(drv,
3464 "Setting EEE Tx LPI timer is not supported\n");
3465 return -EINVAL;
3466 }
3467
3468 if (eee_data.advertised != edata->advertised) {
3469 e_err(drv,
3470 "Setting EEE advertised speeds is not supported\n");
3471 return -EINVAL;
3472 }
3473 }
3474
3475 if (eee_data.eee_enabled != edata->eee_enabled) {
3476 if (edata->eee_enabled) {
3477 adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
3478 hw->phy.eee_speeds_advertised =
3479 hw->phy.eee_speeds_supported;
3480 } else {
3481 adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
3482 hw->phy.eee_speeds_advertised = 0;
3483 }
3484
3485 /* reset link */
3486 if (netif_running(netdev))
3487 ixgbe_reinit_locked(adapter);
3488 else
3489 ixgbe_reset(adapter);
3490 }
3491
3492 return 0;
3493}
3494
3495static u32 ixgbe_get_priv_flags(struct net_device *netdev)
3496{
3497 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3498 u32 priv_flags = 0;
3499
3500 if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
3501 priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
3502
3503 if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
3504 priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
3505
3506 return priv_flags;
3507}
3508
3509static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
3510{
3511 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3512 unsigned int flags2 = adapter->flags2;
3513
3514 flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
3515 if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
3516 flags2 |= IXGBE_FLAG2_RX_LEGACY;
3517
3518 flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
3519 if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
3520 flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
3521
3522 if (flags2 != adapter->flags2) {
3523 adapter->flags2 = flags2;
3524
3525 /* reset interface to repopulate queues */
3526 if (netif_running(netdev))
3527 ixgbe_reinit_locked(adapter);
3528 }
3529
3530 return 0;
3531}
3532
3533static const struct ethtool_ops ixgbe_ethtool_ops = {
3534 .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
3535 .get_drvinfo = ixgbe_get_drvinfo,
3536 .get_regs_len = ixgbe_get_regs_len,
3537 .get_regs = ixgbe_get_regs,
3538 .get_wol = ixgbe_get_wol,
3539 .set_wol = ixgbe_set_wol,
3540 .nway_reset = ixgbe_nway_reset,
3541 .get_link = ethtool_op_get_link,
3542 .get_eeprom_len = ixgbe_get_eeprom_len,
3543 .get_eeprom = ixgbe_get_eeprom,
3544 .set_eeprom = ixgbe_set_eeprom,
3545 .get_ringparam = ixgbe_get_ringparam,
3546 .set_ringparam = ixgbe_set_ringparam,
3547 .get_pause_stats = ixgbe_get_pause_stats,
3548 .get_pauseparam = ixgbe_get_pauseparam,
3549 .set_pauseparam = ixgbe_set_pauseparam,
3550 .get_msglevel = ixgbe_get_msglevel,
3551 .set_msglevel = ixgbe_set_msglevel,
3552 .self_test = ixgbe_diag_test,
3553 .get_strings = ixgbe_get_strings,
3554 .set_phys_id = ixgbe_set_phys_id,
3555 .get_sset_count = ixgbe_get_sset_count,
3556 .get_ethtool_stats = ixgbe_get_ethtool_stats,
3557 .get_coalesce = ixgbe_get_coalesce,
3558 .set_coalesce = ixgbe_set_coalesce,
3559 .get_rxnfc = ixgbe_get_rxnfc,
3560 .set_rxnfc = ixgbe_set_rxnfc,
3561 .get_rxfh_indir_size = ixgbe_rss_indir_size,
3562 .get_rxfh_key_size = ixgbe_get_rxfh_key_size,
3563 .get_rxfh = ixgbe_get_rxfh,
3564 .set_rxfh = ixgbe_set_rxfh,
3565 .get_eee = ixgbe_get_eee,
3566 .set_eee = ixgbe_set_eee,
3567 .get_channels = ixgbe_get_channels,
3568 .set_channels = ixgbe_set_channels,
3569 .get_priv_flags = ixgbe_get_priv_flags,
3570 .set_priv_flags = ixgbe_set_priv_flags,
3571 .get_ts_info = ixgbe_get_ts_info,
3572 .get_module_info = ixgbe_get_module_info,
3573 .get_module_eeprom = ixgbe_get_module_eeprom,
3574 .get_link_ksettings = ixgbe_get_link_ksettings,
3575 .set_link_ksettings = ixgbe_set_link_ksettings,
3576};
3577
3578void ixgbe_set_ethtool_ops(struct net_device *netdev)
3579{
3580 netdev->ethtool_ops = &ixgbe_ethtool_ops;
3581}