/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2018 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

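/* Each entry pairs an ethtool stat name with the type, size and offset of the
 * backing counter, so ixgbevf_get_ethtool_stats() can copy values generically
 * (e.g. *(u64 *)((char *)adapter + stat_offset)) instead of naming each field.
 */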
static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

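/* Private flags are reported positionally, so the bit defined here must match
 * the index of its string in this array.
 */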
static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	hw->mac.get_link_status = 1;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	if (link_up) {
		__u32 speed = SPEED_10000;

		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}

static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbevf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

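/* Resize the descriptor rings: clamp and align the requested counts, and if
 * the interface is down just record them.  Otherwise build replacement rings
 * with the new counts, allocate their resources up front, then take the
 * interface down, swap the rings in, and bring it back up.  Any failure frees
 * whatever was allocated and leaves the current rings untouched.
 */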
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, j, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc((adapter->num_tx_queues +
				   adapter->num_xdp_queues) * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->xdp_ring[j];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];

			/* Clear copied XDP RX-queue info */
			memset(&rx_ring[i].xdp_rxq, 0,
			       sizeof(rx_ring[i].xdp_rxq));

			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
			*adapter->xdp_ring[j] = tx_ring[i];
		}
		adapter->xdp_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0;
		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBEVF_PRIV_FLAGS_STR_LEN;
	default:
		return -EINVAL;
	}
}

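/* Fill the stats buffer in the same order ixgbevf_get_strings() reports the
 * ETH_SS_STATS names: global stats first, then packets/bytes for each Tx,
 * XDP and Rx queue.  Ring counters are read under the u64_stats seqcount
 * retry loop so 64-bit values stay consistent on 32-bit hosts.
 */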
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate XDP queue data */
	for (j = 0; j < adapter->num_xdp_queues; j++) {
		ring = adapter->xdp_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}

static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_xdp_queues; i++) {
			sprintf(p, "xdp_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "xdp_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbevf_priv_flags_strings,
		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
		break;
	}
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
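/* For example, the first reg_test_vf entry below,
 *	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
 * makes ixgbevf_reg_test() pattern-test VFRDBAL for both queues, i.e. the
 * register at IXGBE_VFRDBAL(0) and the one 0x40 bytes above it.
 */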

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

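/* Write each test pattern masked by the write mask, read the register back,
 * and compare against (pattern & write & mask); the original register value
 * is restored after every pattern.  Returns true on mismatch, recording the
 * failing register offset in *data, or immediately if the adapter has been
 * removed.
 */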
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}

static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

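/* Interrupt moderation: itr_setting values of 0 and 1 are treated as special
 * modes rather than microsecond counts and are passed through unchanged;
 * larger values are kept left-shifted by two (assumed here to match the EITR
 * register encoding) and converted back to microseconds for ethtool.
 */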
static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

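/* X550-class VFs keep a local copy of the RSS key and redirection table, so
 * they can be returned directly.  Older VFs have to ask the PF for them over
 * the mailbox, which is why those reads are serialized with mbx_lock.
 */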
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 * - just return a success avoiding taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
		flags |= IXGBEVF_FLAGS_LEGACY_RX;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
	.get_priv_flags		= ixgbevf_get_priv_flags,
	.set_priv_flags		= ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}