1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#ifdef CONFIG_DEBUG_FS
5
6#include <linux/fs.h>
7#include <linux/debugfs.h>
8#include <linux/if_bridge.h>
9#include "i40e.h"
10#include "i40e_virtchnl_pf.h"
11
12static struct dentry *i40e_dbg_root;
13
14enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18};
19
20/**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
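 *
 * Returns the matching VSI on success, or NULL if no VSI with that seid
 * exists.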
24 **/
25static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26{
27 int i;
28
29 if (seid < 0)
30 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
31 else
32 for (i = 0; i < pf->num_alloc_vsi; i++)
33 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
34 return pf->vsi[i];
35
36 return NULL;
37}
38
39/**
40 * i40e_dbg_find_veb - searches for the veb with the given seid
41 * @pf: the PF structure to search for the veb
42 * @seid: seid of the veb it is searching for
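 *
 * Returns the matching VEB on success, or NULL if no VEB with that seid
 * exists.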
43 **/
44static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
45{
46 int i;
47
48 for (i = 0; i < I40E_MAX_VEB; i++)
49 if (pf->veb[i] && pf->veb[i]->seid == seid)
50 return pf->veb[i];
51 return NULL;
52}
53
54/**************************************************************
55 * command
56 * The command entry in debugfs is for giving the driver commands
57 * to be executed - these may be for changing the internal switch
58 * setup, adding or removing filters, or other things. Many of
59 * these will be useful for some forms of unit testing.
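 *
 * Illustrative usage from user space (a sketch only -- the directory name
 * under debugfs is an assumption here and depends on how the per-device
 * dentry is created):
 *   echo "dump vsi" > /sys/kernel/debug/i40e/<pci-id>/command
 *   echo "dump vsi 388" > /sys/kernel/debug/i40e/<pci-id>/command
 * where "dump vsi" with no argument lists the seids of all allocated VSIs
 * and 388 stands in for one of the seids it reports.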
60 **************************************************************/
61static char i40e_dbg_command_buf[256] = "";
62
63/**
64 * i40e_dbg_command_read - read for command datum
65 * @filp: the opened file
66 * @buffer: where to write the data for the user to read
67 * @count: the size of the user's buffer
68 * @ppos: file position offset
69 **/
70static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
71 size_t count, loff_t *ppos)
72{
73 struct i40e_pf *pf = filp->private_data;
74 int bytes_not_copied;
75 int buf_size = 256;
76 char *buf;
77 int len;
78
79 /* don't allow partial reads */
80 if (*ppos != 0)
81 return 0;
82 if (count < buf_size)
83 return -ENOSPC;
84
85 buf = kzalloc(buf_size, GFP_KERNEL);
86 if (!buf)
87 return -ENOMEM;
88
89 len = snprintf(buf, buf_size, "%s: %s\n",
90 pf->vsi[pf->lan_vsi]->netdev->name,
91 i40e_dbg_command_buf);
92
93 bytes_not_copied = copy_to_user(buffer, buf, len);
94 kfree(buf);
95
96 if (bytes_not_copied)
97 return -EFAULT;
98
99 *ppos = len;
100 return len;
101}
102
103static char *i40e_filter_state_string[] = {
104 "INVALID",
105 "NEW",
106 "ACTIVE",
107 "FAILED",
108 "REMOVE",
109};
110
111/**
112 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
113 * @pf: the i40e_pf created in command write
114 * @seid: the seid the user put in
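 *
 * Backs the "dump vsi <seid>" command; prints the VSI's netdev info, MAC
 * filters, stats and per-ring state to the kernel log via dev_info().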
115 **/
116static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
117{
118 struct rtnl_link_stats64 *nstat;
119 struct i40e_mac_filter *f;
120 struct i40e_vsi *vsi;
121 int i, bkt;
122
123 vsi = i40e_dbg_find_vsi(pf, seid);
124 if (!vsi) {
125 dev_info(&pf->pdev->dev,
126 "dump %d: seid not found\n", seid);
127 return;
128 }
129 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
130 if (vsi->netdev) {
131 struct net_device *nd = vsi->netdev;
132
133 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
134 nd->name, nd->state, nd->flags);
135 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
136 (unsigned long int)nd->features);
137 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
138 (unsigned long int)nd->hw_features);
139 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
140 (unsigned long int)nd->vlan_features);
141 }
142 dev_info(&pf->pdev->dev,
143 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
144 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
145 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
146 dev_info(&pf->pdev->dev,
147 " state[%d] = %08lx\n",
148 i, vsi->state[i]);
149 if (vsi == pf->vsi[pf->lan_vsi])
150 dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
151 pf->hw.mac.addr,
152 pf->hw.mac.port_addr);
153 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
154 dev_info(&pf->pdev->dev,
155 " mac_filter_hash: %pM vid=%d, state %s\n",
156 f->macaddr, f->vlan,
157 i40e_filter_state_string[f->state]);
158 }
159 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
160 vsi->active_filters, vsi->promisc_threshold,
161 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
162 "ON" : "OFF"));
163 nstat = i40e_get_vsi_stats_struct(vsi);
164 dev_info(&pf->pdev->dev,
165 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
166 (unsigned long int)nstat->rx_packets,
167 (unsigned long int)nstat->rx_bytes,
168 (unsigned long int)nstat->rx_errors,
169 (unsigned long int)nstat->rx_dropped);
170 dev_info(&pf->pdev->dev,
171 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
172 (unsigned long int)nstat->tx_packets,
173 (unsigned long int)nstat->tx_bytes,
174 (unsigned long int)nstat->tx_errors,
175 (unsigned long int)nstat->tx_dropped);
176 dev_info(&pf->pdev->dev,
177 " net_stats: multicast = %lu, collisions = %lu\n",
178 (unsigned long int)nstat->multicast,
179 (unsigned long int)nstat->collisions);
180 dev_info(&pf->pdev->dev,
181 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
182 (unsigned long int)nstat->rx_length_errors,
183 (unsigned long int)nstat->rx_over_errors,
184 (unsigned long int)nstat->rx_crc_errors);
185 dev_info(&pf->pdev->dev,
186 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
187 (unsigned long int)nstat->rx_frame_errors,
188 (unsigned long int)nstat->rx_fifo_errors,
189 (unsigned long int)nstat->rx_missed_errors);
190 dev_info(&pf->pdev->dev,
191 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
192 (unsigned long int)nstat->tx_aborted_errors,
193 (unsigned long int)nstat->tx_carrier_errors,
194 (unsigned long int)nstat->tx_fifo_errors);
195 dev_info(&pf->pdev->dev,
196 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
197 (unsigned long int)nstat->tx_heartbeat_errors,
198 (unsigned long int)nstat->tx_window_errors);
199 dev_info(&pf->pdev->dev,
200 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
201 (unsigned long int)nstat->rx_compressed,
202 (unsigned long int)nstat->tx_compressed);
203 dev_info(&pf->pdev->dev,
204 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
205 (unsigned long int)vsi->net_stats_offsets.rx_packets,
206 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
207 (unsigned long int)vsi->net_stats_offsets.rx_errors,
208 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
209 dev_info(&pf->pdev->dev,
210 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
211 (unsigned long int)vsi->net_stats_offsets.tx_packets,
212 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
213 (unsigned long int)vsi->net_stats_offsets.tx_errors,
214 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
215 dev_info(&pf->pdev->dev,
216 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
217 (unsigned long int)vsi->net_stats_offsets.multicast,
218 (unsigned long int)vsi->net_stats_offsets.collisions);
219 dev_info(&pf->pdev->dev,
220 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
221 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
222 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
223 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
224 dev_info(&pf->pdev->dev,
225 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
226 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
227 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
228 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
229 dev_info(&pf->pdev->dev,
230 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
231 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
232 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
233 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
234 dev_info(&pf->pdev->dev,
235 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
236 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
237 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
238 dev_info(&pf->pdev->dev,
239 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
240 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
241 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
242 dev_info(&pf->pdev->dev,
243 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
244 vsi->tx_restart, vsi->tx_busy,
245 vsi->rx_buf_failed, vsi->rx_page_failed);
246 rcu_read_lock();
247 for (i = 0; i < vsi->num_queue_pairs; i++) {
248 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
249
250 if (!rx_ring)
251 continue;
252
253 dev_info(&pf->pdev->dev,
254 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
255 i, *rx_ring->state,
256 rx_ring->queue_index,
257 rx_ring->reg_idx);
258 dev_info(&pf->pdev->dev,
259 " rx_rings[%i]: rx_buf_len = %d\n",
260 i, rx_ring->rx_buf_len);
261 dev_info(&pf->pdev->dev,
262 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
263 i,
264 rx_ring->next_to_use,
265 rx_ring->next_to_clean,
266 rx_ring->ring_active);
267 dev_info(&pf->pdev->dev,
268 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
269 i, rx_ring->stats.packets,
270 rx_ring->stats.bytes,
271 rx_ring->rx_stats.non_eop_descs);
272 dev_info(&pf->pdev->dev,
273 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
274 i,
275 rx_ring->rx_stats.alloc_page_failed,
276 rx_ring->rx_stats.alloc_buff_failed);
277 dev_info(&pf->pdev->dev,
278 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
279 i,
280 rx_ring->rx_stats.page_reuse_count);
281 dev_info(&pf->pdev->dev,
282 " rx_rings[%i]: size = %i\n",
283 i, rx_ring->size);
284 dev_info(&pf->pdev->dev,
285 " rx_rings[%i]: itr_setting = %d (%s)\n",
286 i, rx_ring->itr_setting,
287 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
288 }
289 for (i = 0; i < vsi->num_queue_pairs; i++) {
290 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
291
292 if (!tx_ring)
293 continue;
294
295 dev_info(&pf->pdev->dev,
296 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
297 i, *tx_ring->state,
298 tx_ring->queue_index,
299 tx_ring->reg_idx);
300 dev_info(&pf->pdev->dev,
301 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
302 i,
303 tx_ring->next_to_use,
304 tx_ring->next_to_clean,
305 tx_ring->ring_active);
306 dev_info(&pf->pdev->dev,
307 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
308 i, tx_ring->stats.packets,
309 tx_ring->stats.bytes,
310 tx_ring->tx_stats.restart_queue);
311 dev_info(&pf->pdev->dev,
312 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
313 i,
314 tx_ring->tx_stats.tx_busy,
315 tx_ring->tx_stats.tx_done_old,
316 tx_ring->tx_stats.tx_stopped);
317 dev_info(&pf->pdev->dev,
318 " tx_rings[%i]: size = %i\n",
319 i, tx_ring->size);
320 dev_info(&pf->pdev->dev,
321 " tx_rings[%i]: DCB tc = %d\n",
322 i, tx_ring->dcb_tc);
323 dev_info(&pf->pdev->dev,
324 " tx_rings[%i]: itr_setting = %d (%s)\n",
325 i, tx_ring->itr_setting,
326 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
327 }
328 if (i40e_enabled_xdp_vsi(vsi)) {
329 for (i = 0; i < vsi->num_queue_pairs; i++) {
330 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
331
332 if (!xdp_ring)
333 continue;
334
335 dev_info(&pf->pdev->dev,
336 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
337 i, *xdp_ring->state,
338 xdp_ring->queue_index,
339 xdp_ring->reg_idx);
340 dev_info(&pf->pdev->dev,
341 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
342 i,
343 xdp_ring->next_to_use,
344 xdp_ring->next_to_clean,
345 xdp_ring->ring_active);
346 dev_info(&pf->pdev->dev,
347 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
348 i, xdp_ring->stats.packets,
349 xdp_ring->stats.bytes,
350 xdp_ring->tx_stats.restart_queue);
351 dev_info(&pf->pdev->dev,
352 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
353 i,
354 xdp_ring->tx_stats.tx_busy,
355 xdp_ring->tx_stats.tx_done_old);
356 dev_info(&pf->pdev->dev,
357 " xdp_rings[%i]: size = %i\n",
358 i, xdp_ring->size);
359 dev_info(&pf->pdev->dev,
360 " xdp_rings[%i]: DCB tc = %d\n",
361 i, xdp_ring->dcb_tc);
362 dev_info(&pf->pdev->dev,
363 " xdp_rings[%i]: itr_setting = %d (%s)\n",
364 i, xdp_ring->itr_setting,
365 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
366 "dynamic" : "fixed");
367 }
368 }
369 rcu_read_unlock();
370 dev_info(&pf->pdev->dev,
371 " work_limit = %d\n",
372 vsi->work_limit);
373 dev_info(&pf->pdev->dev,
374 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
375 vsi->max_frame, vsi->rx_buf_len, 0);
376 dev_info(&pf->pdev->dev,
377 " num_q_vectors = %i, base_vector = %i\n",
378 vsi->num_q_vectors, vsi->base_vector);
379 dev_info(&pf->pdev->dev,
380 " seid = %d, id = %d, uplink_seid = %d\n",
381 vsi->seid, vsi->id, vsi->uplink_seid);
382 dev_info(&pf->pdev->dev,
383 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
384 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
385 vsi->num_rx_desc);
386 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
387 if (vsi->type == I40E_VSI_SRIOV)
388 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
389 dev_info(&pf->pdev->dev,
390 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
391 vsi->info.valid_sections, vsi->info.switch_id);
392 dev_info(&pf->pdev->dev,
393 " info: sw_reserved[] = 0x%02x 0x%02x\n",
394 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
395 dev_info(&pf->pdev->dev,
396 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
397 vsi->info.sec_flags, vsi->info.sec_reserved);
398 dev_info(&pf->pdev->dev,
399 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
400 vsi->info.pvid, vsi->info.fcoe_pvid,
401 vsi->info.port_vlan_flags);
402 dev_info(&pf->pdev->dev,
403 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
404 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
405 vsi->info.pvlan_reserved[2]);
406 dev_info(&pf->pdev->dev,
407 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
408 vsi->info.ingress_table, vsi->info.egress_table);
409 dev_info(&pf->pdev->dev,
410 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
411 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
412 vsi->info.cas_pv_reserved);
413 dev_info(&pf->pdev->dev,
414 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
415 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
416 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
417 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
418 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
419 dev_info(&pf->pdev->dev,
420 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
421 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
422 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
423 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
424 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
425 dev_info(&pf->pdev->dev,
426 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
427 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
428 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
429 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
430 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
431 dev_info(&pf->pdev->dev,
432 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
433 vsi->info.queueing_opt_flags,
434 vsi->info.queueing_opt_reserved[0],
435 vsi->info.queueing_opt_reserved[1],
436 vsi->info.queueing_opt_reserved[2]);
437 dev_info(&pf->pdev->dev,
438 " info: up_enable_bits = 0x%02x\n",
439 vsi->info.up_enable_bits);
440 dev_info(&pf->pdev->dev,
441 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
442 vsi->info.sched_reserved, vsi->info.outer_up_table);
443 dev_info(&pf->pdev->dev,
444 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
445 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
446 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
447 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
448 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
449 dev_info(&pf->pdev->dev,
450 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
451 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
452 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
453 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
454 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
455 dev_info(&pf->pdev->dev,
456 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
457 vsi->info.stat_counter_idx, vsi->info.sched_id);
458 dev_info(&pf->pdev->dev,
459 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
460 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
461 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
462 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
463 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
464 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
465 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
466 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
467 dev_info(&pf->pdev->dev,
468 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
469 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
470 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
471 dev_info(&pf->pdev->dev,
472 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
473 i, vsi->tc_config.tc_info[i].qoffset,
474 vsi->tc_config.tc_info[i].qcount,
475 vsi->tc_config.tc_info[i].netdev_tc);
476 }
477 dev_info(&pf->pdev->dev,
478 " bw: bw_limit = %d, bw_max_quanta = %d\n",
479 vsi->bw_limit, vsi->bw_max_quanta);
480 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
481 dev_info(&pf->pdev->dev,
482 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
483 i, vsi->bw_ets_share_credits[i],
484 vsi->bw_ets_limit_credits[i],
485 vsi->bw_ets_max_quanta[i]);
486 }
487}
488
489/**
490 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
491 * @pf: the i40e_pf created in command write
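 *
 * Backs the "dump desc aq" command; walks the AdminQ send and receive
 * rings and prints each descriptor along with a hex dump of its parameters.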
492 **/
493static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
494{
495 struct i40e_adminq_ring *ring;
496 struct i40e_hw *hw = &pf->hw;
497 char hdr[32];
498 int i;
499
500 snprintf(hdr, sizeof(hdr), "%s %s: ",
501 dev_driver_string(&pf->pdev->dev),
502 dev_name(&pf->pdev->dev));
503
504 /* first the send (command) ring, then the receive (event) ring */
505 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
506 ring = &(hw->aq.asq);
507 for (i = 0; i < ring->count; i++) {
508 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
509
510 dev_info(&pf->pdev->dev,
511 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
512 i, d->flags, d->opcode, d->datalen, d->retval,
513 d->cookie_high, d->cookie_low);
514 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
515 16, 1, d->params.raw, 16, 0);
516 }
517
518 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
519 ring = &(hw->aq.arq);
520 for (i = 0; i < ring->count; i++) {
521 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
522
523 dev_info(&pf->pdev->dev,
524 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
525 i, d->flags, d->opcode, d->datalen, d->retval,
526 d->cookie_high, d->cookie_low);
527 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
528 16, 1, d->params.raw, 16, 0);
529 }
530}
531
532/**
533 * i40e_dbg_dump_desc - handles dump desc write into command datum
534 * @cnt: number of arguments that the user supplied
535 * @vsi_seid: vsi id entered by user
536 * @ring_id: ring id entered by user
537 * @desc_n: descriptor number entered by user
538 * @pf: the i40e_pf created in command write
539 * @type: enum describing whether ring is RX, TX or XDP
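 *
 * Backs "dump desc rx|tx|xdp <vsi_seid> <ring_id> [<desc_n>]": with two
 * arguments every descriptor in the ring is printed, with three only the
 * single descriptor desc_n is printed.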
540 **/
541static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
542 struct i40e_pf *pf, enum ring_type type)
543{
544 bool is_rx_ring = type == RING_TYPE_RX;
545 struct i40e_tx_desc *txd;
546 union i40e_rx_desc *rxd;
547 struct i40e_ring *ring;
548 struct i40e_vsi *vsi;
549 int i;
550
551 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
552 if (!vsi) {
553 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
554 return;
555 }
556 if (vsi->type != I40E_VSI_MAIN &&
557 vsi->type != I40E_VSI_FDIR &&
558 vsi->type != I40E_VSI_VMDQ2) {
559 dev_info(&pf->pdev->dev,
560 "vsi %d type %d descriptor rings not available\n",
561 vsi_seid, vsi->type);
562 return;
563 }
564 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
565 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
566 return;
567 }
568 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
569 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
570 return;
571 }
572 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
573 dev_info(&pf->pdev->dev,
574 "descriptor rings have not been allocated for vsi %d\n",
575 vsi_seid);
576 return;
577 }
578
579 switch (type) {
580 case RING_TYPE_RX:
581 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
582 break;
583 case RING_TYPE_TX:
584 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
585 break;
586 case RING_TYPE_XDP:
587 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
588 break;
589 default:
590 ring = NULL;
591 break;
592 }
593 if (!ring)
594 return;
595
596 if (cnt == 2) {
597 switch (type) {
598 case RING_TYPE_RX:
599 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
600 break;
601 case RING_TYPE_TX:
602 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
603 break;
604 case RING_TYPE_XDP:
605 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
606 break;
607 }
608 for (i = 0; i < ring->count; i++) {
609 if (!is_rx_ring) {
610 txd = I40E_TX_DESC(ring, i);
611 dev_info(&pf->pdev->dev,
612 " d[%03x] = 0x%016llx 0x%016llx\n",
613 i, txd->buffer_addr,
614 txd->cmd_type_offset_bsz);
615 } else {
616 rxd = I40E_RX_DESC(ring, i);
617 dev_info(&pf->pdev->dev,
618 " d[%03x] = 0x%016llx 0x%016llx\n",
619 i, rxd->read.pkt_addr,
620 rxd->read.hdr_addr);
621 }
622 }
623 } else if (cnt == 3) {
624 if (desc_n >= ring->count || desc_n < 0) {
625 dev_info(&pf->pdev->dev,
626 "descriptor %d not found\n", desc_n);
627 goto out;
628 }
629 if (!is_rx_ring) {
630 txd = I40E_TX_DESC(ring, desc_n);
631 dev_info(&pf->pdev->dev,
632 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
633 vsi_seid, ring_id, desc_n,
634 txd->buffer_addr, txd->cmd_type_offset_bsz);
635 } else {
636 rxd = I40E_RX_DESC(ring, desc_n);
637 dev_info(&pf->pdev->dev,
638 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
639 vsi_seid, ring_id, desc_n,
640 rxd->read.pkt_addr, rxd->read.hdr_addr);
641 }
642 } else {
643 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
644 }
645
646out:
647 kfree(ring);
648}
649
650/**
651 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
652 * @pf: the i40e_pf created in command write
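 *
 * Backs "dump vsi" with no seid; lists the seid of every allocated VSI so
 * one can be fed back into "dump vsi <seid>".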
653 **/
654static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
655{
656 int i;
657
658 for (i = 0; i < pf->num_alloc_vsi; i++)
659 if (pf->vsi[i])
660 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
661 i, pf->vsi[i]->seid);
662}
663
664/**
665 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
666 * @pf: the i40e_pf created in command write
667 * @estats: the eth stats structure to be dumped
668 **/
669static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
670 struct i40e_eth_stats *estats)
671{
672 dev_info(&pf->pdev->dev, " ethstats:\n");
673 dev_info(&pf->pdev->dev,
674 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
675 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
676 dev_info(&pf->pdev->dev,
677 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
678 estats->rx_broadcast, estats->rx_discards);
679 dev_info(&pf->pdev->dev,
680 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
681 estats->rx_unknown_protocol, estats->tx_bytes);
682 dev_info(&pf->pdev->dev,
683 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
684 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
685 dev_info(&pf->pdev->dev,
686 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
687 estats->tx_discards, estats->tx_errors);
688}
689
690/**
691 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
692 * @pf: the i40e_pf created in command write
693 * @seid: the seid the user put in
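 *
 * Backs "dump veb <seid>"; prints the VEB's index, uplink and bridge mode,
 * then its ethernet stats.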
694 **/
695static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
696{
697 struct i40e_veb *veb;
698
699 veb = i40e_dbg_find_veb(pf, seid);
700 if (!veb) {
701 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
702 return;
703 }
704 dev_info(&pf->pdev->dev,
705 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
706 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
707 veb->uplink_seid,
708 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
709 i40e_dbg_dump_eth_stats(pf, &veb->stats);
710}
711
712/**
713 * i40e_dbg_dump_veb_all - dumps all known veb's stats
714 * @pf: the i40e_pf created in command write
715 **/
716static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
717{
718 struct i40e_veb *veb;
719 int i;
720
721 for (i = 0; i < I40E_MAX_VEB; i++) {
722 veb = pf->veb[i];
723 if (veb)
724 i40e_dbg_dump_veb_seid(pf, veb->seid);
725 }
726}
727
728/**
729 * i40e_dbg_dump_vf - dump VF info
730 * @pf: the i40e_pf created in command write
731 * @vf_id: the vf_id from the user
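 *
 * Backs "dump vf <vf_id>"; prints the VF's LAN VSI id and seid, its queue
 * pair count and the number of MDD events seen.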
732 **/
733static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
734{
735 struct i40e_vf *vf;
736 struct i40e_vsi *vsi;
737
738 if (!pf->num_alloc_vfs) {
739 dev_info(&pf->pdev->dev, "no VFs allocated\n");
740 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
741 vf = &pf->vf[vf_id];
742 vsi = pf->vsi[vf->lan_vsi_idx];
743 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
744 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
745 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
746 vf->num_mdd_events);
747 } else {
748 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
749 }
750}
751
752/**
753 * i40e_dbg_dump_vf_all - dump VF info for all VFs
754 * @pf: the i40e_pf created in command write
755 **/
756static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
757{
758 int i;
759
760 if (!pf->num_alloc_vfs)
761 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
762 else
763 for (i = 0; i < pf->num_alloc_vfs; i++)
764 i40e_dbg_dump_vf(pf, i);
765}
766
767/**
768 * i40e_dbg_command_write - write into command datum
769 * @filp: the opened file
770 * @buffer: where to find the user's data
771 * @count: the length of the user's data
772 * @ppos: file position offset
773 **/
774static ssize_t i40e_dbg_command_write(struct file *filp,
775 const char __user *buffer,
776 size_t count, loff_t *ppos)
777{
778 struct i40e_pf *pf = filp->private_data;
779 char *cmd_buf, *cmd_buf_tmp;
780 int bytes_not_copied;
781 struct i40e_vsi *vsi;
782 int vsi_seid;
783 int veb_seid;
784 int vf_id;
785 int cnt;
786
787 /* don't allow partial writes */
788 if (*ppos != 0)
789 return 0;
790
791 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
792 if (!cmd_buf)
793 return count;
794 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
795 if (bytes_not_copied) {
796 kfree(cmd_buf);
797 return -EFAULT;
798 }
799 cmd_buf[count] = '\0';
800
801 cmd_buf_tmp = strchr(cmd_buf, '\n');
802 if (cmd_buf_tmp) {
803 *cmd_buf_tmp = '\0';
804 count = cmd_buf_tmp - cmd_buf + 1;
805 }
806
807 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
808 vsi_seid = -1;
809 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
810 if (cnt == 0) {
811 /* default to PF VSI */
812 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
813 } else if (vsi_seid < 0) {
814 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
815 vsi_seid);
816 goto command_write_done;
817 }
818
819 /* By default we are in VEPA mode; if this is the first VF/VMDq
820 * VSI to be added, switch to VEB mode.
821 */
822 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
823 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
824 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
825 }
826
827 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
828 if (vsi)
829 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
830 vsi->seid, vsi->uplink_seid);
831 else
832 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
833
834 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
835 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
836 if (cnt != 1) {
837 dev_info(&pf->pdev->dev,
838 "del vsi: bad command string, cnt=%d\n",
839 cnt);
840 goto command_write_done;
841 }
842 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
843 if (!vsi) {
844 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
845 vsi_seid);
846 goto command_write_done;
847 }
848
849 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
850 i40e_vsi_release(vsi);
851
852 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
853 struct i40e_veb *veb;
854 int uplink_seid, i;
855
856 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
857 if (cnt != 2) {
858 dev_info(&pf->pdev->dev,
859 "add relay: bad command string, cnt=%d\n",
860 cnt);
861 goto command_write_done;
862 } else if (uplink_seid < 0) {
863 dev_info(&pf->pdev->dev,
864 "add relay %d: bad uplink seid\n",
865 uplink_seid);
866 goto command_write_done;
867 }
868
869 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
870 if (!vsi) {
871 dev_info(&pf->pdev->dev,
872 "add relay: VSI %d not found\n", vsi_seid);
873 goto command_write_done;
874 }
875
876 for (i = 0; i < I40E_MAX_VEB; i++)
877 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
878 break;
879 if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
880 uplink_seid != pf->mac_seid) {
881 dev_info(&pf->pdev->dev,
882 "add relay: relay uplink %d not found\n",
883 uplink_seid);
884 goto command_write_done;
885 }
886
887 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
888 vsi->tc_config.enabled_tc);
889 if (veb)
890 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
891 else
892 dev_info(&pf->pdev->dev, "add relay failed\n");
893
894 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
895 int i;
896 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
897 if (cnt != 1) {
898 dev_info(&pf->pdev->dev,
899 "del relay: bad command string, cnt=%d\n",
900 cnt);
901 goto command_write_done;
902 } else if (veb_seid < 0) {
903 dev_info(&pf->pdev->dev,
904 "del relay %d: bad relay seid\n", veb_seid);
905 goto command_write_done;
906 }
907
908 /* find the veb */
909 for (i = 0; i < I40E_MAX_VEB; i++)
910 if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
911 break;
912 if (i >= I40E_MAX_VEB) {
913 dev_info(&pf->pdev->dev,
914 "del relay: relay %d not found\n", veb_seid);
915 goto command_write_done;
916 }
917
918 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
919 i40e_veb_release(pf->veb[i]);
920 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
921 unsigned int v;
922 int ret;
923 u16 vid;
924
925 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
926 if (cnt != 2) {
927 dev_info(&pf->pdev->dev,
928 "add pvid: bad command string, cnt=%d\n", cnt);
929 goto command_write_done;
930 }
931
932 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
933 if (!vsi) {
934 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
935 vsi_seid);
936 goto command_write_done;
937 }
938
939 vid = v;
940 ret = i40e_vsi_add_pvid(vsi, vid);
941 if (!ret)
942 dev_info(&pf->pdev->dev,
943 "add pvid: %d added to VSI %d\n",
944 vid, vsi_seid);
945 else
946 dev_info(&pf->pdev->dev,
947 "add pvid: %d to VSI %d failed, ret=%d\n",
948 vid, vsi_seid, ret);
949
950 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
951
952 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
953 if (cnt != 1) {
954 dev_info(&pf->pdev->dev,
955 "del pvid: bad command string, cnt=%d\n",
956 cnt);
957 goto command_write_done;
958 }
959
960 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
961 if (!vsi) {
962 dev_info(&pf->pdev->dev,
963 "del pvid: VSI %d not found\n", vsi_seid);
964 goto command_write_done;
965 }
966
967 i40e_vsi_remove_pvid(vsi);
968 dev_info(&pf->pdev->dev,
969 "del pvid: removed from VSI %d\n", vsi_seid);
970
971 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
972 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
973 i40e_fetch_switch_configuration(pf, true);
974 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
975 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
976 if (cnt > 0)
977 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
978 else
979 i40e_dbg_dump_vsi_no_seid(pf);
980 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
981 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
982 if (cnt > 0)
983 i40e_dbg_dump_veb_seid(pf, vsi_seid);
984 else
985 i40e_dbg_dump_veb_all(pf);
986 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
987 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
988 if (cnt > 0)
989 i40e_dbg_dump_vf(pf, vf_id);
990 else
991 i40e_dbg_dump_vf_all(pf);
992 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
993 int ring_id, desc_n;
994 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
995 cnt = sscanf(&cmd_buf[12], "%i %i %i",
996 &vsi_seid, &ring_id, &desc_n);
997 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
998 desc_n, pf, RING_TYPE_RX);
999 } else if (strncmp(&cmd_buf[10], "tx", 2)
1000 == 0) {
1001 cnt = sscanf(&cmd_buf[12], "%i %i %i",
1002 &vsi_seid, &ring_id, &desc_n);
1003 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1004 desc_n, pf, RING_TYPE_TX);
1005 } else if (strncmp(&cmd_buf[10], "xdp", 3)
1006 == 0) {
1007 cnt = sscanf(&cmd_buf[13], "%i %i %i",
1008 &vsi_seid, &ring_id, &desc_n);
1009 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
1010 desc_n, pf, RING_TYPE_XDP);
1011 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
1012 i40e_dbg_dump_aq_desc(pf);
1013 } else {
1014 dev_info(&pf->pdev->dev,
1015 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1016 dev_info(&pf->pdev->dev,
1017 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1018 dev_info(&pf->pdev->dev,
1019 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1020 dev_info(&pf->pdev->dev, "dump desc aq\n");
1021 }
1022 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1023 dev_info(&pf->pdev->dev,
1024 "core reset count: %d\n", pf->corer_count);
1025 dev_info(&pf->pdev->dev,
1026 "global reset count: %d\n", pf->globr_count);
1027 dev_info(&pf->pdev->dev,
1028 "emp reset count: %d\n", pf->empr_count);
1029 dev_info(&pf->pdev->dev,
1030 "pf reset count: %d\n", pf->pfr_count);
1031 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1032 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1033 struct i40e_dcbx_config *cfg =
1034 &pf->hw.local_dcbx_config;
1035 struct i40e_dcbx_config *r_cfg =
1036 &pf->hw.remote_dcbx_config;
1037 int i, ret;
1038 u16 switch_id;
1039
1040 bw_data = kzalloc(sizeof(
1041 struct i40e_aqc_query_port_ets_config_resp),
1042 GFP_KERNEL);
1043 if (!bw_data) {
1044 ret = -ENOMEM;
1045 goto command_write_done;
1046 }
1047
1048 vsi = pf->vsi[pf->lan_vsi];
1049 switch_id =
1050 le16_to_cpu(vsi->info.switch_id) &
1051 I40E_AQ_VSI_SW_ID_MASK;
1052
1053 ret = i40e_aq_query_port_ets_config(&pf->hw,
1054 switch_id,
1055 bw_data, NULL);
1056 if (ret) {
1057 dev_info(&pf->pdev->dev,
1058 "Query Port ETS Config AQ command failed =0x%x\n",
1059 pf->hw.aq.asq_last_status);
1060 kfree(bw_data);
1061 bw_data = NULL;
1062 goto command_write_done;
1063 }
1064 dev_info(&pf->pdev->dev,
1065 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1066 bw_data->tc_valid_bits,
1067 bw_data->tc_strict_priority_bits,
1068 le16_to_cpu(bw_data->tc_bw_max[0]),
1069 le16_to_cpu(bw_data->tc_bw_max[1]));
1070 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1071 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1072 bw_data->tc_bw_share_credits[i],
1073 le16_to_cpu(bw_data->tc_bw_limits[i]));
1074 }
1075
1076 kfree(bw_data);
1077 bw_data = NULL;
1078
1079 dev_info(&pf->pdev->dev,
1080 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1081 dev_info(&pf->pdev->dev,
1082 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1083 cfg->etscfg.willing, cfg->etscfg.cbs,
1084 cfg->etscfg.maxtcs);
1085 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1086 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1087 i, cfg->etscfg.prioritytable[i],
1088 cfg->etscfg.tcbwtable[i],
1089 cfg->etscfg.tsatable[i]);
1090 }
1091 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1092 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1093 i, cfg->etsrec.prioritytable[i],
1094 cfg->etsrec.tcbwtable[i],
1095 cfg->etsrec.tsatable[i]);
1096 }
1097 dev_info(&pf->pdev->dev,
1098 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1099 cfg->pfc.willing, cfg->pfc.mbc,
1100 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1101 dev_info(&pf->pdev->dev,
1102 "port app_table: num_apps=%d\n", cfg->numapps);
1103 for (i = 0; i < cfg->numapps; i++) {
1104 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1105 i, cfg->app[i].priority,
1106 cfg->app[i].selector,
1107 cfg->app[i].protocolid);
1108 }
1109 /* Peer TLV DCBX data */
1110 dev_info(&pf->pdev->dev,
1111 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1112 r_cfg->etscfg.willing,
1113 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1114 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1115 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1116 i, r_cfg->etscfg.prioritytable[i],
1117 r_cfg->etscfg.tcbwtable[i],
1118 r_cfg->etscfg.tsatable[i]);
1119 }
1120 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1121 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1122 i, r_cfg->etsrec.prioritytable[i],
1123 r_cfg->etsrec.tcbwtable[i],
1124 r_cfg->etsrec.tsatable[i]);
1125 }
1126 dev_info(&pf->pdev->dev,
1127 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1128 r_cfg->pfc.willing,
1129 r_cfg->pfc.mbc,
1130 r_cfg->pfc.pfccap,
1131 r_cfg->pfc.pfcenable);
1132 dev_info(&pf->pdev->dev,
1133 "remote port app_table: num_apps=%d\n",
1134 r_cfg->numapps);
1135 for (i = 0; i < r_cfg->numapps; i++) {
1136 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1137 i, r_cfg->app[i].priority,
1138 r_cfg->app[i].selector,
1139 r_cfg->app[i].protocolid);
1140 }
1141 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1142 int cluster_id, table_id;
1143 int index, ret;
1144 u16 buff_len = 4096;
1145 u32 next_index;
1146 u8 next_table;
1147 u8 *buff;
1148 u16 rlen;
1149
1150 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1151 &cluster_id, &table_id, &index);
1152 if (cnt != 3) {
1153 dev_info(&pf->pdev->dev,
1154 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1155 goto command_write_done;
1156 }
1157
1158 dev_info(&pf->pdev->dev,
1159 "AQ debug dump fwdata params %x %x %x %x\n",
1160 cluster_id, table_id, index, buff_len);
1161 buff = kzalloc(buff_len, GFP_KERNEL);
1162 if (!buff)
1163 goto command_write_done;
1164
1165 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1166 index, buff_len, buff, &rlen,
1167 &next_table, &next_index,
1168 NULL);
1169 if (ret) {
1170 dev_info(&pf->pdev->dev,
1171 "debug dump fwdata AQ Failed %d 0x%x\n",
1172 ret, pf->hw.aq.asq_last_status);
1173 kfree(buff);
1174 buff = NULL;
1175 goto command_write_done;
1176 }
1177 dev_info(&pf->pdev->dev,
1178 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1179 rlen, next_table, next_index);
1180 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1181 DUMP_PREFIX_OFFSET, 16, 1,
1182 buff, rlen, true);
1183 kfree(buff);
1184 buff = NULL;
1185 } else {
1186 dev_info(&pf->pdev->dev,
1187 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1188 dev_info(&pf->pdev->dev, "dump switch\n");
1189 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1190 dev_info(&pf->pdev->dev, "dump reset stats\n");
1191 dev_info(&pf->pdev->dev, "dump port\n");
1192 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1193 dev_info(&pf->pdev->dev,
1194 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1195 }
1196 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1197 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1198 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1199
1200 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1201 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1202 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1203
1204 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1205 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1206 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1207
1208 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1209 u32 address;
1210 u32 value;
1211
1212 cnt = sscanf(&cmd_buf[4], "%i", &address);
1213 if (cnt != 1) {
1214 dev_info(&pf->pdev->dev, "read <reg>\n");
1215 goto command_write_done;
1216 }
1217
1218 /* check the range on address */
1219 if (address > (pf->ioremap_len - sizeof(u32))) {
1220 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1221 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1222 goto command_write_done;
1223 }
1224
1225 value = rd32(&pf->hw, address);
1226 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1227 address, value);
1228
1229 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1230 u32 address, value;
1231
1232 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1233 if (cnt != 2) {
1234 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1235 goto command_write_done;
1236 }
1237
1238 /* check the range on address */
1239 if (address > (pf->ioremap_len - sizeof(u32))) {
1240 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1241 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1242 goto command_write_done;
1243 }
1244 wr32(&pf->hw, address, value);
1245 value = rd32(&pf->hw, address);
1246 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1247 address, value);
1248 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1249 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1250 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1251 if (cnt == 0) {
1252 int i;
1253
1254 for (i = 0; i < pf->num_alloc_vsi; i++)
1255 i40e_vsi_reset_stats(pf->vsi[i]);
1256 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1257 } else if (cnt == 1) {
1258 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1259 if (!vsi) {
1260 dev_info(&pf->pdev->dev,
1261 "clear_stats vsi: bad vsi %d\n",
1262 vsi_seid);
1263 goto command_write_done;
1264 }
1265 i40e_vsi_reset_stats(vsi);
1266 dev_info(&pf->pdev->dev,
1267 "vsi clear stats called for vsi %d\n",
1268 vsi_seid);
1269 } else {
1270 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1271 }
1272 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1273 if (pf->hw.partition_id == 1) {
1274 i40e_pf_reset_stats(pf);
1275 dev_info(&pf->pdev->dev, "port stats cleared\n");
1276 } else {
1277 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1278 }
1279 } else {
1280 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1281 }
1282 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1283 struct i40e_aq_desc *desc;
1284 int ret;
1285
1286 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1287 if (!desc)
1288 goto command_write_done;
1289 cnt = sscanf(&cmd_buf[11],
1290 "%hi %hi %hi %hi %i %i %i %i %i %i",
1291 &desc->flags,
1292 &desc->opcode, &desc->datalen, &desc->retval,
1293 &desc->cookie_high, &desc->cookie_low,
1294 &desc->params.internal.param0,
1295 &desc->params.internal.param1,
1296 &desc->params.internal.param2,
1297 &desc->params.internal.param3);
1298 if (cnt != 10) {
1299 dev_info(&pf->pdev->dev,
1300 "send aq_cmd: bad command string, cnt=%d\n",
1301 cnt);
1302 kfree(desc);
1303 desc = NULL;
1304 goto command_write_done;
1305 }
1306 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1307 if (!ret) {
1308 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1309 } else if (ret == -EIO) {
1310 dev_info(&pf->pdev->dev,
1311 "AQ command send failed Opcode %x AQ Error: %d\n",
1312 desc->opcode, pf->hw.aq.asq_last_status);
1313 } else {
1314 dev_info(&pf->pdev->dev,
1315 "AQ command send failed Opcode %x Status: %d\n",
1316 desc->opcode, ret);
1317 }
1318 dev_info(&pf->pdev->dev,
1319 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1320 desc->flags, desc->opcode, desc->datalen, desc->retval,
1321 desc->cookie_high, desc->cookie_low,
1322 desc->params.internal.param0,
1323 desc->params.internal.param1,
1324 desc->params.internal.param2,
1325 desc->params.internal.param3);
1326 kfree(desc);
1327 desc = NULL;
1328 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1329 struct i40e_aq_desc *desc;
1330 u16 buffer_len;
1331 u8 *buff;
1332 int ret;
1333
1334 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1335 if (!desc)
1336 goto command_write_done;
1337 cnt = sscanf(&cmd_buf[20],
1338 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1339 &desc->flags,
1340 &desc->opcode, &desc->datalen, &desc->retval,
1341 &desc->cookie_high, &desc->cookie_low,
1342 &desc->params.internal.param0,
1343 &desc->params.internal.param1,
1344 &desc->params.internal.param2,
1345 &desc->params.internal.param3,
1346 &buffer_len);
1347 if (cnt != 11) {
1348 dev_info(&pf->pdev->dev,
1349 "send indirect aq_cmd: bad command string, cnt=%d\n",
1350 cnt);
1351 kfree(desc);
1352 desc = NULL;
1353 goto command_write_done;
1354 }
1355 /* Just stub a buffer big enough in case user messed up */
1356 if (buffer_len == 0)
1357 buffer_len = 1280;
1358
1359 buff = kzalloc(buffer_len, GFP_KERNEL);
1360 if (!buff) {
1361 kfree(desc);
1362 desc = NULL;
1363 goto command_write_done;
1364 }
1365 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1366 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1367 buffer_len, NULL);
1368 if (!ret) {
1369 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1370 } else if (ret == -EIO) {
1371 dev_info(&pf->pdev->dev,
1372 "AQ command send failed Opcode %x AQ Error: %d\n",
1373 desc->opcode, pf->hw.aq.asq_last_status);
1374 } else {
1375 dev_info(&pf->pdev->dev,
1376 "AQ command send failed Opcode %x Status: %d\n",
1377 desc->opcode, ret);
1378 }
1379 dev_info(&pf->pdev->dev,
1380 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1381 desc->flags, desc->opcode, desc->datalen, desc->retval,
1382 desc->cookie_high, desc->cookie_low,
1383 desc->params.internal.param0,
1384 desc->params.internal.param1,
1385 desc->params.internal.param2,
1386 desc->params.internal.param3);
1387 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1388 DUMP_PREFIX_OFFSET, 16, 1,
1389 buff, buffer_len, true);
1390 kfree(buff);
1391 buff = NULL;
1392 kfree(desc);
1393 desc = NULL;
1394 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1395 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1396 i40e_get_current_fd_count(pf));
1397 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1398 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1399 int ret;
1400
1401 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1402 if (ret) {
1403 dev_info(&pf->pdev->dev,
1404 "Stop LLDP AQ command failed =0x%x\n",
1405 pf->hw.aq.asq_last_status);
1406 goto command_write_done;
1407 }
1408 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1409 pf->hw.mac.addr,
1410 ETH_P_LLDP, 0,
1411 pf->vsi[pf->lan_vsi]->seid,
1412 0, true, NULL, NULL);
1413 if (ret) {
1414 dev_info(&pf->pdev->dev,
1415 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1416 __func__, pf->hw.aq.asq_last_status);
1417 goto command_write_done;
1418 }
1419#ifdef CONFIG_I40E_DCB
1420 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1421 DCB_CAP_DCBX_VER_IEEE;
1422#endif /* CONFIG_I40E_DCB */
1423 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1424 int ret;
1425
1426 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1427 pf->hw.mac.addr,
1428 ETH_P_LLDP, 0,
1429 pf->vsi[pf->lan_vsi]->seid,
1430 0, false, NULL, NULL);
1431 if (ret) {
1432 dev_info(&pf->pdev->dev,
1433 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1434 __func__, pf->hw.aq.asq_last_status);
1435 /* Continue and start FW LLDP anyway */
1436 }
1437
1438 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1439 if (ret) {
1440 dev_info(&pf->pdev->dev,
1441 "Start LLDP AQ command failed =0x%x\n",
1442 pf->hw.aq.asq_last_status);
1443 goto command_write_done;
1444 }
1445#ifdef CONFIG_I40E_DCB
1446 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1447 DCB_CAP_DCBX_VER_IEEE;
1448#endif /* CONFIG_I40E_DCB */
1449 } else if (strncmp(&cmd_buf[5],
1450 "get local", 9) == 0) {
1451 u16 llen, rlen;
1452 int ret;
1453 u8 *buff;
1454
1455 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1456 if (!buff)
1457 goto command_write_done;
1458
1459 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1460 I40E_AQ_LLDP_MIB_LOCAL,
1461 buff, I40E_LLDPDU_SIZE,
1462 &llen, &rlen, NULL);
1463 if (ret) {
1464 dev_info(&pf->pdev->dev,
1465 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1466 pf->hw.aq.asq_last_status);
1467 kfree(buff);
1468 buff = NULL;
1469 goto command_write_done;
1470 }
1471 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1472 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1473 DUMP_PREFIX_OFFSET, 16, 1,
1474 buff, I40E_LLDPDU_SIZE, true);
1475 kfree(buff);
1476 buff = NULL;
1477 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1478 u16 llen, rlen;
1479 int ret;
1480 u8 *buff;
1481
1482 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1483 if (!buff)
1484 goto command_write_done;
1485
1486 ret = i40e_aq_get_lldp_mib(&pf->hw,
1487 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1488 I40E_AQ_LLDP_MIB_REMOTE,
1489 buff, I40E_LLDPDU_SIZE,
1490 &llen, &rlen, NULL);
1491 if (ret) {
1492 dev_info(&pf->pdev->dev,
1493 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1494 pf->hw.aq.asq_last_status);
1495 kfree(buff);
1496 buff = NULL;
1497 goto command_write_done;
1498 }
1499 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1500 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1501 DUMP_PREFIX_OFFSET, 16, 1,
1502 buff, I40E_LLDPDU_SIZE, true);
1503 kfree(buff);
1504 buff = NULL;
1505 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1506 int ret;
1507
1508 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1509 true, NULL);
1510 if (ret) {
1511 dev_info(&pf->pdev->dev,
1512 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1513 pf->hw.aq.asq_last_status);
1514 goto command_write_done;
1515 }
1516 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1517 int ret;
1518
1519 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1520 false, NULL);
1521 if (ret) {
1522 dev_info(&pf->pdev->dev,
1523 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1524 pf->hw.aq.asq_last_status);
1525 goto command_write_done;
1526 }
1527 }
1528 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1529 u16 buffer_len, bytes;
1530 u16 module;
1531 u32 offset;
1532 u16 *buff;
1533 int ret;
1534
1535 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1536 &module, &offset, &buffer_len);
1537 if (cnt == 0) {
1538 module = 0;
1539 offset = 0;
1540 buffer_len = 0;
1541 } else if (cnt == 1) {
1542 offset = 0;
1543 buffer_len = 0;
1544 } else if (cnt == 2) {
1545 buffer_len = 0;
1546 } else if (cnt > 3) {
1547 dev_info(&pf->pdev->dev,
1548 "nvm read: bad command string, cnt=%d\n", cnt);
1549 goto command_write_done;
1550 }
1551
1552 /* set the max length */
1553 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1554
1555 bytes = 2 * buffer_len;
1556
1557 /* read at least 1k bytes, no more than 4kB */
1558 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1559 buff = kzalloc(bytes, GFP_KERNEL);
1560 if (!buff)
1561 goto command_write_done;
1562
1563 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1564 if (ret) {
1565 dev_info(&pf->pdev->dev,
1566 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1567 ret, pf->hw.aq.asq_last_status);
1568 kfree(buff);
1569 goto command_write_done;
1570 }
1571
1572 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1573 bytes, (u8 *)buff, true, NULL);
1574 i40e_release_nvm(&pf->hw);
1575 if (ret) {
1576 dev_info(&pf->pdev->dev,
1577 "Read NVM AQ failed err=%d status=0x%x\n",
1578 ret, pf->hw.aq.asq_last_status);
1579 } else {
1580 dev_info(&pf->pdev->dev,
1581 "Read NVM module=0x%x offset=0x%x words=%d\n",
1582 module, offset, buffer_len);
1583 if (bytes)
1584 print_hex_dump(KERN_INFO, "NVM Dump: ",
1585 DUMP_PREFIX_OFFSET, 16, 2,
1586 buff, bytes, true);
1587 }
1588 kfree(buff);
1589 buff = NULL;
1590 } else {
1591 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1592 dev_info(&pf->pdev->dev, "available commands\n");
1593 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1594 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1595 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1596 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1597 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1598 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1599 dev_info(&pf->pdev->dev, " dump switch\n");
1600 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1601 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1602 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1603 dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1604 dev_info(&pf->pdev->dev, " dump desc aq\n");
1605 dev_info(&pf->pdev->dev, " dump reset stats\n");
1606 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
1607 dev_info(&pf->pdev->dev, " read <reg>\n");
1608 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1609 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1610 dev_info(&pf->pdev->dev, " clear_stats port\n");
1611 dev_info(&pf->pdev->dev, " pfr\n");
1612 dev_info(&pf->pdev->dev, " corer\n");
1613 dev_info(&pf->pdev->dev, " globr\n");
1614 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
1615 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
1616 dev_info(&pf->pdev->dev, " fd current cnt\n");
1617 dev_info(&pf->pdev->dev, " lldp start\n");
1618 dev_info(&pf->pdev->dev, " lldp stop\n");
1619 dev_info(&pf->pdev->dev, " lldp get local\n");
1620 dev_info(&pf->pdev->dev, " lldp get remote\n");
1621 dev_info(&pf->pdev->dev, " lldp event on\n");
1622 dev_info(&pf->pdev->dev, " lldp event off\n");
1623 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
1624 }
1625
1626command_write_done:
1627 kfree(cmd_buf);
1628 cmd_buf = NULL;
1629 return count;
1630}
1631
1632static const struct file_operations i40e_dbg_command_fops = {
1633 .owner = THIS_MODULE,
1634 .open = simple_open,
1635 .read = i40e_dbg_command_read,
1636 .write = i40e_dbg_command_write,
1637};
1638
1639/**************************************************************
1640 * netdev_ops
1641 * The netdev_ops entry in debugfs is for giving the driver commands
1642 * to be executed from the netdev operations.
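 *
 * Illustrative usage (a sketch; the "netdev_ops" file name and debugfs
 * path are assumptions, matching the "command" file above):
 *   echo "change_mtu <vsi_seid> <mtu>" > netdev_ops
 *   echo "set_rx_mode <vsi_seid>" > netdev_ops
 *   echo "napi <vsi_seid>" > netdev_ops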
1643 **************************************************************/
1644static char i40e_dbg_netdev_ops_buf[256] = "";
1645
1646/**
1647 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
1648 * @filp: the opened file
1649 * @buffer: where to write the data for the user to read
1650 * @count: the size of the user's buffer
1651 * @ppos: file position offset
1652 **/
1653static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
1654 size_t count, loff_t *ppos)
1655{
1656 struct i40e_pf *pf = filp->private_data;
1657 int bytes_not_copied;
1658 int buf_size = 256;
1659 char *buf;
1660 int len;
1661
1662	/* don't allow partial reads */
1663 if (*ppos != 0)
1664 return 0;
1665 if (count < buf_size)
1666 return -ENOSPC;
1667
1668 buf = kzalloc(buf_size, GFP_KERNEL);
1669 if (!buf)
1670 return -ENOSPC;
1671
1672 len = snprintf(buf, buf_size, "%s: %s\n",
1673 pf->vsi[pf->lan_vsi]->netdev->name,
1674 i40e_dbg_netdev_ops_buf);
1675
1676 bytes_not_copied = copy_to_user(buffer, buf, len);
1677 kfree(buf);
1678
1679 if (bytes_not_copied)
1680 return -EFAULT;
1681
1682 *ppos = len;
1683 return len;
1684}
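/*
 * A read returns a single line of the form
 * "<lan-netdev-name>: <last netdev_ops command>", for example (names are
 * hypothetical):
 *
 *   # cat /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *   eth2: napi 390
 *
 * Only one full read from offset 0 is supported, and the caller's buffer
 * must be at least 256 bytes, otherwise -ENOSPC is returned.
 */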
1685
1686/**
1687 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
1688 * @filp: the opened file
1689 * @buffer: where to find the user's data
1690 * @count: the length of the user's data
1691 * @ppos: file position offset
1692 **/
1693static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1694 const char __user *buffer,
1695 size_t count, loff_t *ppos)
1696{
1697 struct i40e_pf *pf = filp->private_data;
1698 int bytes_not_copied;
1699 struct i40e_vsi *vsi;
1700 char *buf_tmp;
1701 int vsi_seid;
1702 int i, cnt;
1703
1704 /* don't allow partial writes */
1705 if (*ppos != 0)
1706 return 0;
1707 if (count >= sizeof(i40e_dbg_netdev_ops_buf))
1708 return -ENOSPC;
1709
1710 memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
1711 bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
1712 buffer, count);
1713 if (bytes_not_copied)
1714 return -EFAULT;
1715 i40e_dbg_netdev_ops_buf[count] = '\0';
1716
1717 buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
1718 if (buf_tmp) {
1719 *buf_tmp = '\0';
1720 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
1721 }
1722
1723 if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
1724 int mtu;
1725
1726 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
1727 &vsi_seid, &mtu);
1728 if (cnt != 2) {
1729 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
1730 goto netdev_ops_write_done;
1731 }
1732 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1733 if (!vsi) {
1734 dev_info(&pf->pdev->dev,
1735 "change_mtu: VSI %d not found\n", vsi_seid);
1736 } else if (!vsi->netdev) {
1737 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
1738 vsi_seid);
1739 } else if (rtnl_trylock()) {
1740 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
1741 mtu);
1742 rtnl_unlock();
1743 dev_info(&pf->pdev->dev, "change_mtu called\n");
1744 } else {
1745 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1746 }
1747
1748 } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
1749 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1750 if (cnt != 1) {
1751 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
1752 goto netdev_ops_write_done;
1753 }
1754 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1755 if (!vsi) {
1756 dev_info(&pf->pdev->dev,
1757 "set_rx_mode: VSI %d not found\n", vsi_seid);
1758 } else if (!vsi->netdev) {
1759 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
1760 vsi_seid);
1761 } else if (rtnl_trylock()) {
1762 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
1763 rtnl_unlock();
1764 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
1765 } else {
1766 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1767 }
1768
1769 } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
1770 cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
1771 if (cnt != 1) {
1772 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
1773 goto netdev_ops_write_done;
1774 }
1775 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1776 if (!vsi) {
1777 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
1778 vsi_seid);
1779 } else if (!vsi->netdev) {
1780 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
1781 vsi_seid);
1782 } else {
1783 for (i = 0; i < vsi->num_q_vectors; i++)
1784 napi_schedule(&vsi->q_vectors[i]->napi);
1785 dev_info(&pf->pdev->dev, "napi called\n");
1786 }
1787 } else {
1788 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
1789 i40e_dbg_netdev_ops_buf);
1790 dev_info(&pf->pdev->dev, "available commands\n");
1791 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
1792 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
1793 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
1794 }
1795netdev_ops_write_done:
1796 return count;
1797}
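/*
 * Usage sketch for the netdev_ops commands parsed above (device path and
 * SEID are hypothetical):
 *
 *   # cd /sys/kernel/debug/i40e/0000:01:00.0
 *   # echo "change_mtu 390 9000" > netdev_ops
 *   # echo "set_rx_mode 390"     > netdev_ops
 *   # echo "napi 390"            > netdev_ops
 *
 * change_mtu and set_rx_mode invoke the real ndo callbacks and therefore
 * need the RTNL lock; rtnl_trylock() is used, so the write may ask the
 * user to retry.  napi simply schedules every q_vector's NAPI context.
 */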
1798
1799static const struct file_operations i40e_dbg_netdev_ops_fops = {
1800 .owner = THIS_MODULE,
1801 .open = simple_open,
1802 .read = i40e_dbg_netdev_ops_read,
1803 .write = i40e_dbg_netdev_ops_write,
1804};
1805
1806/**
1807 * i40e_dbg_pf_init - setup the debugfs directory for the PF
1808 * @pf: the PF that is starting up
1809 **/
1810void i40e_dbg_pf_init(struct i40e_pf *pf)
1811{
1812 const char *name = pci_name(pf->pdev);
1813
1814 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
1815
1816 debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
1817 &i40e_dbg_command_fops);
1818
1819 debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
1820 &i40e_dbg_netdev_ops_fops);
1821}
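/*
 * Resulting layout, assuming debugfs is mounted at /sys/kernel/debug and a
 * hypothetical function address of 0000:01:00.0:
 *
 *   /sys/kernel/debug/i40e/                      <- i40e_dbg_root
 *   /sys/kernel/debug/i40e/0000:01:00.0/         <- pci_name(pf->pdev)
 *   /sys/kernel/debug/i40e/0000:01:00.0/command
 *   /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *
 * Both files are root-only (0600) and carry the PF pointer as their
 * private data via simple_open().
 */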
1822
1823/**
1824 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
1825 * @pf: the PF that is stopping
1826 **/
1827void i40e_dbg_pf_exit(struct i40e_pf *pf)
1828{
1829 debugfs_remove_recursive(pf->i40e_dbg_pf);
1830 pf->i40e_dbg_pf = NULL;
1831}
1832
1833/**
1834 * i40e_dbg_init - start up debugfs for the driver
1835 **/
1836void i40e_dbg_init(void)
1837{
1838 i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
1839 if (IS_ERR(i40e_dbg_root))
1840 pr_info("init of debugfs failed\n");
1841}
1842
1843/**
1844 * i40e_dbg_exit - clean out the driver's debugfs entries
1845 **/
1846void i40e_dbg_exit(void)
1847{
1848 debugfs_remove_recursive(i40e_dbg_root);
1849 i40e_dbg_root = NULL;
1850}
1851
1852#endif /* CONFIG_DEBUG_FS */
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#ifdef CONFIG_DEBUG_FS
5
6#include <linux/fs.h>
7#include <linux/debugfs.h>
8#include <linux/if_bridge.h>
9#include "i40e.h"
10#include "i40e_virtchnl_pf.h"
11
12static struct dentry *i40e_dbg_root;
13
14enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18};
19
20/**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
24 **/
25static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26{
27 if (seid < 0) {
28 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
29
30 return NULL;
31 }
32
33 return i40e_pf_get_vsi_by_seid(pf, seid);
34}
35
36/**************************************************************
37 * command
38 * The command entry in debugfs is for giving the driver commands
39 * to be executed - these may be for changing the internal switch
40 * setup, adding or removing filters, or other things. Many of
41 * these will be useful for some forms of unit testing.
42 **************************************************************/
43static char i40e_dbg_command_buf[256] = "";
44
45/**
46 * i40e_dbg_command_read - read for command datum
47 * @filp: the opened file
48 * @buffer: where to write the data for the user to read
49 * @count: the size of the user's buffer
50 * @ppos: file position offset
51 **/
52static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
53 size_t count, loff_t *ppos)
54{
55 struct i40e_pf *pf = filp->private_data;
56 int bytes_not_copied;
57 int buf_size = 256;
58 char *buf;
59 int len;
60
61 /* don't allow partial reads */
62 if (*ppos != 0)
63 return 0;
64 if (count < buf_size)
65 return -ENOSPC;
66
67 buf = kzalloc(buf_size, GFP_KERNEL);
68 if (!buf)
69 return -ENOSPC;
70
71 len = snprintf(buf, buf_size, "%s: %s\n",
72 pf->vsi[pf->lan_vsi]->netdev->name,
73 i40e_dbg_command_buf);
74
75 bytes_not_copied = copy_to_user(buffer, buf, len);
76 kfree(buf);
77
78 if (bytes_not_copied)
79 return -EFAULT;
80
81 *ppos = len;
82 return len;
83}
84
85static char *i40e_filter_state_string[] = {
86 "INVALID",
87 "NEW",
88 "ACTIVE",
89 "FAILED",
90 "REMOVE",
91};
92
93/**
94 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
95 * @pf: the i40e_pf created in command write
96 * @seid: the seid the user put in
97 **/
98static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
99{
100 struct rtnl_link_stats64 *nstat;
101 struct i40e_mac_filter *f;
102 struct i40e_vsi *vsi;
103 int i, bkt;
104
105 vsi = i40e_dbg_find_vsi(pf, seid);
106 if (!vsi) {
107 dev_info(&pf->pdev->dev,
108 "dump %d: seid not found\n", seid);
109 return;
110 }
111 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
112 if (vsi->netdev) {
113 struct net_device *nd = vsi->netdev;
114
115 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
116 nd->name, nd->state, nd->flags);
117 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
118 (unsigned long int)nd->features);
119 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
120 (unsigned long int)nd->hw_features);
121 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
122 (unsigned long int)nd->vlan_features);
123 }
124 dev_info(&pf->pdev->dev,
125 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
126 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
127 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
128 dev_info(&pf->pdev->dev,
129 " state[%d] = %08lx\n",
130 i, vsi->state[i]);
131 if (vsi == pf->vsi[pf->lan_vsi])
132 dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
133 pf->hw.mac.addr,
134 pf->hw.mac.port_addr);
135 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
136 dev_info(&pf->pdev->dev,
137 " mac_filter_hash: %pM vid=%d, state %s\n",
138 f->macaddr, f->vlan,
139 i40e_filter_state_string[f->state]);
140 }
141 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
142 vsi->active_filters, vsi->promisc_threshold,
143 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
144 "ON" : "OFF"));
145 nstat = i40e_get_vsi_stats_struct(vsi);
146 dev_info(&pf->pdev->dev,
147 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
148 (unsigned long int)nstat->rx_packets,
149 (unsigned long int)nstat->rx_bytes,
150 (unsigned long int)nstat->rx_errors,
151 (unsigned long int)nstat->rx_dropped);
152 dev_info(&pf->pdev->dev,
153 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
154 (unsigned long int)nstat->tx_packets,
155 (unsigned long int)nstat->tx_bytes,
156 (unsigned long int)nstat->tx_errors,
157 (unsigned long int)nstat->tx_dropped);
158 dev_info(&pf->pdev->dev,
159 " net_stats: multicast = %lu, collisions = %lu\n",
160 (unsigned long int)nstat->multicast,
161 (unsigned long int)nstat->collisions);
162 dev_info(&pf->pdev->dev,
163 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
164 (unsigned long int)nstat->rx_length_errors,
165 (unsigned long int)nstat->rx_over_errors,
166 (unsigned long int)nstat->rx_crc_errors);
167 dev_info(&pf->pdev->dev,
168 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
169 (unsigned long int)nstat->rx_frame_errors,
170 (unsigned long int)nstat->rx_fifo_errors,
171 (unsigned long int)nstat->rx_missed_errors);
172 dev_info(&pf->pdev->dev,
173 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
174 (unsigned long int)nstat->tx_aborted_errors,
175 (unsigned long int)nstat->tx_carrier_errors,
176 (unsigned long int)nstat->tx_fifo_errors);
177 dev_info(&pf->pdev->dev,
178 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
179 (unsigned long int)nstat->tx_heartbeat_errors,
180 (unsigned long int)nstat->tx_window_errors);
181 dev_info(&pf->pdev->dev,
182 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
183 (unsigned long int)nstat->rx_compressed,
184 (unsigned long int)nstat->tx_compressed);
185 dev_info(&pf->pdev->dev,
186 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
187 (unsigned long int)vsi->net_stats_offsets.rx_packets,
188 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
189 (unsigned long int)vsi->net_stats_offsets.rx_errors,
190 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
191 dev_info(&pf->pdev->dev,
192 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
193 (unsigned long int)vsi->net_stats_offsets.tx_packets,
194 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
195 (unsigned long int)vsi->net_stats_offsets.tx_errors,
196 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
197 dev_info(&pf->pdev->dev,
198 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
199 (unsigned long int)vsi->net_stats_offsets.multicast,
200 (unsigned long int)vsi->net_stats_offsets.collisions);
201 dev_info(&pf->pdev->dev,
202 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
203 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
204 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
205 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
206 dev_info(&pf->pdev->dev,
207 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
208 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
209 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
210 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
211 dev_info(&pf->pdev->dev,
212 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
213 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
214 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
215 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
216 dev_info(&pf->pdev->dev,
217 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
218 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
219 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
220 dev_info(&pf->pdev->dev,
221 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
222 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
223 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
224 dev_info(&pf->pdev->dev,
225 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
226 vsi->tx_restart, vsi->tx_busy,
227 vsi->rx_buf_failed, vsi->rx_page_failed);
228 rcu_read_lock();
229 for (i = 0; i < vsi->num_queue_pairs; i++) {
230 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
231
232 if (!rx_ring)
233 continue;
234
235 dev_info(&pf->pdev->dev,
236 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
237 i, *rx_ring->state,
238 rx_ring->queue_index,
239 rx_ring->reg_idx);
240 dev_info(&pf->pdev->dev,
241 " rx_rings[%i]: rx_buf_len = %d\n",
242 i, rx_ring->rx_buf_len);
243 dev_info(&pf->pdev->dev,
244 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
245 i,
246 rx_ring->next_to_use,
247 rx_ring->next_to_clean,
248 rx_ring->ring_active);
249 dev_info(&pf->pdev->dev,
250 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
251 i, rx_ring->stats.packets,
252 rx_ring->stats.bytes,
253 rx_ring->rx_stats.non_eop_descs);
254 dev_info(&pf->pdev->dev,
255 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
256 i,
257 rx_ring->rx_stats.alloc_page_failed,
258 rx_ring->rx_stats.alloc_buff_failed);
259 dev_info(&pf->pdev->dev,
260 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
261 i,
262 rx_ring->rx_stats.page_reuse_count);
263 dev_info(&pf->pdev->dev,
264 " rx_rings[%i]: size = %i\n",
265 i, rx_ring->size);
266 dev_info(&pf->pdev->dev,
267 " rx_rings[%i]: itr_setting = %d (%s)\n",
268 i, rx_ring->itr_setting,
269 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
270 }
271 for (i = 0; i < vsi->num_queue_pairs; i++) {
272 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
273
274 if (!tx_ring)
275 continue;
276
277 dev_info(&pf->pdev->dev,
278 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
279 i, *tx_ring->state,
280 tx_ring->queue_index,
281 tx_ring->reg_idx);
282 dev_info(&pf->pdev->dev,
283 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
284 i,
285 tx_ring->next_to_use,
286 tx_ring->next_to_clean,
287 tx_ring->ring_active);
288 dev_info(&pf->pdev->dev,
289 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
290 i, tx_ring->stats.packets,
291 tx_ring->stats.bytes,
292 tx_ring->tx_stats.restart_queue);
293 dev_info(&pf->pdev->dev,
294 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
295 i,
296 tx_ring->tx_stats.tx_busy,
297 tx_ring->tx_stats.tx_done_old,
298 tx_ring->tx_stats.tx_stopped);
299 dev_info(&pf->pdev->dev,
300 " tx_rings[%i]: size = %i\n",
301 i, tx_ring->size);
302 dev_info(&pf->pdev->dev,
303 " tx_rings[%i]: DCB tc = %d\n",
304 i, tx_ring->dcb_tc);
305 dev_info(&pf->pdev->dev,
306 " tx_rings[%i]: itr_setting = %d (%s)\n",
307 i, tx_ring->itr_setting,
308 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
309 }
310 if (i40e_enabled_xdp_vsi(vsi)) {
311 for (i = 0; i < vsi->num_queue_pairs; i++) {
312 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
313
314 if (!xdp_ring)
315 continue;
316
317 dev_info(&pf->pdev->dev,
318 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
319 i, *xdp_ring->state,
320 xdp_ring->queue_index,
321 xdp_ring->reg_idx);
322 dev_info(&pf->pdev->dev,
323 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
324 i,
325 xdp_ring->next_to_use,
326 xdp_ring->next_to_clean,
327 xdp_ring->ring_active);
328 dev_info(&pf->pdev->dev,
329 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
330 i, xdp_ring->stats.packets,
331 xdp_ring->stats.bytes,
332 xdp_ring->tx_stats.restart_queue);
333 dev_info(&pf->pdev->dev,
334 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
335 i,
336 xdp_ring->tx_stats.tx_busy,
337 xdp_ring->tx_stats.tx_done_old);
338 dev_info(&pf->pdev->dev,
339 " xdp_rings[%i]: size = %i\n",
340 i, xdp_ring->size);
341 dev_info(&pf->pdev->dev,
342 " xdp_rings[%i]: DCB tc = %d\n",
343 i, xdp_ring->dcb_tc);
344 dev_info(&pf->pdev->dev,
345 " xdp_rings[%i]: itr_setting = %d (%s)\n",
346 i, xdp_ring->itr_setting,
347 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
348 "dynamic" : "fixed");
349 }
350 }
351 rcu_read_unlock();
352 dev_info(&pf->pdev->dev,
353 " work_limit = %d\n",
354 vsi->work_limit);
355 dev_info(&pf->pdev->dev,
356 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
357 vsi->max_frame, vsi->rx_buf_len, 0);
358 dev_info(&pf->pdev->dev,
359 " num_q_vectors = %i, base_vector = %i\n",
360 vsi->num_q_vectors, vsi->base_vector);
361 dev_info(&pf->pdev->dev,
362 " seid = %d, id = %d, uplink_seid = %d\n",
363 vsi->seid, vsi->id, vsi->uplink_seid);
364 dev_info(&pf->pdev->dev,
365 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
366 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
367 vsi->num_rx_desc);
368 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
369 if (vsi->type == I40E_VSI_SRIOV)
370 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
371 dev_info(&pf->pdev->dev,
372 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
373 vsi->info.valid_sections, vsi->info.switch_id);
374 dev_info(&pf->pdev->dev,
375 " info: sw_reserved[] = 0x%02x 0x%02x\n",
376 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
377 dev_info(&pf->pdev->dev,
378 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
379 vsi->info.sec_flags, vsi->info.sec_reserved);
380 dev_info(&pf->pdev->dev,
381 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
382 vsi->info.pvid, vsi->info.fcoe_pvid,
383 vsi->info.port_vlan_flags);
384 dev_info(&pf->pdev->dev,
385 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
386 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
387 vsi->info.pvlan_reserved[2]);
388 dev_info(&pf->pdev->dev,
389 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
390 vsi->info.ingress_table, vsi->info.egress_table);
391 dev_info(&pf->pdev->dev,
392 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
393 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
394 vsi->info.cas_pv_reserved);
395 dev_info(&pf->pdev->dev,
396 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
397 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
398 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
399 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
400 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
401 dev_info(&pf->pdev->dev,
402 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
403 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
404 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
405 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
406 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
407 dev_info(&pf->pdev->dev,
408 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
409 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
410 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
411 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
412 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
413 dev_info(&pf->pdev->dev,
414 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
415 vsi->info.queueing_opt_flags,
416 vsi->info.queueing_opt_reserved[0],
417 vsi->info.queueing_opt_reserved[1],
418 vsi->info.queueing_opt_reserved[2]);
419 dev_info(&pf->pdev->dev,
420 " info: up_enable_bits = 0x%02x\n",
421 vsi->info.up_enable_bits);
422 dev_info(&pf->pdev->dev,
423 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
424 vsi->info.sched_reserved, vsi->info.outer_up_table);
425 dev_info(&pf->pdev->dev,
426 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
427 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
428 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
429 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
430 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
431 dev_info(&pf->pdev->dev,
432 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
433 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
434 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
435 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
436 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
437 dev_info(&pf->pdev->dev,
438 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
439 vsi->info.stat_counter_idx, vsi->info.sched_id);
440 dev_info(&pf->pdev->dev,
441 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
442 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
443 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
444 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
445 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
446 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
447 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
448 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
449 dev_info(&pf->pdev->dev,
450 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
451 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
452 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
453 dev_info(&pf->pdev->dev,
454 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
455 i, vsi->tc_config.tc_info[i].qoffset,
456 vsi->tc_config.tc_info[i].qcount,
457 vsi->tc_config.tc_info[i].netdev_tc);
458 }
459 dev_info(&pf->pdev->dev,
460 " bw: bw_limit = %d, bw_max_quanta = %d\n",
461 vsi->bw_limit, vsi->bw_max_quanta);
462 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
463 dev_info(&pf->pdev->dev,
464 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
465 i, vsi->bw_ets_share_credits[i],
466 vsi->bw_ets_limit_credits[i],
467 vsi->bw_ets_max_quanta[i]);
468 }
469}
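/*
 * Reached via "dump vsi <seid>" written to the command file.  A sketch of
 * the first lines of output for a hypothetical VSI with SEID 390 and
 * netdev eth2:
 *
 *   vsi seid 390
 *       netdev: name = eth2, state = 3, flags = 0x00001003
 *       features = 0x...
 *
 * All of it lands in the kernel log through dev_info().
 */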
470
471/**
472 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
473 * @pf: the i40e_pf created in command write
474 **/
475static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
476{
477 struct i40e_adminq_ring *ring;
478 struct i40e_hw *hw = &pf->hw;
479 char hdr[32];
480 int i;
481
482 snprintf(hdr, sizeof(hdr), "%s %s: ",
483 dev_driver_string(&pf->pdev->dev),
484 dev_name(&pf->pdev->dev));
485
486 /* first the send (command) ring, then the receive (event) ring */
487 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
488 ring = &(hw->aq.asq);
489 for (i = 0; i < ring->count; i++) {
490 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
491
492 dev_info(&pf->pdev->dev,
493 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
494 i, d->flags, d->opcode, d->datalen, d->retval,
495 d->cookie_high, d->cookie_low);
496 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
497 16, 1, d->params.raw, 16, 0);
498 }
499
500 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
501 ring = &(hw->aq.arq);
502 for (i = 0; i < ring->count; i++) {
503 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
504
505 dev_info(&pf->pdev->dev,
506 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
507 i, d->flags, d->opcode, d->datalen, d->retval,
508 d->cookie_high, d->cookie_low);
509 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
510 16, 1, d->params.raw, 16, 0);
511 }
512}
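/*
 * Reached via "dump desc aq".  This only reads the descriptor memory of
 * both admin queue rings - nothing is posted or consumed.  Each entry is
 * the 16 bytes of header fields followed by a hex dump of the 16-byte
 * parameter area.
 */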
513
514/**
515 * i40e_dbg_dump_desc - handles dump desc write into command datum
516 * @cnt: number of arguments that the user supplied
517 * @vsi_seid: vsi id entered by user
518 * @ring_id: ring id entered by user
519 * @desc_n: descriptor number entered by user
520 * @pf: the i40e_pf created in command write
521 * @type: enum describing whether ring is RX, TX or XDP
522 **/
523static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
524 struct i40e_pf *pf, enum ring_type type)
525{
526 bool is_rx_ring = type == RING_TYPE_RX;
527 struct i40e_tx_desc *txd;
528 union i40e_rx_desc *rxd;
529 struct i40e_ring *ring;
530 struct i40e_vsi *vsi;
531 int i;
532
533 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
534 if (!vsi) {
535 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
536 return;
537 }
538 if (vsi->type != I40E_VSI_MAIN &&
539 vsi->type != I40E_VSI_FDIR &&
540 vsi->type != I40E_VSI_VMDQ2) {
541 dev_info(&pf->pdev->dev,
542 "vsi %d type %d descriptor rings not available\n",
543 vsi_seid, vsi->type);
544 return;
545 }
546 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
547 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
548 return;
549 }
550 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
551 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
552 return;
553 }
554 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
555 dev_info(&pf->pdev->dev,
556 "descriptor rings have not been allocated for vsi %d\n",
557 vsi_seid);
558 return;
559 }
560
561 switch (type) {
562 case RING_TYPE_RX:
563 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
564 break;
565 case RING_TYPE_TX:
566 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
567 break;
568 case RING_TYPE_XDP:
569 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
570 break;
571 default:
572 ring = NULL;
573 break;
574 }
575 if (!ring)
576 return;
577
578 if (cnt == 2) {
579 switch (type) {
580 case RING_TYPE_RX:
581 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
582 break;
583 case RING_TYPE_TX:
584 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
585 break;
586 case RING_TYPE_XDP:
587 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
588 break;
589 }
590 for (i = 0; i < ring->count; i++) {
591 if (!is_rx_ring) {
592 txd = I40E_TX_DESC(ring, i);
593 dev_info(&pf->pdev->dev,
594 " d[%03x] = 0x%016llx 0x%016llx\n",
595 i, txd->buffer_addr,
596 txd->cmd_type_offset_bsz);
597 } else {
598 rxd = I40E_RX_DESC(ring, i);
599 dev_info(&pf->pdev->dev,
600 " d[%03x] = 0x%016llx 0x%016llx\n",
601 i, rxd->read.pkt_addr,
602 rxd->read.hdr_addr);
603 }
604 }
605 } else if (cnt == 3) {
606 if (desc_n >= ring->count || desc_n < 0) {
607 dev_info(&pf->pdev->dev,
608 "descriptor %d not found\n", desc_n);
609 goto out;
610 }
611 if (!is_rx_ring) {
612 txd = I40E_TX_DESC(ring, desc_n);
613 dev_info(&pf->pdev->dev,
614 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
615 vsi_seid, ring_id, desc_n,
616 txd->buffer_addr, txd->cmd_type_offset_bsz);
617 } else {
618 rxd = I40E_RX_DESC(ring, desc_n);
619 dev_info(&pf->pdev->dev,
620 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
621 vsi_seid, ring_id, desc_n,
622 rxd->read.pkt_addr, rxd->read.hdr_addr);
623 }
624 } else {
625 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
626 }
627
628out:
629 kfree(ring);
630}
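/*
 * Usage sketches (SEID, ring and descriptor numbers are hypothetical; the
 * target is the per-PF "command" file):
 *
 *   # echo "dump desc tx 390 0"   > command    - every descriptor of Tx ring 0
 *   # echo "dump desc rx 390 0 5" > command    - only Rx descriptor 5
 *
 * The ring structure is snapshotted with kmemdup() so its bookkeeping
 * fields are read consistently, but ring->desc still points at the live
 * descriptor memory that the dumped values come from.
 */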
631
632/**
633 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
634 * @pf: the i40e_pf created in command write
635 **/
636static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
637{
638 struct i40e_vsi *vsi;
639 int i;
640
641 i40e_pf_for_each_vsi(pf, i, vsi)
642 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
643}
644
645/**
646 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
647 * @pf: the i40e_pf created in command write
648 * @estats: the eth stats structure to be dumped
649 **/
650static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
651 struct i40e_eth_stats *estats)
652{
653 dev_info(&pf->pdev->dev, " ethstats:\n");
654 dev_info(&pf->pdev->dev,
655 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
656 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
657 dev_info(&pf->pdev->dev,
658 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
659 estats->rx_broadcast, estats->rx_discards);
660 dev_info(&pf->pdev->dev,
661 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
662 estats->rx_unknown_protocol, estats->tx_bytes);
663 dev_info(&pf->pdev->dev,
664 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
665 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
666 dev_info(&pf->pdev->dev,
667 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
668 estats->tx_discards, estats->tx_errors);
669}
670
671/**
672 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
673 * @pf: the i40e_pf created in command write
674 * @seid: the seid the user put in
675 **/
676static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
677{
678 struct i40e_veb *veb;
679
680 veb = i40e_pf_get_veb_by_seid(pf, seid);
681 if (!veb) {
682 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
683 return;
684 }
685 dev_info(&pf->pdev->dev,
686 "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
687 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
688 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
689 i40e_dbg_dump_eth_stats(pf, &veb->stats);
690}
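/*
 * Reached via "dump veb <seid>" (or "dump veb" with no argument, which
 * i40e_dbg_dump_veb_all() below fans out to every relay); the relay's
 * bookkeeping is printed first, then its i40e_eth_stats block via
 * i40e_dbg_dump_eth_stats() above.
 */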
691
692/**
693 * i40e_dbg_dump_veb_all - dumps all known veb's stats
694 * @pf: the i40e_pf created in command write
695 **/
696static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
697{
698 struct i40e_veb *veb;
699 int i;
700
701 i40e_pf_for_each_veb(pf, i, veb)
702 i40e_dbg_dump_veb_seid(pf, veb->seid);
703}
704
705/**
706 * i40e_dbg_dump_vf - dump VF info
707 * @pf: the i40e_pf created in command write
708 * @vf_id: the vf_id from the user
709 **/
710static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
711{
712 struct i40e_vf *vf;
713 struct i40e_vsi *vsi;
714
715 if (!pf->num_alloc_vfs) {
716 dev_info(&pf->pdev->dev, "no VFs allocated\n");
717 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
718 vf = &pf->vf[vf_id];
719 vsi = pf->vsi[vf->lan_vsi_idx];
720 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
721 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
722 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
723 vf->num_mdd_events);
724 } else {
725 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
726 }
727}
728
729/**
730 * i40e_dbg_dump_vf_all - dump VF info for all VFs
731 * @pf: the i40e_pf created in command write
732 **/
733static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
734{
735 int i;
736
737 if (!pf->num_alloc_vfs)
738 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
739 else
740 for (i = 0; i < pf->num_alloc_vfs; i++)
741 i40e_dbg_dump_vf(pf, i);
742}
743
744/**
745 * i40e_dbg_command_write - write into command datum
746 * @filp: the opened file
747 * @buffer: where to find the user's data
748 * @count: the length of the user's data
749 * @ppos: file position offset
750 **/
751static ssize_t i40e_dbg_command_write(struct file *filp,
752 const char __user *buffer,
753 size_t count, loff_t *ppos)
754{
755 struct i40e_pf *pf = filp->private_data;
756 char *cmd_buf, *cmd_buf_tmp;
757 int bytes_not_copied;
758 struct i40e_vsi *vsi;
759 int vsi_seid;
760 int veb_seid;
761 int vf_id;
762 int cnt;
763
764 /* don't allow partial writes */
765 if (*ppos != 0)
766 return 0;
767
768 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
769 if (!cmd_buf)
770 return count;
771 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
772 if (bytes_not_copied) {
773 kfree(cmd_buf);
774 return -EFAULT;
775 }
776 cmd_buf[count] = '\0';
777
778 cmd_buf_tmp = strchr(cmd_buf, '\n');
779 if (cmd_buf_tmp) {
780 *cmd_buf_tmp = '\0';
781 count = cmd_buf_tmp - cmd_buf + 1;
782 }
783
784 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
785 vsi_seid = -1;
786 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
787 if (cnt == 0) {
788 /* default to PF VSI */
789 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
790 } else if (vsi_seid < 0) {
791 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
792 vsi_seid);
793 goto command_write_done;
794 }
795
796 /* By default we are in VEPA mode, if this is the first VF/VMDq
797 * VSI to be added switch to VEB mode.
798 */
799 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
800 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
801 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
802 }
803
804 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
805 if (vsi)
806 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
807 vsi->seid, vsi->uplink_seid);
808 else
809 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
810
811 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
812 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
813 if (cnt != 1) {
814 dev_info(&pf->pdev->dev,
815 "del vsi: bad command string, cnt=%d\n",
816 cnt);
817 goto command_write_done;
818 }
819 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
820 if (!vsi) {
821 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
822 vsi_seid);
823 goto command_write_done;
824 }
825
826 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
827 i40e_vsi_release(vsi);
828
829 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
830 struct i40e_veb *veb;
831 u8 enabled_tc = 0x1;
832 int uplink_seid;
833
834 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
835 if (cnt == 0) {
836 uplink_seid = 0;
837 vsi_seid = 0;
838 } else if (cnt != 2) {
839 dev_info(&pf->pdev->dev,
840 "add relay: bad command string, cnt=%d\n",
841 cnt);
842 goto command_write_done;
843 } else if (uplink_seid < 0) {
844 dev_info(&pf->pdev->dev,
845 "add relay %d: bad uplink seid\n",
846 uplink_seid);
847 goto command_write_done;
848 }
849
850 if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
851 dev_info(&pf->pdev->dev,
852 "add relay: relay uplink %d not found\n",
853 uplink_seid);
854 goto command_write_done;
855 } else if (uplink_seid) {
856 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
857 if (!vsi) {
858 dev_info(&pf->pdev->dev,
859 "add relay: VSI %d not found\n",
860 vsi_seid);
861 goto command_write_done;
862 }
863 enabled_tc = vsi->tc_config.enabled_tc;
864 } else if (vsi_seid) {
865 dev_info(&pf->pdev->dev,
866 "add relay: VSI must be 0 for floating relay\n");
867 goto command_write_done;
868 }
869
870 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
871 if (veb)
872 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
873 else
874 dev_info(&pf->pdev->dev, "add relay failed\n");
875
876 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
877 struct i40e_veb *veb;
878 int i;
879
880 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
881 if (cnt != 1) {
882 dev_info(&pf->pdev->dev,
883 "del relay: bad command string, cnt=%d\n",
884 cnt);
885 goto command_write_done;
886 } else if (veb_seid < 0) {
887 dev_info(&pf->pdev->dev,
888 "del relay %d: bad relay seid\n", veb_seid);
889 goto command_write_done;
890 }
891
892 /* find the veb */
893 i40e_pf_for_each_veb(pf, i, veb)
894 if (veb->seid == veb_seid)
895 break;
896
897 if (i >= I40E_MAX_VEB) {
898 dev_info(&pf->pdev->dev,
899 "del relay: relay %d not found\n", veb_seid);
900 goto command_write_done;
901 }
902
903 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
904 i40e_veb_release(veb);
905 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
906 unsigned int v;
907 int ret;
908 u16 vid;
909
910 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
911 if (cnt != 2) {
912 dev_info(&pf->pdev->dev,
913 "add pvid: bad command string, cnt=%d\n", cnt);
914 goto command_write_done;
915 }
916
917 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
918 if (!vsi) {
919 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
920 vsi_seid);
921 goto command_write_done;
922 }
923
924 vid = v;
925 ret = i40e_vsi_add_pvid(vsi, vid);
926 if (!ret)
927 dev_info(&pf->pdev->dev,
928 "add pvid: %d added to VSI %d\n",
929 vid, vsi_seid);
930 else
931 dev_info(&pf->pdev->dev,
932 "add pvid: %d to VSI %d failed, ret=%d\n",
933 vid, vsi_seid, ret);
934
935 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
936
937 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
938 if (cnt != 1) {
939 dev_info(&pf->pdev->dev,
940 "del pvid: bad command string, cnt=%d\n",
941 cnt);
942 goto command_write_done;
943 }
944
945 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
946 if (!vsi) {
947 dev_info(&pf->pdev->dev,
948 "del pvid: VSI %d not found\n", vsi_seid);
949 goto command_write_done;
950 }
951
952 i40e_vsi_remove_pvid(vsi);
953 dev_info(&pf->pdev->dev,
954 "del pvid: removed from VSI %d\n", vsi_seid);
955
956 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
957 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
958 i40e_fetch_switch_configuration(pf, true);
959 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
960 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
961 if (cnt > 0)
962 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
963 else
964 i40e_dbg_dump_vsi_no_seid(pf);
965 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
966 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
967 if (cnt > 0)
968 i40e_dbg_dump_veb_seid(pf, vsi_seid);
969 else
970 i40e_dbg_dump_veb_all(pf);
971 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
972 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
973 if (cnt > 0)
974 i40e_dbg_dump_vf(pf, vf_id);
975 else
976 i40e_dbg_dump_vf_all(pf);
977 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
978 int ring_id, desc_n;
979 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
980 cnt = sscanf(&cmd_buf[12], "%i %i %i",
981 &vsi_seid, &ring_id, &desc_n);
982 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
983 desc_n, pf, RING_TYPE_RX);
984 } else if (strncmp(&cmd_buf[10], "tx", 2)
985 == 0) {
986 cnt = sscanf(&cmd_buf[12], "%i %i %i",
987 &vsi_seid, &ring_id, &desc_n);
988 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
989 desc_n, pf, RING_TYPE_TX);
990 } else if (strncmp(&cmd_buf[10], "xdp", 3)
991 == 0) {
992 cnt = sscanf(&cmd_buf[13], "%i %i %i",
993 &vsi_seid, &ring_id, &desc_n);
994 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
995 desc_n, pf, RING_TYPE_XDP);
996 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
997 i40e_dbg_dump_aq_desc(pf);
998 } else {
999 dev_info(&pf->pdev->dev,
1000 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1001 dev_info(&pf->pdev->dev,
1002 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1003 dev_info(&pf->pdev->dev,
1004 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1005 dev_info(&pf->pdev->dev, "dump desc aq\n");
1006 }
1007 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1008 dev_info(&pf->pdev->dev,
1009 "core reset count: %d\n", pf->corer_count);
1010 dev_info(&pf->pdev->dev,
1011 "global reset count: %d\n", pf->globr_count);
1012 dev_info(&pf->pdev->dev,
1013 "emp reset count: %d\n", pf->empr_count);
1014 dev_info(&pf->pdev->dev,
1015 "pf reset count: %d\n", pf->pfr_count);
1016 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1017 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1018 struct i40e_dcbx_config *cfg =
1019 &pf->hw.local_dcbx_config;
1020 struct i40e_dcbx_config *r_cfg =
1021 &pf->hw.remote_dcbx_config;
1022 int i, ret;
1023 u16 switch_id;
1024
1025 bw_data = kzalloc(sizeof(
1026 struct i40e_aqc_query_port_ets_config_resp),
1027 GFP_KERNEL);
1028 if (!bw_data) {
1029 ret = -ENOMEM;
1030 goto command_write_done;
1031 }
1032
1033 vsi = pf->vsi[pf->lan_vsi];
1034 switch_id =
1035 le16_to_cpu(vsi->info.switch_id) &
1036 I40E_AQ_VSI_SW_ID_MASK;
1037
1038 ret = i40e_aq_query_port_ets_config(&pf->hw,
1039 switch_id,
1040 bw_data, NULL);
1041 if (ret) {
1042 dev_info(&pf->pdev->dev,
1043 "Query Port ETS Config AQ command failed =0x%x\n",
1044 pf->hw.aq.asq_last_status);
1045 kfree(bw_data);
1046 bw_data = NULL;
1047 goto command_write_done;
1048 }
1049 dev_info(&pf->pdev->dev,
1050 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1051 bw_data->tc_valid_bits,
1052 bw_data->tc_strict_priority_bits,
1053 le16_to_cpu(bw_data->tc_bw_max[0]),
1054 le16_to_cpu(bw_data->tc_bw_max[1]));
1055 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1056 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1057 bw_data->tc_bw_share_credits[i],
1058 le16_to_cpu(bw_data->tc_bw_limits[i]));
1059 }
1060
1061 kfree(bw_data);
1062 bw_data = NULL;
1063
1064 dev_info(&pf->pdev->dev,
1065 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1066 dev_info(&pf->pdev->dev,
1067 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1068 cfg->etscfg.willing, cfg->etscfg.cbs,
1069 cfg->etscfg.maxtcs);
1070 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1071 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1072 i, cfg->etscfg.prioritytable[i],
1073 cfg->etscfg.tcbwtable[i],
1074 cfg->etscfg.tsatable[i]);
1075 }
1076 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1077 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1078 i, cfg->etsrec.prioritytable[i],
1079 cfg->etsrec.tcbwtable[i],
1080 cfg->etsrec.tsatable[i]);
1081 }
1082 dev_info(&pf->pdev->dev,
1083 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1084 cfg->pfc.willing, cfg->pfc.mbc,
1085 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1086 dev_info(&pf->pdev->dev,
1087 "port app_table: num_apps=%d\n", cfg->numapps);
1088 for (i = 0; i < cfg->numapps; i++) {
1089 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1090 i, cfg->app[i].priority,
1091 cfg->app[i].selector,
1092 cfg->app[i].protocolid);
1093 }
1094 /* Peer TLV DCBX data */
1095 dev_info(&pf->pdev->dev,
1096 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1097 r_cfg->etscfg.willing,
1098 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1099 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1100 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1101 i, r_cfg->etscfg.prioritytable[i],
1102 r_cfg->etscfg.tcbwtable[i],
1103 r_cfg->etscfg.tsatable[i]);
1104 }
1105 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1106 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1107 i, r_cfg->etsrec.prioritytable[i],
1108 r_cfg->etsrec.tcbwtable[i],
1109 r_cfg->etsrec.tsatable[i]);
1110 }
1111 dev_info(&pf->pdev->dev,
1112 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1113 r_cfg->pfc.willing,
1114 r_cfg->pfc.mbc,
1115 r_cfg->pfc.pfccap,
1116 r_cfg->pfc.pfcenable);
1117 dev_info(&pf->pdev->dev,
1118 "remote port app_table: num_apps=%d\n",
1119 r_cfg->numapps);
1120 for (i = 0; i < r_cfg->numapps; i++) {
1121 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1122 i, r_cfg->app[i].priority,
1123 r_cfg->app[i].selector,
1124 r_cfg->app[i].protocolid);
1125 }
1126 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1127 int cluster_id, table_id;
1128 int index, ret;
1129 u16 buff_len = 4096;
1130 u32 next_index;
1131 u8 next_table;
1132 u8 *buff;
1133 u16 rlen;
1134
1135 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1136 &cluster_id, &table_id, &index);
1137 if (cnt != 3) {
1138 dev_info(&pf->pdev->dev,
1139 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1140 goto command_write_done;
1141 }
1142
1143 dev_info(&pf->pdev->dev,
1144 "AQ debug dump fwdata params %x %x %x %x\n",
1145 cluster_id, table_id, index, buff_len);
1146 buff = kzalloc(buff_len, GFP_KERNEL);
1147 if (!buff)
1148 goto command_write_done;
1149
1150 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1151 index, buff_len, buff, &rlen,
1152 &next_table, &next_index,
1153 NULL);
1154 if (ret) {
1155 dev_info(&pf->pdev->dev,
1156 "debug dump fwdata AQ Failed %d 0x%x\n",
1157 ret, pf->hw.aq.asq_last_status);
1158 kfree(buff);
1159 buff = NULL;
1160 goto command_write_done;
1161 }
1162 dev_info(&pf->pdev->dev,
1163 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1164 rlen, next_table, next_index);
1165 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1166 DUMP_PREFIX_OFFSET, 16, 1,
1167 buff, rlen, true);
1168 kfree(buff);
1169 buff = NULL;
1170 } else {
1171 dev_info(&pf->pdev->dev,
1172 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1173 dev_info(&pf->pdev->dev, "dump switch\n");
1174 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1175 dev_info(&pf->pdev->dev, "dump reset stats\n");
1176 dev_info(&pf->pdev->dev, "dump port\n");
1177 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1178 dev_info(&pf->pdev->dev,
1179 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1180 }
1181 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1182 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1183 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1184
1185 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1186 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1187 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1188
1189 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1190 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1191 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1192
1193 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1194 u32 address;
1195 u32 value;
1196
1197 cnt = sscanf(&cmd_buf[4], "%i", &address);
1198 if (cnt != 1) {
1199 dev_info(&pf->pdev->dev, "read <reg>\n");
1200 goto command_write_done;
1201 }
1202
1203 /* check the range on address */
1204 if (address > (pf->ioremap_len - sizeof(u32))) {
1205 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1206 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1207 goto command_write_done;
1208 }
1209
1210 value = rd32(&pf->hw, address);
1211 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1212 address, value);
1213
1214 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1215 u32 address, value;
1216
1217 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1218 if (cnt != 2) {
1219 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1220 goto command_write_done;
1221 }
1222
1223 /* check the range on address */
1224 if (address > (pf->ioremap_len - sizeof(u32))) {
1225 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1226 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1227 goto command_write_done;
1228 }
1229 wr32(&pf->hw, address, value);
1230 value = rd32(&pf->hw, address);
1231 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1232 address, value);
1233 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1234 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1235 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1236 if (cnt == 0) {
1237 int i;
1238
1239 i40e_pf_for_each_vsi(pf, i, vsi)
1240 i40e_vsi_reset_stats(vsi);
1241 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1242 } else if (cnt == 1) {
1243 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1244 if (!vsi) {
1245 dev_info(&pf->pdev->dev,
1246 "clear_stats vsi: bad vsi %d\n",
1247 vsi_seid);
1248 goto command_write_done;
1249 }
1250 i40e_vsi_reset_stats(vsi);
1251 dev_info(&pf->pdev->dev,
1252 "vsi clear stats called for vsi %d\n",
1253 vsi_seid);
1254 } else {
1255 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1256 }
1257 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1258 if (pf->hw.partition_id == 1) {
1259 i40e_pf_reset_stats(pf);
1260 dev_info(&pf->pdev->dev, "port stats cleared\n");
1261 } else {
1262 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1263 }
1264 } else {
1265 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1266 }
1267 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1268 struct i40e_aq_desc *desc;
1269 int ret;
1270
1271 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1272 if (!desc)
1273 goto command_write_done;
1274 cnt = sscanf(&cmd_buf[11],
1275 "%hi %hi %hi %hi %i %i %i %i %i %i",
1276 &desc->flags,
1277 &desc->opcode, &desc->datalen, &desc->retval,
1278 &desc->cookie_high, &desc->cookie_low,
1279 &desc->params.internal.param0,
1280 &desc->params.internal.param1,
1281 &desc->params.internal.param2,
1282 &desc->params.internal.param3);
1283 if (cnt != 10) {
1284 dev_info(&pf->pdev->dev,
1285 "send aq_cmd: bad command string, cnt=%d\n",
1286 cnt);
1287 kfree(desc);
1288 desc = NULL;
1289 goto command_write_done;
1290 }
1291 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1292 if (!ret) {
1293 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1294 } else if (ret == -EIO) {
1295 dev_info(&pf->pdev->dev,
1296 "AQ command send failed Opcode %x AQ Error: %d\n",
1297 desc->opcode, pf->hw.aq.asq_last_status);
1298 } else {
1299 dev_info(&pf->pdev->dev,
1300 "AQ command send failed Opcode %x Status: %d\n",
1301 desc->opcode, ret);
1302 }
1303 dev_info(&pf->pdev->dev,
1304 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1305 desc->flags, desc->opcode, desc->datalen, desc->retval,
1306 desc->cookie_high, desc->cookie_low,
1307 desc->params.internal.param0,
1308 desc->params.internal.param1,
1309 desc->params.internal.param2,
1310 desc->params.internal.param3);
1311 kfree(desc);
1312 desc = NULL;
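		/*
		 * Example write (all ten fields are required; the values
		 * below are placeholders only - flags, opcode and parameters
		 * are handed to the firmware unmodified):
		 *
		 *   # echo "send aq_cmd 0x0 0x0001 0 0 0 0 0 0 0 0" > command
		 *
		 * The descriptor write-back above is printed whether or not
		 * the firmware accepted the command.
		 */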
1313 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1314 struct i40e_aq_desc *desc;
1315 u16 buffer_len;
1316 u8 *buff;
1317 int ret;
1318
1319 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1320 if (!desc)
1321 goto command_write_done;
1322 cnt = sscanf(&cmd_buf[20],
1323 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1324 &desc->flags,
1325 &desc->opcode, &desc->datalen, &desc->retval,
1326 &desc->cookie_high, &desc->cookie_low,
1327 &desc->params.internal.param0,
1328 &desc->params.internal.param1,
1329 &desc->params.internal.param2,
1330 &desc->params.internal.param3,
1331 &buffer_len);
1332 if (cnt != 11) {
1333 dev_info(&pf->pdev->dev,
1334 "send indirect aq_cmd: bad command string, cnt=%d\n",
1335 cnt);
1336 kfree(desc);
1337 desc = NULL;
1338 goto command_write_done;
1339 }
1340 /* Just stub a buffer big enough in case user messed up */
1341 if (buffer_len == 0)
1342 buffer_len = 1280;
1343
1344 buff = kzalloc(buffer_len, GFP_KERNEL);
1345 if (!buff) {
1346 kfree(desc);
1347 desc = NULL;
1348 goto command_write_done;
1349 }
1350 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1351 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1352 buffer_len, NULL);
1353 if (!ret) {
1354 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1355 } else if (ret == -EIO) {
1356 dev_info(&pf->pdev->dev,
1357 "AQ command send failed Opcode %x AQ Error: %d\n",
1358 desc->opcode, pf->hw.aq.asq_last_status);
1359 } else {
1360 dev_info(&pf->pdev->dev,
1361 "AQ command send failed Opcode %x Status: %d\n",
1362 desc->opcode, ret);
1363 }
1364 dev_info(&pf->pdev->dev,
1365 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1366 desc->flags, desc->opcode, desc->datalen, desc->retval,
1367 desc->cookie_high, desc->cookie_low,
1368 desc->params.internal.param0,
1369 desc->params.internal.param1,
1370 desc->params.internal.param2,
1371 desc->params.internal.param3);
1372 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1373 DUMP_PREFIX_OFFSET, 16, 1,
1374 buff, buffer_len, true);
1375 kfree(buff);
1376 buff = NULL;
1377 kfree(desc);
1378 desc = NULL;
1379 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1380 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1381 i40e_get_current_fd_count(pf));
1382 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1383 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1384 int ret;
1385
1386 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1387 if (ret) {
1388 dev_info(&pf->pdev->dev,
1389 "Stop LLDP AQ command failed =0x%x\n",
1390 pf->hw.aq.asq_last_status);
1391 goto command_write_done;
1392 }
1393 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1394 pf->hw.mac.addr,
1395 ETH_P_LLDP, 0,
1396 pf->vsi[pf->lan_vsi]->seid,
1397 0, true, NULL, NULL);
1398 if (ret) {
1399 dev_info(&pf->pdev->dev,
1400 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1401 __func__, pf->hw.aq.asq_last_status);
1402 goto command_write_done;
1403 }
1404#ifdef CONFIG_I40E_DCB
1405 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1406 DCB_CAP_DCBX_VER_IEEE;
1407#endif /* CONFIG_I40E_DCB */
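			/*
			 * With firmware LLDP stopped, the control packet
			 * filter added above steers ETH_P_LLDP frames to the
			 * LAN VSI so a host agent (lldpad, for instance) can
			 * take over; the "lldp start" branch below undoes
			 * both steps.
			 */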
1408 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1409 int ret;
1410
1411 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1412 pf->hw.mac.addr,
1413 ETH_P_LLDP, 0,
1414 pf->vsi[pf->lan_vsi]->seid,
1415 0, false, NULL, NULL);
1416 if (ret) {
1417 dev_info(&pf->pdev->dev,
1418 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1419 __func__, pf->hw.aq.asq_last_status);
1420 /* Continue and start FW LLDP anyways */
1421 }
1422
1423 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1424 if (ret) {
1425 dev_info(&pf->pdev->dev,
1426 "Start LLDP AQ command failed =0x%x\n",
1427 pf->hw.aq.asq_last_status);
1428 goto command_write_done;
1429 }
1430#ifdef CONFIG_I40E_DCB
1431 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1432 DCB_CAP_DCBX_VER_IEEE;
1433#endif /* CONFIG_I40E_DCB */
1434 } else if (strncmp(&cmd_buf[5],
1435 "get local", 9) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
						   I40E_AQ_LLDP_MIB_LOCAL,
						   buff, I40E_LLDPDU_SIZE,
						   &llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (local) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
			u16 llen, rlen;
			int ret;
			u8 *buff;

			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_get_lldp_mib(&pf->hw,
					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					I40E_AQ_LLDP_MIB_REMOTE,
					buff, I40E_LLDPDU_SIZE,
					&llen, &rlen, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
				goto command_write_done;
			}
			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
			buff = NULL;
		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								true, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
			int ret;

			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								false, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				goto command_write_done;
			}
		}
	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
		u16 buffer_len, bytes;
		u16 module;
		u32 offset;
		u16 *buff;
		int ret;

		cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
			     &module, &offset, &buffer_len);
		if (cnt == 0) {
			module = 0;
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 1) {
			offset = 0;
			buffer_len = 0;
		} else if (cnt == 2) {
			buffer_len = 0;
		} else if (cnt > 3) {
			dev_info(&pf->pdev->dev,
				 "nvm read: bad command string, cnt=%d\n", cnt);
			goto command_write_done;
		}

		/* set the max length */
		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);

		bytes = 2 * buffer_len;

		/* read at least 1k bytes, no more than 4kB */
		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
		buff = kzalloc(bytes, GFP_KERNEL);
		if (!buff)
			goto command_write_done;

		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
			kfree(buff);
			goto command_write_done;
		}

		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
				       bytes, (u8 *)buff, true, NULL);
		i40e_release_nvm(&pf->hw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Read NVM AQ failed err=%d status=0x%x\n",
				 ret, pf->hw.aq.asq_last_status);
		} else {
			dev_info(&pf->pdev->dev,
				 "Read NVM module=0x%x offset=0x%x words=%d\n",
				 module, offset, buffer_len);
			if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					       DUMP_PREFIX_OFFSET, 16, 2,
					       buff, bytes, true);
		}
		kfree(buff);
		buff = NULL;
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
		dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
		dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
		dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
		dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " dump switch\n");
		dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
		dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc aq\n");
		dev_info(&pf->pdev->dev, " dump reset stats\n");
		dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
		dev_info(&pf->pdev->dev, " read <reg>\n");
		dev_info(&pf->pdev->dev, " write <reg> <value>\n");
		dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
		dev_info(&pf->pdev->dev, " clear_stats port\n");
		dev_info(&pf->pdev->dev, " pfr\n");
		dev_info(&pf->pdev->dev, " corer\n");
		dev_info(&pf->pdev->dev, " globr\n");
		dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
		dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt\n");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}
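
/* Example usage of the command file, as a sketch only: it assumes debugfs
 * is mounted at /sys/kernel/debug and uses a placeholder PCI address for
 * the per-PF directory created by i40e_dbg_pf_init() below.
 *
 *   # echo "lldp get local" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *   # echo "dump switch" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *   # dmesg | tail
 *
 * Output goes through dev_info()/print_hex_dump(), so results show up in
 * the kernel log rather than on the read side of the file.
 */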

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs lets the user invoke selected
 * netdev operations (change_mtu, set_rx_mode, napi scheduling)
 * on a VSI's netdev directly, which is handy for exercising those
 * paths in isolation; example usage is sketched below.
 **************************************************************/
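/* Example usage (a sketch; the VSI seid and PCI address below are
 * placeholders):
 *
 *   # echo "napi 390" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *   # echo "change_mtu 390 9000" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 */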
static char i40e_dbg_netdev_ops_buf[256] = "";

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}
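
/* The resulting layout, assuming debugfs is mounted in the usual place
 * (the PCI address portion is a placeholder):
 *
 *   /sys/kernel/debug/i40e/<pci_id>/command
 *   /sys/kernel/debug/i40e/<pci_id>/netdev_ops
 */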

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */