Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#ifdef CONFIG_DEBUG_FS
5
6#include <linux/fs.h>
7#include <linux/debugfs.h>
8#include <linux/if_bridge.h>
9#include "i40e.h"
10#include "i40e_virtchnl_pf.h"
11
12static struct dentry *i40e_dbg_root;
13
14enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18};
19
20/**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
24 **/
25static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26{
27 if (seid < 0) {
28 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
29
30 return NULL;
31 }
32
33 return i40e_pf_get_vsi_by_seid(pf, seid);
34}
35
36/**************************************************************
37 * command
38 * The command entry in debugfs is for giving the driver commands
39 * to be executed - these may be for changing the internal switch
40 * setup, adding or removing filters, or other things. Many of
41 * these will be useful for some forms of unit testing.
42 **************************************************************/
43static char i40e_dbg_command_buf[256] = "";
44
45/**
46 * i40e_dbg_command_read - read for command datum
47 * @filp: the opened file
48 * @buffer: where to write the data for the user to read
49 * @count: the size of the user's buffer
50 * @ppos: file position offset
51 **/
52static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
53 size_t count, loff_t *ppos)
54{
55 struct i40e_pf *pf = filp->private_data;
56 int bytes_not_copied;
57 int buf_size = 256;
58 char *buf;
59 int len;
60
61 /* don't allow partial reads */
62 if (*ppos != 0)
63 return 0;
64 if (count < buf_size)
65 return -ENOSPC;
66
67 buf = kzalloc(buf_size, GFP_KERNEL);
68 if (!buf)
69 return -ENOSPC;
70
71 len = snprintf(buf, buf_size, "%s: %s\n",
72 pf->vsi[pf->lan_vsi]->netdev->name,
73 i40e_dbg_command_buf);
74
75 bytes_not_copied = copy_to_user(buffer, buf, len);
76 kfree(buf);
77
78 if (bytes_not_copied)
79 return -EFAULT;
80
81 *ppos = len;
82 return len;
83}
84
85static char *i40e_filter_state_string[] = {
86 "INVALID",
87 "NEW",
88 "ACTIVE",
89 "FAILED",
90 "REMOVE",
91};
92
93/**
94 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
95 * @pf: the i40e_pf created in command write
96 * @seid: the seid the user put in
97 **/
98static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
99{
100 struct rtnl_link_stats64 *nstat;
101 struct i40e_mac_filter *f;
102 struct i40e_vsi *vsi;
103 int i, bkt;
104
105 vsi = i40e_dbg_find_vsi(pf, seid);
106 if (!vsi) {
107 dev_info(&pf->pdev->dev,
108 "dump %d: seid not found\n", seid);
109 return;
110 }
111 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
112 if (vsi->netdev) {
113 struct net_device *nd = vsi->netdev;
114
115 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
116 nd->name, nd->state, nd->flags);
117 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
118 (unsigned long int)nd->features);
119 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
120 (unsigned long int)nd->hw_features);
121 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
122 (unsigned long int)nd->vlan_features);
123 }
124 dev_info(&pf->pdev->dev,
125 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
126 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
127 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
128 dev_info(&pf->pdev->dev,
129 " state[%d] = %08lx\n",
130 i, vsi->state[i]);
131 if (vsi == pf->vsi[pf->lan_vsi])
132 dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
133 pf->hw.mac.addr,
134 pf->hw.mac.port_addr);
135 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
136 dev_info(&pf->pdev->dev,
137 " mac_filter_hash: %pM vid=%d, state %s\n",
138 f->macaddr, f->vlan,
139 i40e_filter_state_string[f->state]);
140 }
141 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
142 vsi->active_filters, vsi->promisc_threshold,
143 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
144 "ON" : "OFF"));
145 nstat = i40e_get_vsi_stats_struct(vsi);
146 dev_info(&pf->pdev->dev,
147 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
148 (unsigned long int)nstat->rx_packets,
149 (unsigned long int)nstat->rx_bytes,
150 (unsigned long int)nstat->rx_errors,
151 (unsigned long int)nstat->rx_dropped);
152 dev_info(&pf->pdev->dev,
153 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
154 (unsigned long int)nstat->tx_packets,
155 (unsigned long int)nstat->tx_bytes,
156 (unsigned long int)nstat->tx_errors,
157 (unsigned long int)nstat->tx_dropped);
158 dev_info(&pf->pdev->dev,
159 " net_stats: multicast = %lu, collisions = %lu\n",
160 (unsigned long int)nstat->multicast,
161 (unsigned long int)nstat->collisions);
162 dev_info(&pf->pdev->dev,
163 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
164 (unsigned long int)nstat->rx_length_errors,
165 (unsigned long int)nstat->rx_over_errors,
166 (unsigned long int)nstat->rx_crc_errors);
167 dev_info(&pf->pdev->dev,
168 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
169 (unsigned long int)nstat->rx_frame_errors,
170 (unsigned long int)nstat->rx_fifo_errors,
171 (unsigned long int)nstat->rx_missed_errors);
172 dev_info(&pf->pdev->dev,
173 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
174 (unsigned long int)nstat->tx_aborted_errors,
175 (unsigned long int)nstat->tx_carrier_errors,
176 (unsigned long int)nstat->tx_fifo_errors);
177 dev_info(&pf->pdev->dev,
178 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
179 (unsigned long int)nstat->tx_heartbeat_errors,
180 (unsigned long int)nstat->tx_window_errors);
181 dev_info(&pf->pdev->dev,
182 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
183 (unsigned long int)nstat->rx_compressed,
184 (unsigned long int)nstat->tx_compressed);
185 dev_info(&pf->pdev->dev,
186 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
187 (unsigned long int)vsi->net_stats_offsets.rx_packets,
188 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
189 (unsigned long int)vsi->net_stats_offsets.rx_errors,
190 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
191 dev_info(&pf->pdev->dev,
192 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
193 (unsigned long int)vsi->net_stats_offsets.tx_packets,
194 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
195 (unsigned long int)vsi->net_stats_offsets.tx_errors,
196 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
197 dev_info(&pf->pdev->dev,
198 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
199 (unsigned long int)vsi->net_stats_offsets.multicast,
200 (unsigned long int)vsi->net_stats_offsets.collisions);
201 dev_info(&pf->pdev->dev,
202 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
203 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
204 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
205 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
206 dev_info(&pf->pdev->dev,
207 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
208 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
209 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
210 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
211 dev_info(&pf->pdev->dev,
212 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
213 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
214 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
215 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
216 dev_info(&pf->pdev->dev,
217 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
218 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
219 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
220 dev_info(&pf->pdev->dev,
221 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
222 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
223 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
224 dev_info(&pf->pdev->dev,
225 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
226 vsi->tx_restart, vsi->tx_busy,
227 vsi->rx_buf_failed, vsi->rx_page_failed);
228 rcu_read_lock();
229 for (i = 0; i < vsi->num_queue_pairs; i++) {
230 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
231
232 if (!rx_ring)
233 continue;
234
235 dev_info(&pf->pdev->dev,
236 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
237 i, *rx_ring->state,
238 rx_ring->queue_index,
239 rx_ring->reg_idx);
240 dev_info(&pf->pdev->dev,
241 " rx_rings[%i]: rx_buf_len = %d\n",
242 i, rx_ring->rx_buf_len);
243 dev_info(&pf->pdev->dev,
244 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
245 i,
246 rx_ring->next_to_use,
247 rx_ring->next_to_clean,
248 rx_ring->ring_active);
249 dev_info(&pf->pdev->dev,
250 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
251 i, rx_ring->stats.packets,
252 rx_ring->stats.bytes,
253 rx_ring->rx_stats.non_eop_descs);
254 dev_info(&pf->pdev->dev,
255 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
256 i,
257 rx_ring->rx_stats.alloc_page_failed,
258 rx_ring->rx_stats.alloc_buff_failed);
259 dev_info(&pf->pdev->dev,
260 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
261 i,
262 rx_ring->rx_stats.page_reuse_count);
263 dev_info(&pf->pdev->dev,
264 " rx_rings[%i]: size = %i\n",
265 i, rx_ring->size);
266 dev_info(&pf->pdev->dev,
267 " rx_rings[%i]: itr_setting = %d (%s)\n",
268 i, rx_ring->itr_setting,
269 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
270 }
271 for (i = 0; i < vsi->num_queue_pairs; i++) {
272 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
273
274 if (!tx_ring)
275 continue;
276
277 dev_info(&pf->pdev->dev,
278 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
279 i, *tx_ring->state,
280 tx_ring->queue_index,
281 tx_ring->reg_idx);
282 dev_info(&pf->pdev->dev,
283 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
284 i,
285 tx_ring->next_to_use,
286 tx_ring->next_to_clean,
287 tx_ring->ring_active);
288 dev_info(&pf->pdev->dev,
289 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
290 i, tx_ring->stats.packets,
291 tx_ring->stats.bytes,
292 tx_ring->tx_stats.restart_queue);
293 dev_info(&pf->pdev->dev,
294 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
295 i,
296 tx_ring->tx_stats.tx_busy,
297 tx_ring->tx_stats.tx_done_old,
298 tx_ring->tx_stats.tx_stopped);
299 dev_info(&pf->pdev->dev,
300 " tx_rings[%i]: size = %i\n",
301 i, tx_ring->size);
302 dev_info(&pf->pdev->dev,
303 " tx_rings[%i]: DCB tc = %d\n",
304 i, tx_ring->dcb_tc);
305 dev_info(&pf->pdev->dev,
306 " tx_rings[%i]: itr_setting = %d (%s)\n",
307 i, tx_ring->itr_setting,
308 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
309 }
310 if (i40e_enabled_xdp_vsi(vsi)) {
311 for (i = 0; i < vsi->num_queue_pairs; i++) {
312 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
313
314 if (!xdp_ring)
315 continue;
316
317 dev_info(&pf->pdev->dev,
318 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
319 i, *xdp_ring->state,
320 xdp_ring->queue_index,
321 xdp_ring->reg_idx);
322 dev_info(&pf->pdev->dev,
323 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
324 i,
325 xdp_ring->next_to_use,
326 xdp_ring->next_to_clean,
327 xdp_ring->ring_active);
328 dev_info(&pf->pdev->dev,
329 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
330 i, xdp_ring->stats.packets,
331 xdp_ring->stats.bytes,
332 xdp_ring->tx_stats.restart_queue);
333 dev_info(&pf->pdev->dev,
334 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
335 i,
336 xdp_ring->tx_stats.tx_busy,
337 xdp_ring->tx_stats.tx_done_old);
338 dev_info(&pf->pdev->dev,
339 " xdp_rings[%i]: size = %i\n",
340 i, xdp_ring->size);
341 dev_info(&pf->pdev->dev,
342 " xdp_rings[%i]: DCB tc = %d\n",
343 i, xdp_ring->dcb_tc);
344 dev_info(&pf->pdev->dev,
345 " xdp_rings[%i]: itr_setting = %d (%s)\n",
346 i, xdp_ring->itr_setting,
347 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
348 "dynamic" : "fixed");
349 }
350 }
351 rcu_read_unlock();
352 dev_info(&pf->pdev->dev,
353 " work_limit = %d\n",
354 vsi->work_limit);
355 dev_info(&pf->pdev->dev,
356 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
357 vsi->max_frame, vsi->rx_buf_len, 0);
358 dev_info(&pf->pdev->dev,
359 " num_q_vectors = %i, base_vector = %i\n",
360 vsi->num_q_vectors, vsi->base_vector);
361 dev_info(&pf->pdev->dev,
362 " seid = %d, id = %d, uplink_seid = %d\n",
363 vsi->seid, vsi->id, vsi->uplink_seid);
364 dev_info(&pf->pdev->dev,
365 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
366 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
367 vsi->num_rx_desc);
368 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
369 if (vsi->type == I40E_VSI_SRIOV)
370 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
371 dev_info(&pf->pdev->dev,
372 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
373 vsi->info.valid_sections, vsi->info.switch_id);
374 dev_info(&pf->pdev->dev,
375 " info: sw_reserved[] = 0x%02x 0x%02x\n",
376 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
377 dev_info(&pf->pdev->dev,
378 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
379 vsi->info.sec_flags, vsi->info.sec_reserved);
380 dev_info(&pf->pdev->dev,
381 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
382 vsi->info.pvid, vsi->info.fcoe_pvid,
383 vsi->info.port_vlan_flags);
384 dev_info(&pf->pdev->dev,
385 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
386 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
387 vsi->info.pvlan_reserved[2]);
388 dev_info(&pf->pdev->dev,
389 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
390 vsi->info.ingress_table, vsi->info.egress_table);
391 dev_info(&pf->pdev->dev,
392 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
393 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
394 vsi->info.cas_pv_reserved);
395 dev_info(&pf->pdev->dev,
396 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
397 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
398 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
399 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
400 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
401 dev_info(&pf->pdev->dev,
402 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
403 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
404 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
405 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
406 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
407 dev_info(&pf->pdev->dev,
408 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
409 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
410 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
411 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
412 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
413 dev_info(&pf->pdev->dev,
414 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
415 vsi->info.queueing_opt_flags,
416 vsi->info.queueing_opt_reserved[0],
417 vsi->info.queueing_opt_reserved[1],
418 vsi->info.queueing_opt_reserved[2]);
419 dev_info(&pf->pdev->dev,
420 " info: up_enable_bits = 0x%02x\n",
421 vsi->info.up_enable_bits);
422 dev_info(&pf->pdev->dev,
423 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
424 vsi->info.sched_reserved, vsi->info.outer_up_table);
425 dev_info(&pf->pdev->dev,
426 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
427 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
428 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
429 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
430 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
431 dev_info(&pf->pdev->dev,
432 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
433 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
434 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
435 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
436 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
437 dev_info(&pf->pdev->dev,
438 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
439 vsi->info.stat_counter_idx, vsi->info.sched_id);
440 dev_info(&pf->pdev->dev,
441 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
442 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
443 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
444 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
445 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
446 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
447 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
448 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
449 dev_info(&pf->pdev->dev,
450 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
451 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
452 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
453 dev_info(&pf->pdev->dev,
454 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
455 i, vsi->tc_config.tc_info[i].qoffset,
456 vsi->tc_config.tc_info[i].qcount,
457 vsi->tc_config.tc_info[i].netdev_tc);
458 }
459 dev_info(&pf->pdev->dev,
460 " bw: bw_limit = %d, bw_max_quanta = %d\n",
461 vsi->bw_limit, vsi->bw_max_quanta);
462 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
463 dev_info(&pf->pdev->dev,
464 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
465 i, vsi->bw_ets_share_credits[i],
466 vsi->bw_ets_limit_credits[i],
467 vsi->bw_ets_max_quanta[i]);
468 }
469}
470
471/**
472 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
473 * @pf: the i40e_pf created in command write
474 **/
475static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
476{
477 struct i40e_adminq_ring *ring;
478 struct i40e_hw *hw = &pf->hw;
479 char hdr[32];
480 int i;
481
482 snprintf(hdr, sizeof(hdr), "%s %s: ",
483 dev_driver_string(&pf->pdev->dev),
484 dev_name(&pf->pdev->dev));
485
486 /* first the send (command) ring, then the receive (event) ring */
487 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
488 ring = &(hw->aq.asq);
489 for (i = 0; i < ring->count; i++) {
490 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
491
492 dev_info(&pf->pdev->dev,
493 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
494 i, d->flags, d->opcode, d->datalen, d->retval,
495 d->cookie_high, d->cookie_low);
496 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
497 16, 1, d->params.raw, 16, 0);
498 }
499
500 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
501 ring = &(hw->aq.arq);
502 for (i = 0; i < ring->count; i++) {
503 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
504
505 dev_info(&pf->pdev->dev,
506 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
507 i, d->flags, d->opcode, d->datalen, d->retval,
508 d->cookie_high, d->cookie_low);
509 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
510 16, 1, d->params.raw, 16, 0);
511 }
512}
513
514/**
515 * i40e_dbg_dump_desc - handles dump desc write into command datum
516 * @cnt: number of arguments that the user supplied
517 * @vsi_seid: vsi id entered by user
518 * @ring_id: ring id entered by user
519 * @desc_n: descriptor number entered by user
520 * @pf: the i40e_pf created in command write
521 * @type: enum describing whether ring is RX, TX or XDP
522 **/
523static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
524 struct i40e_pf *pf, enum ring_type type)
525{
526 bool is_rx_ring = type == RING_TYPE_RX;
527 struct i40e_tx_desc *txd;
528 union i40e_rx_desc *rxd;
529 struct i40e_ring *ring;
530 struct i40e_vsi *vsi;
531 int i;
532
533 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
534 if (!vsi) {
535 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
536 return;
537 }
538 if (vsi->type != I40E_VSI_MAIN &&
539 vsi->type != I40E_VSI_FDIR &&
540 vsi->type != I40E_VSI_VMDQ2) {
541 dev_info(&pf->pdev->dev,
542 "vsi %d type %d descriptor rings not available\n",
543 vsi_seid, vsi->type);
544 return;
545 }
546 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
547 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
548 return;
549 }
550 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
551 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
552 return;
553 }
554 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
555 dev_info(&pf->pdev->dev,
556 "descriptor rings have not been allocated for vsi %d\n",
557 vsi_seid);
558 return;
559 }
560
561 switch (type) {
562 case RING_TYPE_RX:
563 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
564 break;
565 case RING_TYPE_TX:
566 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
567 break;
568 case RING_TYPE_XDP:
569 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
570 break;
571 default:
572 ring = NULL;
573 break;
574 }
575 if (!ring)
576 return;
577
578 if (cnt == 2) {
579 switch (type) {
580 case RING_TYPE_RX:
581 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
582 break;
583 case RING_TYPE_TX:
584 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
585 break;
586 case RING_TYPE_XDP:
587 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
588 break;
589 }
590 for (i = 0; i < ring->count; i++) {
591 if (!is_rx_ring) {
592 txd = I40E_TX_DESC(ring, i);
593 dev_info(&pf->pdev->dev,
594 " d[%03x] = 0x%016llx 0x%016llx\n",
595 i, txd->buffer_addr,
596 txd->cmd_type_offset_bsz);
597 } else {
598 rxd = I40E_RX_DESC(ring, i);
599 dev_info(&pf->pdev->dev,
600 " d[%03x] = 0x%016llx 0x%016llx\n",
601 i, rxd->read.pkt_addr,
602 rxd->read.hdr_addr);
603 }
604 }
605 } else if (cnt == 3) {
606 if (desc_n >= ring->count || desc_n < 0) {
607 dev_info(&pf->pdev->dev,
608 "descriptor %d not found\n", desc_n);
609 goto out;
610 }
611 if (!is_rx_ring) {
612 txd = I40E_TX_DESC(ring, desc_n);
613 dev_info(&pf->pdev->dev,
614 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
615 vsi_seid, ring_id, desc_n,
616 txd->buffer_addr, txd->cmd_type_offset_bsz);
617 } else {
618 rxd = I40E_RX_DESC(ring, desc_n);
619 dev_info(&pf->pdev->dev,
620 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
621 vsi_seid, ring_id, desc_n,
622 rxd->read.pkt_addr, rxd->read.hdr_addr);
623 }
624 } else {
625 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
626 }
627
628out:
629 kfree(ring);
630}
631
632/**
633 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
634 * @pf: the i40e_pf created in command write
635 **/
636static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
637{
638 struct i40e_vsi *vsi;
639 int i;
640
641 i40e_pf_for_each_vsi(pf, i, vsi)
642 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
643}
644
645/**
646 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
647 * @pf: the i40e_pf created in command write
648 * @estats: the eth stats structure to be dumped
649 **/
650static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
651 struct i40e_eth_stats *estats)
652{
653 dev_info(&pf->pdev->dev, " ethstats:\n");
654 dev_info(&pf->pdev->dev,
655 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
656 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
657 dev_info(&pf->pdev->dev,
658 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
659 estats->rx_broadcast, estats->rx_discards);
660 dev_info(&pf->pdev->dev,
661 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
662 estats->rx_unknown_protocol, estats->tx_bytes);
663 dev_info(&pf->pdev->dev,
664 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
665 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
666 dev_info(&pf->pdev->dev,
667 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
668 estats->tx_discards, estats->tx_errors);
669}
670
671/**
672 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
673 * @pf: the i40e_pf created in command write
674 * @seid: the seid the user put in
675 **/
676static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
677{
678 struct i40e_veb *veb;
679
680 veb = i40e_pf_get_veb_by_seid(pf, seid);
681 if (!veb) {
682 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
683 return;
684 }
685 dev_info(&pf->pdev->dev,
686 "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
687 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
688 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
689 i40e_dbg_dump_eth_stats(pf, &veb->stats);
690}
691
692/**
693 * i40e_dbg_dump_veb_all - dumps all known veb's stats
694 * @pf: the i40e_pf created in command write
695 **/
696static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
697{
698 struct i40e_veb *veb;
699 int i;
700
701 i40e_pf_for_each_veb(pf, i, veb)
702 i40e_dbg_dump_veb_seid(pf, veb->seid);
703}
704
705/**
706 * i40e_dbg_dump_vf - dump VF info
707 * @pf: the i40e_pf created in command write
708 * @vf_id: the vf_id from the user
709 **/
710static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
711{
712 struct i40e_vf *vf;
713 struct i40e_vsi *vsi;
714
715 if (!pf->num_alloc_vfs) {
716 dev_info(&pf->pdev->dev, "no VFs allocated\n");
717 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
718 vf = &pf->vf[vf_id];
719 vsi = pf->vsi[vf->lan_vsi_idx];
720 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
721 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
722 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
723 vf->num_mdd_events);
724 } else {
725 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
726 }
727}
728
729/**
730 * i40e_dbg_dump_vf_all - dump VF info for all VFs
731 * @pf: the i40e_pf created in command write
732 **/
733static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
734{
735 int i;
736
737 if (!pf->num_alloc_vfs)
738 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
739 else
740 for (i = 0; i < pf->num_alloc_vfs; i++)
741 i40e_dbg_dump_vf(pf, i);
742}
743
744/**
745 * i40e_dbg_command_write - write into command datum
746 * @filp: the opened file
747 * @buffer: where to find the user's data
748 * @count: the length of the user's data
749 * @ppos: file position offset
750 **/
751static ssize_t i40e_dbg_command_write(struct file *filp,
752 const char __user *buffer,
753 size_t count, loff_t *ppos)
754{
755 struct i40e_pf *pf = filp->private_data;
756 char *cmd_buf, *cmd_buf_tmp;
757 int bytes_not_copied;
758 struct i40e_vsi *vsi;
759 int vsi_seid;
760 int veb_seid;
761 int vf_id;
762 int cnt;
763
764 /* don't allow partial writes */
765 if (*ppos != 0)
766 return 0;
767
768 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
769 if (!cmd_buf)
770 return count;
771 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
772 if (bytes_not_copied) {
773 kfree(cmd_buf);
774 return -EFAULT;
775 }
776 cmd_buf[count] = '\0';
777
778 cmd_buf_tmp = strchr(cmd_buf, '\n');
779 if (cmd_buf_tmp) {
780 *cmd_buf_tmp = '\0';
781 count = cmd_buf_tmp - cmd_buf + 1;
782 }
783
784 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
785 vsi_seid = -1;
786 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
787 if (cnt == 0) {
788 /* default to PF VSI */
789 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
790 } else if (vsi_seid < 0) {
791 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
792 vsi_seid);
793 goto command_write_done;
794 }
795
796 /* By default we are in VEPA mode, if this is the first VF/VMDq
797 * VSI to be added switch to VEB mode.
798 */
799 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
800 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
801 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
802 }
803
804 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
805 if (vsi)
806 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
807 vsi->seid, vsi->uplink_seid);
808 else
809 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
810
811 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
812 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
813 if (cnt != 1) {
814 dev_info(&pf->pdev->dev,
815 "del vsi: bad command string, cnt=%d\n",
816 cnt);
817 goto command_write_done;
818 }
819 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
820 if (!vsi) {
821 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
822 vsi_seid);
823 goto command_write_done;
824 }
825
826 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
827 i40e_vsi_release(vsi);
828
829 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
830 struct i40e_veb *veb;
831 u8 enabled_tc = 0x1;
832 int uplink_seid;
833
834 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
835 if (cnt == 0) {
836 uplink_seid = 0;
837 vsi_seid = 0;
838 } else if (cnt != 2) {
839 dev_info(&pf->pdev->dev,
840 "add relay: bad command string, cnt=%d\n",
841 cnt);
842 goto command_write_done;
843 } else if (uplink_seid < 0) {
844 dev_info(&pf->pdev->dev,
845 "add relay %d: bad uplink seid\n",
846 uplink_seid);
847 goto command_write_done;
848 }
849
850 if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
851 dev_info(&pf->pdev->dev,
852 "add relay: relay uplink %d not found\n",
853 uplink_seid);
854 goto command_write_done;
855 } else if (uplink_seid) {
856 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
857 if (!vsi) {
858 dev_info(&pf->pdev->dev,
859 "add relay: VSI %d not found\n",
860 vsi_seid);
861 goto command_write_done;
862 }
863 enabled_tc = vsi->tc_config.enabled_tc;
864 } else if (vsi_seid) {
865 dev_info(&pf->pdev->dev,
866 "add relay: VSI must be 0 for floating relay\n");
867 goto command_write_done;
868 }
869
870 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
871 if (veb)
872 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
873 else
874 dev_info(&pf->pdev->dev, "add relay failed\n");
875
876 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
877 struct i40e_veb *veb;
878 int i;
879
880 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
881 if (cnt != 1) {
882 dev_info(&pf->pdev->dev,
883 "del relay: bad command string, cnt=%d\n",
884 cnt);
885 goto command_write_done;
886 } else if (veb_seid < 0) {
887 dev_info(&pf->pdev->dev,
888 "del relay %d: bad relay seid\n", veb_seid);
889 goto command_write_done;
890 }
891
892 /* find the veb */
893 i40e_pf_for_each_veb(pf, i, veb)
894 if (veb->seid == veb_seid)
895 break;
896
897 if (i >= I40E_MAX_VEB) {
898 dev_info(&pf->pdev->dev,
899 "del relay: relay %d not found\n", veb_seid);
900 goto command_write_done;
901 }
902
903 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
904 i40e_veb_release(veb);
905 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
906 unsigned int v;
907 int ret;
908 u16 vid;
909
910 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
911 if (cnt != 2) {
912 dev_info(&pf->pdev->dev,
913 "add pvid: bad command string, cnt=%d\n", cnt);
914 goto command_write_done;
915 }
916
917 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
918 if (!vsi) {
919 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
920 vsi_seid);
921 goto command_write_done;
922 }
923
924 vid = v;
925 ret = i40e_vsi_add_pvid(vsi, vid);
926 if (!ret)
927 dev_info(&pf->pdev->dev,
928 "add pvid: %d added to VSI %d\n",
929 vid, vsi_seid);
930 else
931 dev_info(&pf->pdev->dev,
932 "add pvid: %d to VSI %d failed, ret=%d\n",
933 vid, vsi_seid, ret);
934
935 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
936
937 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
938 if (cnt != 1) {
939 dev_info(&pf->pdev->dev,
940 "del pvid: bad command string, cnt=%d\n",
941 cnt);
942 goto command_write_done;
943 }
944
945 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
946 if (!vsi) {
947 dev_info(&pf->pdev->dev,
948 "del pvid: VSI %d not found\n", vsi_seid);
949 goto command_write_done;
950 }
951
952 i40e_vsi_remove_pvid(vsi);
953 dev_info(&pf->pdev->dev,
954 "del pvid: removed from VSI %d\n", vsi_seid);
955
956 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
957 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
958 i40e_fetch_switch_configuration(pf, true);
959 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
960 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
961 if (cnt > 0)
962 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
963 else
964 i40e_dbg_dump_vsi_no_seid(pf);
965 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
966 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
967 if (cnt > 0)
968 i40e_dbg_dump_veb_seid(pf, vsi_seid);
969 else
970 i40e_dbg_dump_veb_all(pf);
971 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
972 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
973 if (cnt > 0)
974 i40e_dbg_dump_vf(pf, vf_id);
975 else
976 i40e_dbg_dump_vf_all(pf);
977 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
978 int ring_id, desc_n;
979 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
980 cnt = sscanf(&cmd_buf[12], "%i %i %i",
981 &vsi_seid, &ring_id, &desc_n);
982 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
983 desc_n, pf, RING_TYPE_RX);
984 } else if (strncmp(&cmd_buf[10], "tx", 2)
985 == 0) {
986 cnt = sscanf(&cmd_buf[12], "%i %i %i",
987 &vsi_seid, &ring_id, &desc_n);
988 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
989 desc_n, pf, RING_TYPE_TX);
990 } else if (strncmp(&cmd_buf[10], "xdp", 3)
991 == 0) {
992 cnt = sscanf(&cmd_buf[13], "%i %i %i",
993 &vsi_seid, &ring_id, &desc_n);
994 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
995 desc_n, pf, RING_TYPE_XDP);
996 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
997 i40e_dbg_dump_aq_desc(pf);
998 } else {
999 dev_info(&pf->pdev->dev,
1000 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1001 dev_info(&pf->pdev->dev,
1002 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1003 dev_info(&pf->pdev->dev,
1004 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1005 dev_info(&pf->pdev->dev, "dump desc aq\n");
1006 }
1007 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1008 dev_info(&pf->pdev->dev,
1009 "core reset count: %d\n", pf->corer_count);
1010 dev_info(&pf->pdev->dev,
1011 "global reset count: %d\n", pf->globr_count);
1012 dev_info(&pf->pdev->dev,
1013 "emp reset count: %d\n", pf->empr_count);
1014 dev_info(&pf->pdev->dev,
1015 "pf reset count: %d\n", pf->pfr_count);
1016 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1017 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1018 struct i40e_dcbx_config *cfg =
1019 &pf->hw.local_dcbx_config;
1020 struct i40e_dcbx_config *r_cfg =
1021 &pf->hw.remote_dcbx_config;
1022 int i, ret;
1023 u16 switch_id;
1024
1025 bw_data = kzalloc(sizeof(
1026 struct i40e_aqc_query_port_ets_config_resp),
1027 GFP_KERNEL);
1028 if (!bw_data) {
1029 ret = -ENOMEM;
1030 goto command_write_done;
1031 }
1032
1033 vsi = pf->vsi[pf->lan_vsi];
1034 switch_id =
1035 le16_to_cpu(vsi->info.switch_id) &
1036 I40E_AQ_VSI_SW_ID_MASK;
1037
1038 ret = i40e_aq_query_port_ets_config(&pf->hw,
1039 switch_id,
1040 bw_data, NULL);
1041 if (ret) {
1042 dev_info(&pf->pdev->dev,
1043 "Query Port ETS Config AQ command failed =0x%x\n",
1044 pf->hw.aq.asq_last_status);
1045 kfree(bw_data);
1046 bw_data = NULL;
1047 goto command_write_done;
1048 }
1049 dev_info(&pf->pdev->dev,
1050 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1051 bw_data->tc_valid_bits,
1052 bw_data->tc_strict_priority_bits,
1053 le16_to_cpu(bw_data->tc_bw_max[0]),
1054 le16_to_cpu(bw_data->tc_bw_max[1]));
1055 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1056 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1057 bw_data->tc_bw_share_credits[i],
1058 le16_to_cpu(bw_data->tc_bw_limits[i]));
1059 }
1060
1061 kfree(bw_data);
1062 bw_data = NULL;
1063
1064 dev_info(&pf->pdev->dev,
1065 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1066 dev_info(&pf->pdev->dev,
1067 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1068 cfg->etscfg.willing, cfg->etscfg.cbs,
1069 cfg->etscfg.maxtcs);
1070 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1071 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1072 i, cfg->etscfg.prioritytable[i],
1073 cfg->etscfg.tcbwtable[i],
1074 cfg->etscfg.tsatable[i]);
1075 }
1076 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1077 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1078 i, cfg->etsrec.prioritytable[i],
1079 cfg->etsrec.tcbwtable[i],
1080 cfg->etsrec.tsatable[i]);
1081 }
1082 dev_info(&pf->pdev->dev,
1083 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1084 cfg->pfc.willing, cfg->pfc.mbc,
1085 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1086 dev_info(&pf->pdev->dev,
1087 "port app_table: num_apps=%d\n", cfg->numapps);
1088 for (i = 0; i < cfg->numapps; i++) {
1089 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1090 i, cfg->app[i].priority,
1091 cfg->app[i].selector,
1092 cfg->app[i].protocolid);
1093 }
1094 /* Peer TLV DCBX data */
1095 dev_info(&pf->pdev->dev,
1096 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1097 r_cfg->etscfg.willing,
1098 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1099 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1100 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1101 i, r_cfg->etscfg.prioritytable[i],
1102 r_cfg->etscfg.tcbwtable[i],
1103 r_cfg->etscfg.tsatable[i]);
1104 }
1105 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1106 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1107 i, r_cfg->etsrec.prioritytable[i],
1108 r_cfg->etsrec.tcbwtable[i],
1109 r_cfg->etsrec.tsatable[i]);
1110 }
1111 dev_info(&pf->pdev->dev,
1112 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1113 r_cfg->pfc.willing,
1114 r_cfg->pfc.mbc,
1115 r_cfg->pfc.pfccap,
1116 r_cfg->pfc.pfcenable);
1117 dev_info(&pf->pdev->dev,
1118 "remote port app_table: num_apps=%d\n",
1119 r_cfg->numapps);
1120 for (i = 0; i < r_cfg->numapps; i++) {
1121 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1122 i, r_cfg->app[i].priority,
1123 r_cfg->app[i].selector,
1124 r_cfg->app[i].protocolid);
1125 }
1126 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1127 int cluster_id, table_id;
1128 int index, ret;
1129 u16 buff_len = 4096;
1130 u32 next_index;
1131 u8 next_table;
1132 u8 *buff;
1133 u16 rlen;
1134
1135 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1136 &cluster_id, &table_id, &index);
1137 if (cnt != 3) {
1138 dev_info(&pf->pdev->dev,
1139 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1140 goto command_write_done;
1141 }
1142
1143 dev_info(&pf->pdev->dev,
1144 "AQ debug dump fwdata params %x %x %x %x\n",
1145 cluster_id, table_id, index, buff_len);
1146 buff = kzalloc(buff_len, GFP_KERNEL);
1147 if (!buff)
1148 goto command_write_done;
1149
1150 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1151 index, buff_len, buff, &rlen,
1152 &next_table, &next_index,
1153 NULL);
1154 if (ret) {
1155 dev_info(&pf->pdev->dev,
1156 "debug dump fwdata AQ Failed %d 0x%x\n",
1157 ret, pf->hw.aq.asq_last_status);
1158 kfree(buff);
1159 buff = NULL;
1160 goto command_write_done;
1161 }
1162 dev_info(&pf->pdev->dev,
1163 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1164 rlen, next_table, next_index);
1165 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1166 DUMP_PREFIX_OFFSET, 16, 1,
1167 buff, rlen, true);
1168 kfree(buff);
1169 buff = NULL;
1170 } else {
1171 dev_info(&pf->pdev->dev,
1172 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1173 dev_info(&pf->pdev->dev, "dump switch\n");
1174 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1175 dev_info(&pf->pdev->dev, "dump reset stats\n");
1176 dev_info(&pf->pdev->dev, "dump port\n");
1177 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1178 dev_info(&pf->pdev->dev,
1179 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1180 }
1181 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1182 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1183 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1184
1185 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1186 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1187 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1188
1189 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1190 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1191 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1192
1193 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1194 u32 address;
1195 u32 value;
1196
1197 cnt = sscanf(&cmd_buf[4], "%i", &address);
1198 if (cnt != 1) {
1199 dev_info(&pf->pdev->dev, "read <reg>\n");
1200 goto command_write_done;
1201 }
1202
1203 /* check the range on address */
1204 if (address > (pf->ioremap_len - sizeof(u32))) {
1205 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1206 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1207 goto command_write_done;
1208 }
1209
1210 value = rd32(&pf->hw, address);
1211 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1212 address, value);
1213
1214 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1215 u32 address, value;
1216
1217 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1218 if (cnt != 2) {
1219 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1220 goto command_write_done;
1221 }
1222
1223 /* check the range on address */
1224 if (address > (pf->ioremap_len - sizeof(u32))) {
1225 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1226 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1227 goto command_write_done;
1228 }
1229 wr32(&pf->hw, address, value);
1230 value = rd32(&pf->hw, address);
1231 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1232 address, value);
1233 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1234 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1235 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1236 if (cnt == 0) {
1237 int i;
1238
1239 i40e_pf_for_each_vsi(pf, i, vsi)
1240 i40e_vsi_reset_stats(vsi);
1241 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1242 } else if (cnt == 1) {
1243 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1244 if (!vsi) {
1245 dev_info(&pf->pdev->dev,
1246 "clear_stats vsi: bad vsi %d\n",
1247 vsi_seid);
1248 goto command_write_done;
1249 }
1250 i40e_vsi_reset_stats(vsi);
1251 dev_info(&pf->pdev->dev,
1252 "vsi clear stats called for vsi %d\n",
1253 vsi_seid);
1254 } else {
1255 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1256 }
1257 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1258 if (pf->hw.partition_id == 1) {
1259 i40e_pf_reset_stats(pf);
1260 dev_info(&pf->pdev->dev, "port stats cleared\n");
1261 } else {
1262 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1263 }
1264 } else {
1265 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1266 }
1267 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1268 struct i40e_aq_desc *desc;
1269 int ret;
1270
1271 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1272 if (!desc)
1273 goto command_write_done;
1274 cnt = sscanf(&cmd_buf[11],
1275 "%hi %hi %hi %hi %i %i %i %i %i %i",
1276 &desc->flags,
1277 &desc->opcode, &desc->datalen, &desc->retval,
1278 &desc->cookie_high, &desc->cookie_low,
1279 &desc->params.internal.param0,
1280 &desc->params.internal.param1,
1281 &desc->params.internal.param2,
1282 &desc->params.internal.param3);
1283 if (cnt != 10) {
1284 dev_info(&pf->pdev->dev,
1285 "send aq_cmd: bad command string, cnt=%d\n",
1286 cnt);
1287 kfree(desc);
1288 desc = NULL;
1289 goto command_write_done;
1290 }
1291 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1292 if (!ret) {
1293 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1294 } else if (ret == -EIO) {
1295 dev_info(&pf->pdev->dev,
1296 "AQ command send failed Opcode %x AQ Error: %d\n",
1297 desc->opcode, pf->hw.aq.asq_last_status);
1298 } else {
1299 dev_info(&pf->pdev->dev,
1300 "AQ command send failed Opcode %x Status: %d\n",
1301 desc->opcode, ret);
1302 }
1303 dev_info(&pf->pdev->dev,
1304 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1305 desc->flags, desc->opcode, desc->datalen, desc->retval,
1306 desc->cookie_high, desc->cookie_low,
1307 desc->params.internal.param0,
1308 desc->params.internal.param1,
1309 desc->params.internal.param2,
1310 desc->params.internal.param3);
1311 kfree(desc);
1312 desc = NULL;
1313 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1314 struct i40e_aq_desc *desc;
1315 u16 buffer_len;
1316 u8 *buff;
1317 int ret;
1318
1319 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1320 if (!desc)
1321 goto command_write_done;
1322 cnt = sscanf(&cmd_buf[20],
1323 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1324 &desc->flags,
1325 &desc->opcode, &desc->datalen, &desc->retval,
1326 &desc->cookie_high, &desc->cookie_low,
1327 &desc->params.internal.param0,
1328 &desc->params.internal.param1,
1329 &desc->params.internal.param2,
1330 &desc->params.internal.param3,
1331 &buffer_len);
1332 if (cnt != 11) {
1333 dev_info(&pf->pdev->dev,
1334 "send indirect aq_cmd: bad command string, cnt=%d\n",
1335 cnt);
1336 kfree(desc);
1337 desc = NULL;
1338 goto command_write_done;
1339 }
1340 /* Just stub a buffer big enough in case user messed up */
1341 if (buffer_len == 0)
1342 buffer_len = 1280;
1343
1344 buff = kzalloc(buffer_len, GFP_KERNEL);
1345 if (!buff) {
1346 kfree(desc);
1347 desc = NULL;
1348 goto command_write_done;
1349 }
1350 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1351 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1352 buffer_len, NULL);
1353 if (!ret) {
1354 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1355 } else if (ret == -EIO) {
1356 dev_info(&pf->pdev->dev,
1357 "AQ command send failed Opcode %x AQ Error: %d\n",
1358 desc->opcode, pf->hw.aq.asq_last_status);
1359 } else {
1360 dev_info(&pf->pdev->dev,
1361 "AQ command send failed Opcode %x Status: %d\n",
1362 desc->opcode, ret);
1363 }
1364 dev_info(&pf->pdev->dev,
1365 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1366 desc->flags, desc->opcode, desc->datalen, desc->retval,
1367 desc->cookie_high, desc->cookie_low,
1368 desc->params.internal.param0,
1369 desc->params.internal.param1,
1370 desc->params.internal.param2,
1371 desc->params.internal.param3);
1372 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1373 DUMP_PREFIX_OFFSET, 16, 1,
1374 buff, buffer_len, true);
1375 kfree(buff);
1376 buff = NULL;
1377 kfree(desc);
1378 desc = NULL;
1379 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1380 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1381 i40e_get_current_fd_count(pf));
1382 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1383 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1384 int ret;
1385
1386 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1387 if (ret) {
1388 dev_info(&pf->pdev->dev,
1389 "Stop LLDP AQ command failed =0x%x\n",
1390 pf->hw.aq.asq_last_status);
1391 goto command_write_done;
1392 }
1393 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1394 pf->hw.mac.addr,
1395 ETH_P_LLDP, 0,
1396 pf->vsi[pf->lan_vsi]->seid,
1397 0, true, NULL, NULL);
1398 if (ret) {
1399 dev_info(&pf->pdev->dev,
1400 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1401 __func__, pf->hw.aq.asq_last_status);
1402 goto command_write_done;
1403 }
1404#ifdef CONFIG_I40E_DCB
1405 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1406 DCB_CAP_DCBX_VER_IEEE;
1407#endif /* CONFIG_I40E_DCB */
1408 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1409 int ret;
1410
1411 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1412 pf->hw.mac.addr,
1413 ETH_P_LLDP, 0,
1414 pf->vsi[pf->lan_vsi]->seid,
1415 0, false, NULL, NULL);
1416 if (ret) {
1417 dev_info(&pf->pdev->dev,
1418 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1419 __func__, pf->hw.aq.asq_last_status);
1420 /* Continue and start FW LLDP anyways */
1421 }
1422
1423 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1424 if (ret) {
1425 dev_info(&pf->pdev->dev,
1426 "Start LLDP AQ command failed =0x%x\n",
1427 pf->hw.aq.asq_last_status);
1428 goto command_write_done;
1429 }
1430#ifdef CONFIG_I40E_DCB
1431 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1432 DCB_CAP_DCBX_VER_IEEE;
1433#endif /* CONFIG_I40E_DCB */
1434 } else if (strncmp(&cmd_buf[5],
1435 "get local", 9) == 0) {
1436 u16 llen, rlen;
1437 int ret;
1438 u8 *buff;
1439
1440 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1441 if (!buff)
1442 goto command_write_done;
1443
1444 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1445 I40E_AQ_LLDP_MIB_LOCAL,
1446 buff, I40E_LLDPDU_SIZE,
1447 &llen, &rlen, NULL);
1448 if (ret) {
1449 dev_info(&pf->pdev->dev,
1450 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1451 pf->hw.aq.asq_last_status);
1452 kfree(buff);
1453 buff = NULL;
1454 goto command_write_done;
1455 }
1456 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1457 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1458 DUMP_PREFIX_OFFSET, 16, 1,
1459 buff, I40E_LLDPDU_SIZE, true);
1460 kfree(buff);
1461 buff = NULL;
1462 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1463 u16 llen, rlen;
1464 int ret;
1465 u8 *buff;
1466
1467 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1468 if (!buff)
1469 goto command_write_done;
1470
1471 ret = i40e_aq_get_lldp_mib(&pf->hw,
1472 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1473 I40E_AQ_LLDP_MIB_REMOTE,
1474 buff, I40E_LLDPDU_SIZE,
1475 &llen, &rlen, NULL);
1476 if (ret) {
1477 dev_info(&pf->pdev->dev,
1478 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1479 pf->hw.aq.asq_last_status);
1480 kfree(buff);
1481 buff = NULL;
1482 goto command_write_done;
1483 }
1484 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1485 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1486 DUMP_PREFIX_OFFSET, 16, 1,
1487 buff, I40E_LLDPDU_SIZE, true);
1488 kfree(buff);
1489 buff = NULL;
1490 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1491 int ret;
1492
1493 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1494 true, NULL);
1495 if (ret) {
1496 dev_info(&pf->pdev->dev,
1497 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1498 pf->hw.aq.asq_last_status);
1499 goto command_write_done;
1500 }
1501 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1502 int ret;
1503
1504 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1505 false, NULL);
1506 if (ret) {
1507 dev_info(&pf->pdev->dev,
1508 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1509 pf->hw.aq.asq_last_status);
1510 goto command_write_done;
1511 }
1512 }
1513 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1514 u16 buffer_len, bytes;
1515 u16 module;
1516 u32 offset;
1517 u16 *buff;
1518 int ret;
1519
1520 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1521 &module, &offset, &buffer_len);
1522 if (cnt == 0) {
1523 module = 0;
1524 offset = 0;
1525 buffer_len = 0;
1526 } else if (cnt == 1) {
1527 offset = 0;
1528 buffer_len = 0;
1529 } else if (cnt == 2) {
1530 buffer_len = 0;
1531 } else if (cnt > 3) {
1532 dev_info(&pf->pdev->dev,
1533 "nvm read: bad command string, cnt=%d\n", cnt);
1534 goto command_write_done;
1535 }
1536
1537 /* set the max length */
1538 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1539
1540 bytes = 2 * buffer_len;
1541
1542 /* read at least 1k bytes, no more than 4kB */
1543 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1544 buff = kzalloc(bytes, GFP_KERNEL);
1545 if (!buff)
1546 goto command_write_done;
1547
1548 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1549 if (ret) {
1550 dev_info(&pf->pdev->dev,
1551 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1552 ret, pf->hw.aq.asq_last_status);
1553 kfree(buff);
1554 goto command_write_done;
1555 }
1556
1557 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1558 bytes, (u8 *)buff, true, NULL);
1559 i40e_release_nvm(&pf->hw);
1560 if (ret) {
1561 dev_info(&pf->pdev->dev,
1562 "Read NVM AQ failed err=%d status=0x%x\n",
1563 ret, pf->hw.aq.asq_last_status);
1564 } else {
1565 dev_info(&pf->pdev->dev,
1566 "Read NVM module=0x%x offset=0x%x words=%d\n",
1567 module, offset, buffer_len);
1568 if (bytes)
1569 print_hex_dump(KERN_INFO, "NVM Dump: ",
1570 DUMP_PREFIX_OFFSET, 16, 2,
1571 buff, bytes, true);
1572 }
1573 kfree(buff);
1574 buff = NULL;
1575 } else {
1576 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1577 dev_info(&pf->pdev->dev, "available commands\n");
1578 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1579 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1580 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1581 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1582 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1583 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1584 dev_info(&pf->pdev->dev, " dump switch\n");
1585 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1586 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1587 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1588 dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1589 dev_info(&pf->pdev->dev, " dump desc aq\n");
1590 dev_info(&pf->pdev->dev, " dump reset stats\n");
1591 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
1592 dev_info(&pf->pdev->dev, " read <reg>\n");
1593 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1594 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1595 dev_info(&pf->pdev->dev, " clear_stats port\n");
1596 dev_info(&pf->pdev->dev, " pfr\n");
1597 dev_info(&pf->pdev->dev, " corer\n");
1598 dev_info(&pf->pdev->dev, " globr\n");
1599 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
1600 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
1601 dev_info(&pf->pdev->dev, " fd current cnt");
1602 dev_info(&pf->pdev->dev, " lldp start\n");
1603 dev_info(&pf->pdev->dev, " lldp stop\n");
1604 dev_info(&pf->pdev->dev, " lldp get local\n");
1605 dev_info(&pf->pdev->dev, " lldp get remote\n");
1606 dev_info(&pf->pdev->dev, " lldp event on\n");
1607 dev_info(&pf->pdev->dev, " lldp event off\n");
1608 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
1609 }
1610
1611command_write_done:
1612 kfree(cmd_buf);
1613 cmd_buf = NULL;
1614 return count;
1615}
1616
1617static const struct file_operations i40e_dbg_command_fops = {
1618 .owner = THIS_MODULE,
1619 .open = simple_open,
1620 .read = i40e_dbg_command_read,
1621 .write = i40e_dbg_command_write,
1622};
1623
1624/**************************************************************
1625 * netdev_ops
1626 * The netdev_ops entry in debugfs is for giving the driver commands
1627 * to be executed from the netdev operations.
1628 **************************************************************/
1629static char i40e_dbg_netdev_ops_buf[256] = "";
1630
1631/**
1632 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
1633 * @filp: the opened file
1634 * @buffer: where to write the data for the user to read
1635 * @count: the size of the user's buffer
1636 * @ppos: file position offset
1637 **/
1638static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
1639 size_t count, loff_t *ppos)
1640{
1641 struct i40e_pf *pf = filp->private_data;
1642 int bytes_not_copied;
1643 int buf_size = 256;
1644 char *buf;
1645 int len;
1646
1647 /* don't allow partal reads */
1648 if (*ppos != 0)
1649 return 0;
1650 if (count < buf_size)
1651 return -ENOSPC;
1652
1653 buf = kzalloc(buf_size, GFP_KERNEL);
1654 if (!buf)
1655 return -ENOSPC;
1656
1657 len = snprintf(buf, buf_size, "%s: %s\n",
1658 pf->vsi[pf->lan_vsi]->netdev->name,
1659 i40e_dbg_netdev_ops_buf);
1660
1661 bytes_not_copied = copy_to_user(buffer, buf, len);
1662 kfree(buf);
1663
1664 if (bytes_not_copied)
1665 return -EFAULT;
1666
1667 *ppos = len;
1668 return len;
1669}
1670
1671/**
1672 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
1673 * @filp: the opened file
1674 * @buffer: where to find the user's data
1675 * @count: the length of the user's data
1676 * @ppos: file position offset
1677 **/
1678static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
1679 const char __user *buffer,
1680 size_t count, loff_t *ppos)
1681{
1682 struct i40e_pf *pf = filp->private_data;
1683 int bytes_not_copied;
1684 struct i40e_vsi *vsi;
1685 char *buf_tmp;
1686 int vsi_seid;
1687 int i, cnt;
1688
1689 /* don't allow partial writes */
1690 if (*ppos != 0)
1691 return 0;
1692 if (count >= sizeof(i40e_dbg_netdev_ops_buf))
1693 return -ENOSPC;
1694
1695 memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
1696 bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
1697 buffer, count);
1698 if (bytes_not_copied)
1699 return -EFAULT;
1700 i40e_dbg_netdev_ops_buf[count] = '\0';
1701
1702 buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
1703 if (buf_tmp) {
1704 *buf_tmp = '\0';
1705 count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
1706 }
1707
1708 if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
1709 int mtu;
1710
1711 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
1712 &vsi_seid, &mtu);
1713 if (cnt != 2) {
1714 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
1715 goto netdev_ops_write_done;
1716 }
1717 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1718 if (!vsi) {
1719 dev_info(&pf->pdev->dev,
1720 "change_mtu: VSI %d not found\n", vsi_seid);
1721 } else if (!vsi->netdev) {
1722 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
1723 vsi_seid);
1724 } else if (rtnl_trylock()) {
1725 vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
1726 mtu);
1727 rtnl_unlock();
1728 dev_info(&pf->pdev->dev, "change_mtu called\n");
1729 } else {
1730 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1731 }
1732
1733 } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
1734 cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
1735 if (cnt != 1) {
1736 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
1737 goto netdev_ops_write_done;
1738 }
1739 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1740 if (!vsi) {
1741 dev_info(&pf->pdev->dev,
1742 "set_rx_mode: VSI %d not found\n", vsi_seid);
1743 } else if (!vsi->netdev) {
1744 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
1745 vsi_seid);
1746 } else if (rtnl_trylock()) {
1747 vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
1748 rtnl_unlock();
1749 dev_info(&pf->pdev->dev, "set_rx_mode called\n");
1750 } else {
1751 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
1752 }
1753
1754 } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
1755 cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
1756 if (cnt != 1) {
1757 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
1758 goto netdev_ops_write_done;
1759 }
1760 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1761 if (!vsi) {
1762 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
1763 vsi_seid);
1764 } else if (!vsi->netdev) {
1765 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
1766 vsi_seid);
1767 } else {
1768 for (i = 0; i < vsi->num_q_vectors; i++)
1769 napi_schedule(&vsi->q_vectors[i]->napi);
1770 dev_info(&pf->pdev->dev, "napi called\n");
1771 }
1772 } else {
1773 dev_info(&pf->pdev->dev, "unknown command '%s'\n",
1774 i40e_dbg_netdev_ops_buf);
1775 dev_info(&pf->pdev->dev, "available commands\n");
1776 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
1777 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
1778 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
1779 }
1780netdev_ops_write_done:
1781 return count;
1782}
1783
1784static const struct file_operations i40e_dbg_netdev_ops_fops = {
1785 .owner = THIS_MODULE,
1786 .open = simple_open,
1787 .read = i40e_dbg_netdev_ops_read,
1788 .write = i40e_dbg_netdev_ops_write,
1789};
1790
1791/**
1792 * i40e_dbg_pf_init - setup the debugfs directory for the PF
1793 * @pf: the PF that is starting up
1794 **/
1795void i40e_dbg_pf_init(struct i40e_pf *pf)
1796{
1797 const char *name = pci_name(pf->pdev);
1798
1799 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
1800
1801 debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
1802 &i40e_dbg_command_fops);
1803
1804 debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
1805 &i40e_dbg_netdev_ops_fops);
1806}
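/*
 * With the calls above, each PF gets a per-device directory under the
 * driver's debugfs root, e.g. (paths illustrative, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/i40e/0000:01:00.0/command
 *   /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *
 * Both files are created with mode 0600, i.e. root read/write only.
 */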
1807
1808/**
1809 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
1810 * @pf: the PF that is stopping
1811 **/
1812void i40e_dbg_pf_exit(struct i40e_pf *pf)
1813{
1814 debugfs_remove_recursive(pf->i40e_dbg_pf);
1815 pf->i40e_dbg_pf = NULL;
1816}
1817
1818/**
1819 * i40e_dbg_init - start up debugfs for the driver
1820 **/
1821void i40e_dbg_init(void)
1822{
1823 i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
1824 if (IS_ERR(i40e_dbg_root))
1825 pr_info("init of debugfs failed\n");
1826}
1827
1828/**
1829 * i40e_dbg_exit - clean out the driver's debugfs entries
1830 **/
1831void i40e_dbg_exit(void)
1832{
1833 debugfs_remove_recursive(i40e_dbg_root);
1834 i40e_dbg_root = NULL;
1835}
1836
1837#endif /* CONFIG_DEBUG_FS */
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#ifdef CONFIG_DEBUG_FS
5
6#include <linux/fs.h>
7#include <linux/debugfs.h>
8#include <linux/if_bridge.h>
9#include "i40e.h"
10#include "i40e_virtchnl_pf.h"
11
12static struct dentry *i40e_dbg_root;
13
14enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18};
19
20/**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
24 **/
25static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26{
27 if (seid < 0) {
28 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
29
30 return NULL;
31 }
32
33 return i40e_pf_get_vsi_by_seid(pf, seid);
34}
35
36/**************************************************************
37 * command
38 * The command entry in debugfs is for giving the driver commands
39 * to be executed - these may be for changing the internal switch
40 * setup, adding or removing filters, or other things. Many of
41 * these will be useful for some forms of unit testing.
42 **************************************************************/
43static char i40e_dbg_command_buf[256] = "";
44
45/**
46 * i40e_dbg_command_read - read for command datum
47 * @filp: the opened file
48 * @buffer: where to write the data for the user to read
49 * @count: the size of the user's buffer
50 * @ppos: file position offset
51 **/
52static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
53 size_t count, loff_t *ppos)
54{
55 struct i40e_pf *pf = filp->private_data;
56 struct i40e_vsi *main_vsi;
57 int bytes_not_copied;
58 int buf_size = 256;
59 char *buf;
60 int len;
61
62 /* don't allow partial reads */
63 if (*ppos != 0)
64 return 0;
65 if (count < buf_size)
66 return -ENOSPC;
67
68 buf = kzalloc(buf_size, GFP_KERNEL);
69 if (!buf)
70 return -ENOSPC;
71
72 main_vsi = i40e_pf_get_main_vsi(pf);
73 len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
74 i40e_dbg_command_buf);
75
76 bytes_not_copied = copy_to_user(buffer, buf, len);
77 kfree(buf);
78
79 if (bytes_not_copied)
80 return -EFAULT;
81
82 *ppos = len;
83 return len;
84}
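/*
 * Reading the "command" file returns a single line of the form
 * "<netdev name>: <i40e_dbg_command_buf>", for example (netdev name and
 * path illustrative):
 *
 *   cat /sys/kernel/debug/i40e/0000:01:00.0/command
 *   eth0:
 *
 * Note that reads with a user buffer smaller than 256 bytes fail with
 * -ENOSPC, and partial reads (non-zero *ppos) return 0.
 */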
85
86static char *i40e_filter_state_string[] = {
87 "INVALID",
88 "NEW",
89 "ACTIVE",
90 "FAILED",
91 "REMOVE",
92 "NEW_SYNC",
93};
94
95/**
96 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
97 * @pf: the i40e_pf created in command write
98 * @seid: the seid the user put in
99 **/
100static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
101{
102 struct rtnl_link_stats64 *nstat;
103 struct i40e_mac_filter *f;
104 struct i40e_vsi *vsi;
105 int i, bkt;
106
107 vsi = i40e_dbg_find_vsi(pf, seid);
108 if (!vsi) {
109 dev_info(&pf->pdev->dev,
110 "dump %d: seid not found\n", seid);
111 return;
112 }
113 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
114 if (vsi->netdev) {
115 struct net_device *nd = vsi->netdev;
116
117 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
118 nd->name, nd->state, nd->flags);
119 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
120 (unsigned long int)nd->features);
121 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
122 (unsigned long int)nd->hw_features);
123 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
124 (unsigned long int)nd->vlan_features);
125 }
126 dev_info(&pf->pdev->dev,
127 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
128 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
129 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
130 dev_info(&pf->pdev->dev,
131 " state[%d] = %08lx\n",
132 i, vsi->state[i]);
133 if (vsi->type == I40E_VSI_MAIN)
134 dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
135 pf->hw.mac.addr,
136 pf->hw.mac.port_addr);
137 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
138 dev_info(&pf->pdev->dev,
139 " mac_filter_hash: %pM vid=%d, state %s\n",
140 f->macaddr, f->vlan,
141 i40e_filter_state_string[f->state]);
142 }
143 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
144 vsi->active_filters, vsi->promisc_threshold,
145 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
146 "ON" : "OFF"));
147 nstat = i40e_get_vsi_stats_struct(vsi);
148 dev_info(&pf->pdev->dev,
149 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
150 (unsigned long int)nstat->rx_packets,
151 (unsigned long int)nstat->rx_bytes,
152 (unsigned long int)nstat->rx_errors,
153 (unsigned long int)nstat->rx_dropped);
154 dev_info(&pf->pdev->dev,
155 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
156 (unsigned long int)nstat->tx_packets,
157 (unsigned long int)nstat->tx_bytes,
158 (unsigned long int)nstat->tx_errors,
159 (unsigned long int)nstat->tx_dropped);
160 dev_info(&pf->pdev->dev,
161 " net_stats: multicast = %lu, collisions = %lu\n",
162 (unsigned long int)nstat->multicast,
163 (unsigned long int)nstat->collisions);
164 dev_info(&pf->pdev->dev,
165 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
166 (unsigned long int)nstat->rx_length_errors,
167 (unsigned long int)nstat->rx_over_errors,
168 (unsigned long int)nstat->rx_crc_errors);
169 dev_info(&pf->pdev->dev,
170 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
171 (unsigned long int)nstat->rx_frame_errors,
172 (unsigned long int)nstat->rx_fifo_errors,
173 (unsigned long int)nstat->rx_missed_errors);
174 dev_info(&pf->pdev->dev,
175 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
176 (unsigned long int)nstat->tx_aborted_errors,
177 (unsigned long int)nstat->tx_carrier_errors,
178 (unsigned long int)nstat->tx_fifo_errors);
179 dev_info(&pf->pdev->dev,
180 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
181 (unsigned long int)nstat->tx_heartbeat_errors,
182 (unsigned long int)nstat->tx_window_errors);
183 dev_info(&pf->pdev->dev,
184 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
185 (unsigned long int)nstat->rx_compressed,
186 (unsigned long int)nstat->tx_compressed);
187 dev_info(&pf->pdev->dev,
188 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
189 (unsigned long int)vsi->net_stats_offsets.rx_packets,
190 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
191 (unsigned long int)vsi->net_stats_offsets.rx_errors,
192 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
193 dev_info(&pf->pdev->dev,
194 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
195 (unsigned long int)vsi->net_stats_offsets.tx_packets,
196 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
197 (unsigned long int)vsi->net_stats_offsets.tx_errors,
198 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
199 dev_info(&pf->pdev->dev,
200 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
201 (unsigned long int)vsi->net_stats_offsets.multicast,
202 (unsigned long int)vsi->net_stats_offsets.collisions);
203 dev_info(&pf->pdev->dev,
204 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
205 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
206 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
207 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
208 dev_info(&pf->pdev->dev,
209 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
210 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
211 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
212 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
213 dev_info(&pf->pdev->dev,
214 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
215 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
216 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
217 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
218 dev_info(&pf->pdev->dev,
219 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
220 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
221 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
222 dev_info(&pf->pdev->dev,
223 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
224 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
225 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
226 dev_info(&pf->pdev->dev,
227 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
228 vsi->tx_restart, vsi->tx_busy,
229 vsi->rx_buf_failed, vsi->rx_page_failed);
230 rcu_read_lock();
231 for (i = 0; i < vsi->num_queue_pairs; i++) {
232 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
233
234 if (!rx_ring)
235 continue;
236
237 dev_info(&pf->pdev->dev,
238 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
239 i, *rx_ring->state,
240 rx_ring->queue_index,
241 rx_ring->reg_idx);
242 dev_info(&pf->pdev->dev,
243 " rx_rings[%i]: rx_buf_len = %d\n",
244 i, rx_ring->rx_buf_len);
245 dev_info(&pf->pdev->dev,
246 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
247 i,
248 rx_ring->next_to_use,
249 rx_ring->next_to_clean,
250 rx_ring->ring_active);
251 dev_info(&pf->pdev->dev,
252 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
253 i, rx_ring->stats.packets,
254 rx_ring->stats.bytes,
255 rx_ring->rx_stats.non_eop_descs);
256 dev_info(&pf->pdev->dev,
257 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
258 i,
259 rx_ring->rx_stats.alloc_page_failed,
260 rx_ring->rx_stats.alloc_buff_failed);
261 dev_info(&pf->pdev->dev,
262 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
263 i,
264 rx_ring->rx_stats.page_reuse_count);
265 dev_info(&pf->pdev->dev,
266 " rx_rings[%i]: size = %i\n",
267 i, rx_ring->size);
268 dev_info(&pf->pdev->dev,
269 " rx_rings[%i]: itr_setting = %d (%s)\n",
270 i, rx_ring->itr_setting,
271 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
272 }
273 for (i = 0; i < vsi->num_queue_pairs; i++) {
274 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
275
276 if (!tx_ring)
277 continue;
278
279 dev_info(&pf->pdev->dev,
280 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
281 i, *tx_ring->state,
282 tx_ring->queue_index,
283 tx_ring->reg_idx);
284 dev_info(&pf->pdev->dev,
285 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
286 i,
287 tx_ring->next_to_use,
288 tx_ring->next_to_clean,
289 tx_ring->ring_active);
290 dev_info(&pf->pdev->dev,
291 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
292 i, tx_ring->stats.packets,
293 tx_ring->stats.bytes,
294 tx_ring->tx_stats.restart_queue);
295 dev_info(&pf->pdev->dev,
296 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
297 i,
298 tx_ring->tx_stats.tx_busy,
299 tx_ring->tx_stats.tx_done_old,
300 tx_ring->tx_stats.tx_stopped);
301 dev_info(&pf->pdev->dev,
302 " tx_rings[%i]: size = %i\n",
303 i, tx_ring->size);
304 dev_info(&pf->pdev->dev,
305 " tx_rings[%i]: DCB tc = %d\n",
306 i, tx_ring->dcb_tc);
307 dev_info(&pf->pdev->dev,
308 " tx_rings[%i]: itr_setting = %d (%s)\n",
309 i, tx_ring->itr_setting,
310 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
311 }
312 if (i40e_enabled_xdp_vsi(vsi)) {
313 for (i = 0; i < vsi->num_queue_pairs; i++) {
314 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
315
316 if (!xdp_ring)
317 continue;
318
319 dev_info(&pf->pdev->dev,
320 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
321 i, *xdp_ring->state,
322 xdp_ring->queue_index,
323 xdp_ring->reg_idx);
324 dev_info(&pf->pdev->dev,
325 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
326 i,
327 xdp_ring->next_to_use,
328 xdp_ring->next_to_clean,
329 xdp_ring->ring_active);
330 dev_info(&pf->pdev->dev,
331 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
332 i, xdp_ring->stats.packets,
333 xdp_ring->stats.bytes,
334 xdp_ring->tx_stats.restart_queue);
335 dev_info(&pf->pdev->dev,
336 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
337 i,
338 xdp_ring->tx_stats.tx_busy,
339 xdp_ring->tx_stats.tx_done_old);
340 dev_info(&pf->pdev->dev,
341 " xdp_rings[%i]: size = %i\n",
342 i, xdp_ring->size);
343 dev_info(&pf->pdev->dev,
344 " xdp_rings[%i]: DCB tc = %d\n",
345 i, xdp_ring->dcb_tc);
346 dev_info(&pf->pdev->dev,
347 " xdp_rings[%i]: itr_setting = %d (%s)\n",
348 i, xdp_ring->itr_setting,
349 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
350 "dynamic" : "fixed");
351 }
352 }
353 rcu_read_unlock();
354 dev_info(&pf->pdev->dev,
355 " work_limit = %d\n",
356 vsi->work_limit);
357 dev_info(&pf->pdev->dev,
358 " max_frame = %d, rx_buf_len = %d, dtype = %d\n",
359 vsi->max_frame, vsi->rx_buf_len, 0);
360 dev_info(&pf->pdev->dev,
361 " num_q_vectors = %i, base_vector = %i\n",
362 vsi->num_q_vectors, vsi->base_vector);
363 dev_info(&pf->pdev->dev,
364 " seid = %d, id = %d, uplink_seid = %d\n",
365 vsi->seid, vsi->id, vsi->uplink_seid);
366 dev_info(&pf->pdev->dev,
367 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
368 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
369 vsi->num_rx_desc);
370 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
371 if (vsi->type == I40E_VSI_SRIOV)
372 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
373 dev_info(&pf->pdev->dev,
374 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
375 vsi->info.valid_sections, vsi->info.switch_id);
376 dev_info(&pf->pdev->dev,
377 " info: sw_reserved[] = 0x%02x 0x%02x\n",
378 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
379 dev_info(&pf->pdev->dev,
380 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
381 vsi->info.sec_flags, vsi->info.sec_reserved);
382 dev_info(&pf->pdev->dev,
383 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
384 vsi->info.pvid, vsi->info.fcoe_pvid,
385 vsi->info.port_vlan_flags);
386 dev_info(&pf->pdev->dev,
387 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
388 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
389 vsi->info.pvlan_reserved[2]);
390 dev_info(&pf->pdev->dev,
391 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
392 vsi->info.ingress_table, vsi->info.egress_table);
393 dev_info(&pf->pdev->dev,
394 " info: cas_pv_stag = 0x%04x, cas_pv_flags = 0x%02x, cas_pv_reserved = 0x%02x\n",
395 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
396 vsi->info.cas_pv_reserved);
397 dev_info(&pf->pdev->dev,
398 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
399 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
400 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
401 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
402 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
403 dev_info(&pf->pdev->dev,
404 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
405 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
406 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
407 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
408 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
409 dev_info(&pf->pdev->dev,
410 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
411 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
412 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
413 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
414 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
415 dev_info(&pf->pdev->dev,
416 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
417 vsi->info.queueing_opt_flags,
418 vsi->info.queueing_opt_reserved[0],
419 vsi->info.queueing_opt_reserved[1],
420 vsi->info.queueing_opt_reserved[2]);
421 dev_info(&pf->pdev->dev,
422 " info: up_enable_bits = 0x%02x\n",
423 vsi->info.up_enable_bits);
424 dev_info(&pf->pdev->dev,
425 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
426 vsi->info.sched_reserved, vsi->info.outer_up_table);
427 dev_info(&pf->pdev->dev,
428 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
429 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
430 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
431 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
432 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
433 dev_info(&pf->pdev->dev,
434 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
435 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
436 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
437 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
438 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
439 dev_info(&pf->pdev->dev,
440 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
441 vsi->info.stat_counter_idx, vsi->info.sched_id);
442 dev_info(&pf->pdev->dev,
443 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
444 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
445 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
446 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
447 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
448 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
449 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
450 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
451 dev_info(&pf->pdev->dev,
452 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
453 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
454 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
455 dev_info(&pf->pdev->dev,
456 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
457 i, vsi->tc_config.tc_info[i].qoffset,
458 vsi->tc_config.tc_info[i].qcount,
459 vsi->tc_config.tc_info[i].netdev_tc);
460 }
461 dev_info(&pf->pdev->dev,
462 " bw: bw_limit = %d, bw_max_quanta = %d\n",
463 vsi->bw_limit, vsi->bw_max_quanta);
464 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
465 dev_info(&pf->pdev->dev,
466 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
467 i, vsi->bw_ets_share_credits[i],
468 vsi->bw_ets_limit_credits[i],
469 vsi->bw_ets_max_quanta[i]);
470 }
471}
472
473/**
474 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
475 * @pf: the i40e_pf created in command write
476 **/
477static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
478{
479 struct i40e_adminq_ring *ring;
480 struct i40e_hw *hw = &pf->hw;
481 char hdr[32];
482 int i;
483
484 snprintf(hdr, sizeof(hdr), "%s %s: ",
485 dev_driver_string(&pf->pdev->dev),
486 dev_name(&pf->pdev->dev));
487
488 /* first the send (command) ring, then the receive (event) ring */
489 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
490 ring = &(hw->aq.asq);
491 for (i = 0; i < ring->count; i++) {
492 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
493
494 dev_info(&pf->pdev->dev,
495 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
496 i, d->flags, d->opcode, d->datalen, d->retval,
497 d->cookie_high, d->cookie_low);
498 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
499 16, 1, d->params.raw, 16, 0);
500 }
501
502 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
503 ring = &(hw->aq.arq);
504 for (i = 0; i < ring->count; i++) {
505 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
506
507 dev_info(&pf->pdev->dev,
508 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
509 i, d->flags, d->opcode, d->datalen, d->retval,
510 d->cookie_high, d->cookie_low);
511 print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
512 16, 1, d->params.raw, 16, 0);
513 }
514}
515
516/**
517 * i40e_dbg_dump_desc - handles dump desc write into command datum
518 * @cnt: number of arguments that the user supplied
519 * @vsi_seid: vsi id entered by user
520 * @ring_id: ring id entered by user
521 * @desc_n: descriptor number entered by user
522 * @pf: the i40e_pf created in command write
523 * @type: enum describing whether ring is RX, TX or XDP
524 **/
525static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
526 struct i40e_pf *pf, enum ring_type type)
527{
528 bool is_rx_ring = type == RING_TYPE_RX;
529 struct i40e_tx_desc *txd;
530 union i40e_rx_desc *rxd;
531 struct i40e_ring *ring;
532 struct i40e_vsi *vsi;
533 int i;
534
535 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
536 if (!vsi) {
537 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
538 return;
539 }
540 if (vsi->type != I40E_VSI_MAIN &&
541 vsi->type != I40E_VSI_FDIR &&
542 vsi->type != I40E_VSI_VMDQ2) {
543 dev_info(&pf->pdev->dev,
544 "vsi %d type %d descriptor rings not available\n",
545 vsi_seid, vsi->type);
546 return;
547 }
548 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
549 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
550 return;
551 }
552 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
553 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
554 return;
555 }
556 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
557 dev_info(&pf->pdev->dev,
558 "descriptor rings have not been allocated for vsi %d\n",
559 vsi_seid);
560 return;
561 }
562
563 switch (type) {
564 case RING_TYPE_RX:
565 ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
566 break;
567 case RING_TYPE_TX:
568 ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
569 break;
570 case RING_TYPE_XDP:
571 ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
572 break;
573 default:
574 ring = NULL;
575 break;
576 }
577 if (!ring)
578 return;
579
580 if (cnt == 2) {
581 switch (type) {
582 case RING_TYPE_RX:
583 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
584 break;
585 case RING_TYPE_TX:
586 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
587 break;
588 case RING_TYPE_XDP:
589 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
590 break;
591 }
592 for (i = 0; i < ring->count; i++) {
593 if (!is_rx_ring) {
594 txd = I40E_TX_DESC(ring, i);
595 dev_info(&pf->pdev->dev,
596 " d[%03x] = 0x%016llx 0x%016llx\n",
597 i, txd->buffer_addr,
598 txd->cmd_type_offset_bsz);
599 } else {
600 rxd = I40E_RX_DESC(ring, i);
601 dev_info(&pf->pdev->dev,
602 " d[%03x] = 0x%016llx 0x%016llx\n",
603 i, rxd->read.pkt_addr,
604 rxd->read.hdr_addr);
605 }
606 }
607 } else if (cnt == 3) {
608 if (desc_n >= ring->count || desc_n < 0) {
609 dev_info(&pf->pdev->dev,
610 "descriptor %d not found\n", desc_n);
611 goto out;
612 }
613 if (!is_rx_ring) {
614 txd = I40E_TX_DESC(ring, desc_n);
615 dev_info(&pf->pdev->dev,
616 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
617 vsi_seid, ring_id, desc_n,
618 txd->buffer_addr, txd->cmd_type_offset_bsz);
619 } else {
620 rxd = I40E_RX_DESC(ring, desc_n);
621 dev_info(&pf->pdev->dev,
622 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
623 vsi_seid, ring_id, desc_n,
624 rxd->read.pkt_addr, rxd->read.hdr_addr);
625 }
626 } else {
627 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
628 }
629
630out:
631 kfree(ring);
632}
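/*
 * Example "dump desc" invocations written to the "command" file (a
 * sketch: the VSI seid and ring index are arbitrary):
 *
 *   echo "dump desc tx 390 0" > .../command    dumps every descriptor
 *                                              of Tx ring 0
 *   echo "dump desc rx 390 0 5" > .../command  dumps only descriptor 5
 *
 * Only MAIN, FDIR and VMDQ2 VSIs expose their rings here, and the ring
 * structure is snapshotted with kmemdup() before its fields are read.
 */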
633
634/**
635 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
636 * @pf: the i40e_pf created in command write
637 **/
638static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
639{
640 struct i40e_vsi *vsi;
641 int i;
642
643 i40e_pf_for_each_vsi(pf, i, vsi)
644 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
645}
646
647/**
648 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
649 * @pf: the i40e_pf created in command write
650 * @estats: the eth stats structure to be dumped
651 **/
652static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
653 struct i40e_eth_stats *estats)
654{
655 dev_info(&pf->pdev->dev, " ethstats:\n");
656 dev_info(&pf->pdev->dev,
657 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
658 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
659 dev_info(&pf->pdev->dev,
660 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
661 estats->rx_broadcast, estats->rx_discards);
662 dev_info(&pf->pdev->dev,
663 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
664 estats->rx_unknown_protocol, estats->tx_bytes);
665 dev_info(&pf->pdev->dev,
666 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
667 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
668 dev_info(&pf->pdev->dev,
669 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
670 estats->tx_discards, estats->tx_errors);
671}
672
673/**
674 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
675 * @pf: the i40e_pf created in command write
676 * @seid: the seid the user put in
677 **/
678static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
679{
680 struct i40e_veb *veb;
681
682 veb = i40e_pf_get_veb_by_seid(pf, seid);
683 if (!veb) {
684 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
685 return;
686 }
687 dev_info(&pf->pdev->dev,
688 "veb idx=%d stats_idx=%d seid=%d uplink=%d mode=%s\n",
689 veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
690 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
691 i40e_dbg_dump_eth_stats(pf, &veb->stats);
692}
693
694/**
695 * i40e_dbg_dump_veb_all - dumps all known veb's stats
696 * @pf: the i40e_pf created in command write
697 **/
698static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
699{
700 struct i40e_veb *veb;
701 int i;
702
703 i40e_pf_for_each_veb(pf, i, veb)
704 i40e_dbg_dump_veb_seid(pf, veb->seid);
705}
706
707/**
708 * i40e_dbg_dump_vf - dump VF info
709 * @pf: the i40e_pf created in command write
710 * @vf_id: the vf_id from the user
711 **/
712static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
713{
714 struct i40e_vf *vf;
715 struct i40e_vsi *vsi;
716
717 if (!pf->num_alloc_vfs) {
718 dev_info(&pf->pdev->dev, "no VFs allocated\n");
719 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
720 vf = &pf->vf[vf_id];
721 vsi = pf->vsi[vf->lan_vsi_idx];
722 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
723 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
724 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
725 vf->num_mdd_events);
726 } else {
727 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
728 }
729}
730
731/**
732 * i40e_dbg_dump_vf_all - dump VF info for all VFs
733 * @pf: the i40e_pf created in command write
734 **/
735static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
736{
737 int i;
738
739 if (!pf->num_alloc_vfs)
740 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
741 else
742 for (i = 0; i < pf->num_alloc_vfs; i++)
743 i40e_dbg_dump_vf(pf, i);
744}
745
746/**
747 * i40e_dbg_command_write - write into command datum
748 * @filp: the opened file
749 * @buffer: where to find the user's data
750 * @count: the length of the user's data
751 * @ppos: file position offset
752 **/
753static ssize_t i40e_dbg_command_write(struct file *filp,
754 const char __user *buffer,
755 size_t count, loff_t *ppos)
756{
757 struct i40e_pf *pf = filp->private_data;
758 char *cmd_buf, *cmd_buf_tmp;
759 int bytes_not_copied;
760 struct i40e_vsi *vsi;
761 int vsi_seid;
762 int veb_seid;
763 int vf_id;
764 int cnt;
765
766 /* don't allow partial writes */
767 if (*ppos != 0)
768 return 0;
769
770 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
771 if (!cmd_buf)
772 return count;
773 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
774 if (bytes_not_copied) {
775 kfree(cmd_buf);
776 return -EFAULT;
777 }
778 cmd_buf[count] = '\0';
779
780 cmd_buf_tmp = strchr(cmd_buf, '\n');
781 if (cmd_buf_tmp) {
782 *cmd_buf_tmp = '\0';
783 count = cmd_buf_tmp - cmd_buf + 1;
784 }
785
786 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
787 vsi_seid = -1;
788 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
789 if (cnt == 0) {
790 /* default to PF VSI */
791 vsi = i40e_pf_get_main_vsi(pf);
792 vsi_seid = vsi->seid;
793 } else if (vsi_seid < 0) {
794 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
795 vsi_seid);
796 goto command_write_done;
797 }
798
799 /* By default we are in VEPA mode; if this is the first VF/VMDq
800 * VSI to be added, switch to VEB mode.
801 */
802 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
803 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
804 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
805 }
806
807 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
808 if (vsi)
809 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
810 vsi->seid, vsi->uplink_seid);
811 else
812 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
813
814 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
815 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
816 if (cnt != 1) {
817 dev_info(&pf->pdev->dev,
818 "del vsi: bad command string, cnt=%d\n",
819 cnt);
820 goto command_write_done;
821 }
822 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
823 if (!vsi) {
824 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
825 vsi_seid);
826 goto command_write_done;
827 }
828
829 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
830 i40e_vsi_release(vsi);
831
832 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
833 struct i40e_veb *veb;
834 u8 enabled_tc = 0x1;
835 int uplink_seid;
836
837 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
838 if (cnt == 0) {
839 uplink_seid = 0;
840 vsi_seid = 0;
841 } else if (cnt != 2) {
842 dev_info(&pf->pdev->dev,
843 "add relay: bad command string, cnt=%d\n",
844 cnt);
845 goto command_write_done;
846 } else if (uplink_seid < 0) {
847 dev_info(&pf->pdev->dev,
848 "add relay %d: bad uplink seid\n",
849 uplink_seid);
850 goto command_write_done;
851 }
852
853 if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
854 dev_info(&pf->pdev->dev,
855 "add relay: relay uplink %d not found\n",
856 uplink_seid);
857 goto command_write_done;
858 } else if (uplink_seid) {
859 vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
860 if (!vsi) {
861 dev_info(&pf->pdev->dev,
862 "add relay: VSI %d not found\n",
863 vsi_seid);
864 goto command_write_done;
865 }
866 enabled_tc = vsi->tc_config.enabled_tc;
867 } else if (vsi_seid) {
868 dev_info(&pf->pdev->dev,
869 "add relay: VSI must be 0 for floating relay\n");
870 goto command_write_done;
871 }
872
873 veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
874 if (veb)
875 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
876 else
877 dev_info(&pf->pdev->dev, "add relay failed\n");
878
879 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
880 struct i40e_veb *veb;
881 int i;
882
883 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
884 if (cnt != 1) {
885 dev_info(&pf->pdev->dev,
886 "del relay: bad command string, cnt=%d\n",
887 cnt);
888 goto command_write_done;
889 } else if (veb_seid < 0) {
890 dev_info(&pf->pdev->dev,
891 "del relay %d: bad relay seid\n", veb_seid);
892 goto command_write_done;
893 }
894
895 /* find the veb */
896 i40e_pf_for_each_veb(pf, i, veb)
897 if (veb->seid == veb_seid)
898 break;
899
900 if (i >= I40E_MAX_VEB) {
901 dev_info(&pf->pdev->dev,
902 "del relay: relay %d not found\n", veb_seid);
903 goto command_write_done;
904 }
905
906 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
907 i40e_veb_release(veb);
908 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
909 unsigned int v;
910 int ret;
911 u16 vid;
912
913 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
914 if (cnt != 2) {
915 dev_info(&pf->pdev->dev,
916 "add pvid: bad command string, cnt=%d\n", cnt);
917 goto command_write_done;
918 }
919
920 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
921 if (!vsi) {
922 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
923 vsi_seid);
924 goto command_write_done;
925 }
926
927 vid = v;
928 ret = i40e_vsi_add_pvid(vsi, vid);
929 if (!ret)
930 dev_info(&pf->pdev->dev,
931 "add pvid: %d added to VSI %d\n",
932 vid, vsi_seid);
933 else
934 dev_info(&pf->pdev->dev,
935 "add pvid: %d to VSI %d failed, ret=%d\n",
936 vid, vsi_seid, ret);
937
938 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
939
940 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
941 if (cnt != 1) {
942 dev_info(&pf->pdev->dev,
943 "del pvid: bad command string, cnt=%d\n",
944 cnt);
945 goto command_write_done;
946 }
947
948 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
949 if (!vsi) {
950 dev_info(&pf->pdev->dev,
951 "del pvid: VSI %d not found\n", vsi_seid);
952 goto command_write_done;
953 }
954
955 i40e_vsi_remove_pvid(vsi);
956 dev_info(&pf->pdev->dev,
957 "del pvid: removed from VSI %d\n", vsi_seid);
958
959 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
960 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
961 i40e_fetch_switch_configuration(pf, true);
962 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
963 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
964 if (cnt > 0)
965 i40e_dbg_dump_vsi_seid(pf, vsi_seid);
966 else
967 i40e_dbg_dump_vsi_no_seid(pf);
968 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
969 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
970 if (cnt > 0)
971 i40e_dbg_dump_veb_seid(pf, vsi_seid);
972 else
973 i40e_dbg_dump_veb_all(pf);
974 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
975 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
976 if (cnt > 0)
977 i40e_dbg_dump_vf(pf, vf_id);
978 else
979 i40e_dbg_dump_vf_all(pf);
980 } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
981 int ring_id, desc_n;
982 if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
983 cnt = sscanf(&cmd_buf[12], "%i %i %i",
984 &vsi_seid, &ring_id, &desc_n);
985 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
986 desc_n, pf, RING_TYPE_RX);
987 } else if (strncmp(&cmd_buf[10], "tx", 2)
988 == 0) {
989 cnt = sscanf(&cmd_buf[12], "%i %i %i",
990 &vsi_seid, &ring_id, &desc_n);
991 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
992 desc_n, pf, RING_TYPE_TX);
993 } else if (strncmp(&cmd_buf[10], "xdp", 3)
994 == 0) {
995 cnt = sscanf(&cmd_buf[13], "%i %i %i",
996 &vsi_seid, &ring_id, &desc_n);
997 i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
998 desc_n, pf, RING_TYPE_XDP);
999 } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
1000 i40e_dbg_dump_aq_desc(pf);
1001 } else {
1002 dev_info(&pf->pdev->dev,
1003 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1004 dev_info(&pf->pdev->dev,
1005 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1006 dev_info(&pf->pdev->dev,
1007 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1008 dev_info(&pf->pdev->dev, "dump desc aq\n");
1009 }
1010 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1011 dev_info(&pf->pdev->dev,
1012 "core reset count: %d\n", pf->corer_count);
1013 dev_info(&pf->pdev->dev,
1014 "global reset count: %d\n", pf->globr_count);
1015 dev_info(&pf->pdev->dev,
1016 "emp reset count: %d\n", pf->empr_count);
1017 dev_info(&pf->pdev->dev,
1018 "pf reset count: %d\n", pf->pfr_count);
1019 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1020 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1021 struct i40e_dcbx_config *cfg =
1022 &pf->hw.local_dcbx_config;
1023 struct i40e_dcbx_config *r_cfg =
1024 &pf->hw.remote_dcbx_config;
1025 int i, ret;
1026 u16 switch_id;
1027
1028 bw_data = kzalloc(sizeof(
1029 struct i40e_aqc_query_port_ets_config_resp),
1030 GFP_KERNEL);
1031 if (!bw_data) {
1032 ret = -ENOMEM;
1033 goto command_write_done;
1034 }
1035
1036 vsi = i40e_pf_get_main_vsi(pf);
1037 switch_id =
1038 le16_to_cpu(vsi->info.switch_id) &
1039 I40E_AQ_VSI_SW_ID_MASK;
1040
1041 ret = i40e_aq_query_port_ets_config(&pf->hw,
1042 switch_id,
1043 bw_data, NULL);
1044 if (ret) {
1045 dev_info(&pf->pdev->dev,
1046 "Query Port ETS Config AQ command failed =0x%x\n",
1047 pf->hw.aq.asq_last_status);
1048 kfree(bw_data);
1049 bw_data = NULL;
1050 goto command_write_done;
1051 }
1052 dev_info(&pf->pdev->dev,
1053 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1054 bw_data->tc_valid_bits,
1055 bw_data->tc_strict_priority_bits,
1056 le16_to_cpu(bw_data->tc_bw_max[0]),
1057 le16_to_cpu(bw_data->tc_bw_max[1]));
1058 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1059 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1060 bw_data->tc_bw_share_credits[i],
1061 le16_to_cpu(bw_data->tc_bw_limits[i]));
1062 }
1063
1064 kfree(bw_data);
1065 bw_data = NULL;
1066
1067 dev_info(&pf->pdev->dev,
1068 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1069 dev_info(&pf->pdev->dev,
1070 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1071 cfg->etscfg.willing, cfg->etscfg.cbs,
1072 cfg->etscfg.maxtcs);
1073 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1074 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1075 i, cfg->etscfg.prioritytable[i],
1076 cfg->etscfg.tcbwtable[i],
1077 cfg->etscfg.tsatable[i]);
1078 }
1079 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1080 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1081 i, cfg->etsrec.prioritytable[i],
1082 cfg->etsrec.tcbwtable[i],
1083 cfg->etsrec.tsatable[i]);
1084 }
1085 dev_info(&pf->pdev->dev,
1086 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1087 cfg->pfc.willing, cfg->pfc.mbc,
1088 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1089 dev_info(&pf->pdev->dev,
1090 "port app_table: num_apps=%d\n", cfg->numapps);
1091 for (i = 0; i < cfg->numapps; i++) {
1092 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1093 i, cfg->app[i].priority,
1094 cfg->app[i].selector,
1095 cfg->app[i].protocolid);
1096 }
1097 /* Peer TLV DCBX data */
1098 dev_info(&pf->pdev->dev,
1099 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1100 r_cfg->etscfg.willing,
1101 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1102 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1103 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1104 i, r_cfg->etscfg.prioritytable[i],
1105 r_cfg->etscfg.tcbwtable[i],
1106 r_cfg->etscfg.tsatable[i]);
1107 }
1108 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1109 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1110 i, r_cfg->etsrec.prioritytable[i],
1111 r_cfg->etsrec.tcbwtable[i],
1112 r_cfg->etsrec.tsatable[i]);
1113 }
1114 dev_info(&pf->pdev->dev,
1115 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1116 r_cfg->pfc.willing,
1117 r_cfg->pfc.mbc,
1118 r_cfg->pfc.pfccap,
1119 r_cfg->pfc.pfcenable);
1120 dev_info(&pf->pdev->dev,
1121 "remote port app_table: num_apps=%d\n",
1122 r_cfg->numapps);
1123 for (i = 0; i < r_cfg->numapps; i++) {
1124 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1125 i, r_cfg->app[i].priority,
1126 r_cfg->app[i].selector,
1127 r_cfg->app[i].protocolid);
1128 }
1129 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1130 int cluster_id, table_id;
1131 int index, ret;
1132 u16 buff_len = 4096;
1133 u32 next_index;
1134 u8 next_table;
1135 u8 *buff;
1136 u16 rlen;
1137
1138 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1139 &cluster_id, &table_id, &index);
1140 if (cnt != 3) {
1141 dev_info(&pf->pdev->dev,
1142 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1143 goto command_write_done;
1144 }
1145
1146 dev_info(&pf->pdev->dev,
1147 "AQ debug dump fwdata params %x %x %x %x\n",
1148 cluster_id, table_id, index, buff_len);
1149 buff = kzalloc(buff_len, GFP_KERNEL);
1150 if (!buff)
1151 goto command_write_done;
1152
1153 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
1154 index, buff_len, buff, &rlen,
1155 &next_table, &next_index,
1156 NULL);
1157 if (ret) {
1158 dev_info(&pf->pdev->dev,
1159 "debug dump fwdata AQ Failed %d 0x%x\n",
1160 ret, pf->hw.aq.asq_last_status);
1161 kfree(buff);
1162 buff = NULL;
1163 goto command_write_done;
1164 }
1165 dev_info(&pf->pdev->dev,
1166 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1167 rlen, next_table, next_index);
1168 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1169 DUMP_PREFIX_OFFSET, 16, 1,
1170 buff, rlen, true);
1171 kfree(buff);
1172 buff = NULL;
1173 } else {
1174 dev_info(&pf->pdev->dev,
1175 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1176 dev_info(&pf->pdev->dev, "dump switch\n");
1177 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1178 dev_info(&pf->pdev->dev, "dump reset stats\n");
1179 dev_info(&pf->pdev->dev, "dump port\n");
1180 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1181 dev_info(&pf->pdev->dev,
1182 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1183 }
1184 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1185 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1186 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1187
1188 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1189 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1190 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1191
1192 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1193 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1194 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1195
1196 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1197 u32 address;
1198 u32 value;
1199
1200 cnt = sscanf(&cmd_buf[4], "%i", &address);
1201 if (cnt != 1) {
1202 dev_info(&pf->pdev->dev, "read <reg>\n");
1203 goto command_write_done;
1204 }
1205
1206 /* check the range on address */
1207 if (address > (pf->ioremap_len - sizeof(u32))) {
1208 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1209 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1210 goto command_write_done;
1211 }
1212
1213 value = rd32(&pf->hw, address);
1214 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1215 address, value);
1216
1217 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1218 u32 address, value;
1219
1220 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1221 if (cnt != 2) {
1222 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1223 goto command_write_done;
1224 }
1225
1226 /* check the range on address */
1227 if (address > (pf->ioremap_len - sizeof(u32))) {
1228 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1229 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1230 goto command_write_done;
1231 }
1232 wr32(&pf->hw, address, value);
1233 value = rd32(&pf->hw, address);
1234 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1235 address, value);
1236 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1237 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1238 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1239 if (cnt == 0) {
1240 int i;
1241
1242 i40e_pf_for_each_vsi(pf, i, vsi)
1243 i40e_vsi_reset_stats(vsi);
1244 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
1245 } else if (cnt == 1) {
1246 vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1247 if (!vsi) {
1248 dev_info(&pf->pdev->dev,
1249 "clear_stats vsi: bad vsi %d\n",
1250 vsi_seid);
1251 goto command_write_done;
1252 }
1253 i40e_vsi_reset_stats(vsi);
1254 dev_info(&pf->pdev->dev,
1255 "vsi clear stats called for vsi %d\n",
1256 vsi_seid);
1257 } else {
1258 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1259 }
1260 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1261 if (pf->hw.partition_id == 1) {
1262 i40e_pf_reset_stats(pf);
1263 dev_info(&pf->pdev->dev, "port stats cleared\n");
1264 } else {
1265 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1266 }
1267 } else {
1268 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1269 }
1270 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1271 struct i40e_aq_desc *desc;
1272 int ret;
1273
1274 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1275 if (!desc)
1276 goto command_write_done;
1277 cnt = sscanf(&cmd_buf[11],
1278 "%hi %hi %hi %hi %i %i %i %i %i %i",
1279 &desc->flags,
1280 &desc->opcode, &desc->datalen, &desc->retval,
1281 &desc->cookie_high, &desc->cookie_low,
1282 &desc->params.internal.param0,
1283 &desc->params.internal.param1,
1284 &desc->params.internal.param2,
1285 &desc->params.internal.param3);
1286 if (cnt != 10) {
1287 dev_info(&pf->pdev->dev,
1288 "send aq_cmd: bad command string, cnt=%d\n",
1289 cnt);
1290 kfree(desc);
1291 desc = NULL;
1292 goto command_write_done;
1293 }
1294 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1295 if (!ret) {
1296 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1297 } else if (ret == -EIO) {
1298 dev_info(&pf->pdev->dev,
1299 "AQ command send failed Opcode %x AQ Error: %d\n",
1300 desc->opcode, pf->hw.aq.asq_last_status);
1301 } else {
1302 dev_info(&pf->pdev->dev,
1303 "AQ command send failed Opcode %x Status: %d\n",
1304 desc->opcode, ret);
1305 }
1306 dev_info(&pf->pdev->dev,
1307 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1308 desc->flags, desc->opcode, desc->datalen, desc->retval,
1309 desc->cookie_high, desc->cookie_low,
1310 desc->params.internal.param0,
1311 desc->params.internal.param1,
1312 desc->params.internal.param2,
1313 desc->params.internal.param3);
1314 kfree(desc);
1315 desc = NULL;
1316 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1317 struct i40e_aq_desc *desc;
1318 u16 buffer_len;
1319 u8 *buff;
1320 int ret;
1321
1322 desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1323 if (!desc)
1324 goto command_write_done;
1325 cnt = sscanf(&cmd_buf[20],
1326 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1327 &desc->flags,
1328 &desc->opcode, &desc->datalen, &desc->retval,
1329 &desc->cookie_high, &desc->cookie_low,
1330 &desc->params.internal.param0,
1331 &desc->params.internal.param1,
1332 &desc->params.internal.param2,
1333 &desc->params.internal.param3,
1334 &buffer_len);
1335 if (cnt != 11) {
1336 dev_info(&pf->pdev->dev,
1337 "send indirect aq_cmd: bad command string, cnt=%d\n",
1338 cnt);
1339 kfree(desc);
1340 desc = NULL;
1341 goto command_write_done;
1342 }
1343 /* Just stub a buffer big enough in case user messed up */
1344 if (buffer_len == 0)
1345 buffer_len = 1280;
1346
1347 buff = kzalloc(buffer_len, GFP_KERNEL);
1348 if (!buff) {
1349 kfree(desc);
1350 desc = NULL;
1351 goto command_write_done;
1352 }
1353 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
1354 ret = i40e_asq_send_command(&pf->hw, desc, buff,
1355 buffer_len, NULL);
1356 if (!ret) {
1357 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1358 } else if (ret == -EIO) {
1359 dev_info(&pf->pdev->dev,
1360 "AQ command send failed Opcode %x AQ Error: %d\n",
1361 desc->opcode, pf->hw.aq.asq_last_status);
1362 } else {
1363 dev_info(&pf->pdev->dev,
1364 "AQ command send failed Opcode %x Status: %d\n",
1365 desc->opcode, ret);
1366 }
1367 dev_info(&pf->pdev->dev,
1368 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1369 desc->flags, desc->opcode, desc->datalen, desc->retval,
1370 desc->cookie_high, desc->cookie_low,
1371 desc->params.internal.param0,
1372 desc->params.internal.param1,
1373 desc->params.internal.param2,
1374 desc->params.internal.param3);
1375 print_hex_dump(KERN_INFO, "AQ buffer WB: ",
1376 DUMP_PREFIX_OFFSET, 16, 1,
1377 buff, buffer_len, true);
1378 kfree(buff);
1379 buff = NULL;
1380 kfree(desc);
1381 desc = NULL;
1382 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1383 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1384 i40e_get_current_fd_count(pf));
1385 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1386 /* Get main VSI */
1387 struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
1388
1389 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1390 int ret;
1391
1392 ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1393 if (ret) {
1394 dev_info(&pf->pdev->dev,
1395 "Stop LLDP AQ command failed =0x%x\n",
1396 pf->hw.aq.asq_last_status);
1397 goto command_write_done;
1398 }
1399 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1400 pf->hw.mac.addr, ETH_P_LLDP, 0,
1401 main_vsi->seid, 0, true, NULL,
1402 NULL);
1403 if (ret) {
1404 dev_info(&pf->pdev->dev,
1405 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1406 __func__, pf->hw.aq.asq_last_status);
1407 goto command_write_done;
1408 }
1409#ifdef CONFIG_I40E_DCB
1410 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1411 DCB_CAP_DCBX_VER_IEEE;
1412#endif /* CONFIG_I40E_DCB */
1413 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1414 int ret;
1415
1416 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
1417 pf->hw.mac.addr, ETH_P_LLDP, 0,
1418 main_vsi->seid, 0, false, NULL,
1419 NULL);
1420 if (ret) {
1421 dev_info(&pf->pdev->dev,
1422 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
1423 __func__, pf->hw.aq.asq_last_status);
1424 /* Continue and start FW LLDP anyway */
1425 }
1426
1427 ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1428 if (ret) {
1429 dev_info(&pf->pdev->dev,
1430 "Start LLDP AQ command failed =0x%x\n",
1431 pf->hw.aq.asq_last_status);
1432 goto command_write_done;
1433 }
1434#ifdef CONFIG_I40E_DCB
1435 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1436 DCB_CAP_DCBX_VER_IEEE;
1437#endif /* CONFIG_I40E_DCB */
1438 } else if (strncmp(&cmd_buf[5],
1439 "get local", 9) == 0) {
1440 u16 llen, rlen;
1441 int ret;
1442 u8 *buff;
1443
1444 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1445 if (!buff)
1446 goto command_write_done;
1447
1448 ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
1449 I40E_AQ_LLDP_MIB_LOCAL,
1450 buff, I40E_LLDPDU_SIZE,
1451 &llen, &rlen, NULL);
1452 if (ret) {
1453 dev_info(&pf->pdev->dev,
1454 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1455 pf->hw.aq.asq_last_status);
1456 kfree(buff);
1457 buff = NULL;
1458 goto command_write_done;
1459 }
1460 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
1461 print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
1462 DUMP_PREFIX_OFFSET, 16, 1,
1463 buff, I40E_LLDPDU_SIZE, true);
1464 kfree(buff);
1465 buff = NULL;
1466 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1467 u16 llen, rlen;
1468 int ret;
1469 u8 *buff;
1470
1471 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1472 if (!buff)
1473 goto command_write_done;
1474
1475 ret = i40e_aq_get_lldp_mib(&pf->hw,
1476 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
1477 I40E_AQ_LLDP_MIB_REMOTE,
1478 buff, I40E_LLDPDU_SIZE,
1479 &llen, &rlen, NULL);
1480 if (ret) {
1481 dev_info(&pf->pdev->dev,
1482 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1483 pf->hw.aq.asq_last_status);
1484 kfree(buff);
1485 buff = NULL;
1486 goto command_write_done;
1487 }
1488 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
1489 print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
1490 DUMP_PREFIX_OFFSET, 16, 1,
1491 buff, I40E_LLDPDU_SIZE, true);
1492 kfree(buff);
1493 buff = NULL;
1494 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1495 int ret;
1496
1497 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1498 true, NULL);
1499 if (ret) {
1500 dev_info(&pf->pdev->dev,
1501 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1502 pf->hw.aq.asq_last_status);
1503 goto command_write_done;
1504 }
1505 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1506 int ret;
1507
1508 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
1509 false, NULL);
1510 if (ret) {
1511 dev_info(&pf->pdev->dev,
1512 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1513 pf->hw.aq.asq_last_status);
1514 goto command_write_done;
1515 }
1516 }
1517 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1518 u16 buffer_len, bytes;
1519 u16 module;
1520 u32 offset;
1521 u16 *buff;
1522 int ret;
1523
1524 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1525 &module, &offset, &buffer_len);
1526 if (cnt == 0) {
1527 module = 0;
1528 offset = 0;
1529 buffer_len = 0;
1530 } else if (cnt == 1) {
1531 offset = 0;
1532 buffer_len = 0;
1533 } else if (cnt == 2) {
1534 buffer_len = 0;
1535 } else if (cnt > 3) {
1536 dev_info(&pf->pdev->dev,
1537 "nvm read: bad command string, cnt=%d\n", cnt);
1538 goto command_write_done;
1539 }
1540
1541 /* set the max length */
1542 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1543
1544 bytes = 2 * buffer_len;
1545
1546 /* read at least 1k bytes, no more than 4kB */
1547 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
1548 buff = kzalloc(bytes, GFP_KERNEL);
1549 if (!buff)
1550 goto command_write_done;
1551
1552 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1553 if (ret) {
1554 dev_info(&pf->pdev->dev,
1555 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1556 ret, pf->hw.aq.asq_last_status);
1557 kfree(buff);
1558 goto command_write_done;
1559 }
1560
1561 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
1562 bytes, (u8 *)buff, true, NULL);
1563 i40e_release_nvm(&pf->hw);
1564 if (ret) {
1565 dev_info(&pf->pdev->dev,
1566 "Read NVM AQ failed err=%d status=0x%x\n",
1567 ret, pf->hw.aq.asq_last_status);
1568 } else {
1569 dev_info(&pf->pdev->dev,
1570 "Read NVM module=0x%x offset=0x%x words=%d\n",
1571 module, offset, buffer_len);
1572 if (bytes)
1573 print_hex_dump(KERN_INFO, "NVM Dump: ",
1574 DUMP_PREFIX_OFFSET, 16, 2,
1575 buff, bytes, true);
1576 }
1577 kfree(buff);
1578 buff = NULL;
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
		dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
		dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
		dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
		dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " dump switch\n");
		dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
		dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
		dev_info(&pf->pdev->dev, " dump desc aq\n");
		dev_info(&pf->pdev->dev, " dump reset stats\n");
		dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
		dev_info(&pf->pdev->dev, " read <reg>\n");
		dev_info(&pf->pdev->dev, " write <reg> <value>\n");
		dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
		dev_info(&pf->pdev->dev, " clear_stats port\n");
		dev_info(&pf->pdev->dev, " pfr\n");
		dev_info(&pf->pdev->dev, " corer\n");
		dev_info(&pf->pdev->dev, " globr\n");
		dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
		dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
		dev_info(&pf->pdev->dev, " fd current cnt\n");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};
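
/*
 * Example usage of the "command" entry (a sketch only; it assumes debugfs
 * is mounted at /sys/kernel/debug, that the top-level directory is named
 * after i40e_driver_name ("i40e"), and uses the illustrative PCI address
 * 0000:01:00.0 rather than a value fixed by the driver):
 *
 *   echo "lldp get local" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *   echo "nvm read 0 0 10" > /sys/kernel/debug/i40e/0000:01:00.0/command
 *   cat /sys/kernel/debug/i40e/0000:01:00.0/command
 *
 * Writing an unrecognized string logs the "available commands" help above
 * via dev_info().
 */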

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/
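/*
 * Example usage (a sketch only; the debugfs mount point, PCI address, and
 * VSI seid below are illustrative assumptions, not values fixed by the
 * driver):
 *
 *   echo "napi 390" > /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *   cat /sys/kernel/debug/i40e/0000:01:00.0/netdev_ops
 *
 * where 390 stands in for a VSI seid, e.g. one taken from the "dump switch"
 * output of the command entry.
 */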
static char i40e_dbg_netdev_ops_buf[256] = "";

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	struct i40e_vsi *main_vsi;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	main_vsi = i40e_pf_get_main_vsi(pf);
	len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}
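
/*
 * Resulting per-PF debugfs layout (a sketch; assumes debugfs is mounted at
 * /sys/kernel/debug and that i40e_driver_name is "i40e"; <bdf> is the PCI
 * address returned by pci_name()):
 *
 *   /sys/kernel/debug/i40e/<bdf>/command
 *   /sys/kernel/debug/i40e/<bdf>/netdev_ops
 */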

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */