// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

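/*
 * Per-port stats reported via "ethtool -S". Each entry pairs the string
 * shown to userspace with the field's byte offset in struct
 * mana_ethtool_stats; mana_get_ethtool_stats() reads the values with
 * plain offset arithmetic, so names and values stay in sync by
 * construction. The "hc_" entries are hardware counters queried from
 * the device.
 */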
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_mcast_bytes)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
				      rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};

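/*
 * Number of stats entries: the global table above plus the per-queue
 * RX and TX counters for every configured queue.
 */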
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues *
	       (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

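/*
 * Emit the stat names. The order here must match the order in which
 * mana_get_ethtool_stats() writes the values: global stats first, then
 * per-queue RX counters, then per-queue TX counters.
 */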
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

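/*
 * Snapshot all counters into the ethtool data array. Per-queue counters
 * are updated concurrently by the datapath, so each queue's block is
 * read under a u64_stats seqcount loop: if the writer changed the
 * counters mid-read, u64_stats_fetch_retry() makes us take the snapshot
 * again, which keeps 64-bit counters consistent on 32-bit hosts too.
 */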
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;

	/* Refresh the hardware counters from the device over GDMA */
	mana_query_gf_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

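/* ETHTOOL_GRXRINGS reports the RX ring count, e.g. for RSS tools. */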
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

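/*
 * The RSS key size is fixed, while the indirection table size comes
 * from the device (apc->indir_table_sz) rather than a compile-time
 * constant.
 */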
static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

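/*
 * Report the current RSS configuration ("ethtool -x"): Toeplitz hash
 * function, RX indirection table, and hash key.
 */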
static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

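/*
 * Apply a new RSS configuration ("ethtool -X"). The current table and
 * key are saved first so a failed mana_config_rss() can be rolled back
 * to the previous working state. Illustrative usage (interface name is
 * an example):
 *
 *	ethtool -X eth0 equal 8		# spread RX over queues 0-7
 *	ethtool -X eth0 hfunc toeplitz
 */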
static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

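/* MANA exposes combined (RX+TX) channels only. */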
static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

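/*
 * Change the channel count ("ethtool -L eth0 combined N", names
 * illustrative). RX buffers for the new queue count are pre-allocated
 * before the detach/attach cycle; if mana_attach() still fails, the
 * old queue count is restored.
 */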
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

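/* Report current and maximum supported RX/TX ring sizes. */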
static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

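/*
 * Resize the rings ("ethtool -G", e.g. "ethtool -G eth0 rx 1024 tx 512"
 * with an illustrative interface name). Requested sizes are checked
 * against the minimums and rounded up to the next power of two before
 * the detach/attach cycle is attempted.
 */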
static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d",
				   ring->tx_pending, MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d",
				   ring->rx_pending, MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

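/*
 * There is no traditional PHY to query, so report a synthetic
 * full-duplex link on PORT_OTHER.
 */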
static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

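/*
 * Installed on the port's net_device (ndev->ethtool_ops) by the probe
 * path in mana_en.c; the ethtool core then dispatches requests here.
 */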
const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam = mana_get_ringparam,
	.set_ringparam = mana_set_ringparam,
	.get_link_ksettings = mana_get_link_ksettings,
	.get_link = ethtool_op_get_link,
};