1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2/* QLogic qede NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
6
7#include <linux/crash_dump.h>
8#include <linux/module.h>
9#include <linux/pci.h>
10#include <linux/device.h>
11#include <linux/netdevice.h>
12#include <linux/etherdevice.h>
13#include <linux/skbuff.h>
14#include <linux/errno.h>
15#include <linux/list.h>
16#include <linux/string.h>
17#include <linux/dma-mapping.h>
18#include <linux/interrupt.h>
19#include <asm/byteorder.h>
20#include <asm/param.h>
21#include <linux/io.h>
22#include <linux/netdev_features.h>
23#include <linux/udp.h>
24#include <linux/tcp.h>
25#include <net/udp_tunnel.h>
26#include <linux/ip.h>
27#include <net/ipv6.h>
28#include <net/tcp.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
31#include <linux/pkt_sched.h>
32#include <linux/ethtool.h>
33#include <linux/in.h>
34#include <linux/random.h>
35#include <net/ip6_checksum.h>
36#include <linux/bitops.h>
37#include <linux/vmalloc.h>
38#include "qede.h"
39#include "qede_ptp.h"
40
41MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
42MODULE_LICENSE("GPL");
43
44static uint debug;
45module_param(debug, uint, 0);
46MODULE_PARM_DESC(debug, " Default debug msglevel");
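/* The 32-bit 'debug' value is decoded by qede_config_debug() below into a
 * debug level and a per-module verbosity bitmap; e.g. (illustrative) setting
 * bit 30 (debug=0x40000000) enables INFO-level prints, per the bit layout
 * documented above that function.
 */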
47
48static const struct qed_eth_ops *qed_ops;
49
50#define CHIP_NUM_57980S_40 0x1634
51#define CHIP_NUM_57980S_10 0x1666
52#define CHIP_NUM_57980S_MF 0x1636
53#define CHIP_NUM_57980S_100 0x1644
54#define CHIP_NUM_57980S_50 0x1654
55#define CHIP_NUM_57980S_25 0x1656
56#define CHIP_NUM_57980S_IOV 0x1664
57#define CHIP_NUM_AH 0x8070
58#define CHIP_NUM_AH_IOV 0x8090
59
60#ifndef PCI_DEVICE_ID_NX2_57980E
61#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
62#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
63#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
64#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
65#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
66#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
67#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
68#define PCI_DEVICE_ID_AH CHIP_NUM_AH
69#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
70
71#endif
72
73enum qede_pci_private {
74 QEDE_PRIVATE_PF,
75 QEDE_PRIVATE_VF
76};
77
78static const struct pci_device_id qede_pci_tbl[] = {
79 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
80 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
81 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
82 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
83 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
84 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
85#ifdef CONFIG_QED_SRIOV
86 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
87#endif
88 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
89#ifdef CONFIG_QED_SRIOV
90 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
91#endif
92 { 0 }
93};
94
95MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
96
97static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
98static pci_ers_result_t
99qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
100
101#define TX_TIMEOUT (5 * HZ)
102
103/* Utilize last protocol index for XDP */
104#define XDP_PI 11
105
106static void qede_remove(struct pci_dev *pdev);
107static void qede_shutdown(struct pci_dev *pdev);
108static void qede_link_update(void *dev, struct qed_link_output *link);
109static void qede_schedule_recovery_handler(void *dev);
110static void qede_recovery_handler(struct qede_dev *edev);
111static void qede_schedule_hw_err_handler(void *dev,
112 enum qed_hw_err_type err_type);
113static void qede_get_eth_tlv_data(void *edev, void *data);
114static void qede_get_generic_tlv_data(void *edev,
115 struct qed_generic_tlvs *data);
116static void qede_generic_hw_err_handler(struct qede_dev *edev);
117#ifdef CONFIG_QED_SRIOV
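/* The ndo_set_vf_* callbacks below implement the standard PF-side VF
 * administration interface; they are typically reached via commands such as
 * "ip link set <pf> vf <num> vlan|mac ..." (illustrative usage).
 */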
118static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
119 __be16 vlan_proto)
120{
121 struct qede_dev *edev = netdev_priv(ndev);
122
123 if (vlan > 4095) {
124 DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
125 return -EINVAL;
126 }
127
128 if (vlan_proto != htons(ETH_P_8021Q))
129 return -EPROTONOSUPPORT;
130
131 DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
132 vlan, vf);
133
134 return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
135}
136
137static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
138{
139 struct qede_dev *edev = netdev_priv(ndev);
140
141 DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
142
143 if (!is_valid_ether_addr(mac)) {
144 DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
145 return -EINVAL;
146 }
147
148 return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
149}
150
151static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
152{
153 struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
154 struct qed_dev_info *qed_info = &edev->dev_info.common;
155 struct qed_update_vport_params *vport_params;
156 int rc;
157
158 vport_params = vzalloc(sizeof(*vport_params));
159 if (!vport_params)
160 return -ENOMEM;
161 DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
162
163 rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
164
165 /* Enable/Disable Tx switching for PF */
166 if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
167 !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
168 vport_params->vport_id = 0;
169 vport_params->update_tx_switching_flg = 1;
170 vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
171 edev->ops->vport_update(edev->cdev, vport_params);
172 }
173
174 vfree(vport_params);
175 return rc;
176}
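/* qede_sriov_configure() is wired to the PCI core's .sriov_configure hook,
 * so it runs when VFs are toggled through sysfs, e.g. (illustrative)
 * "echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs". A return value equal
 * to num_vfs_param indicates all requested VFs were enabled.
 */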
177#endif
178
179static int __maybe_unused qede_suspend(struct device *dev)
180{
181 dev_info(dev, "Device does not support suspend operation\n");
182
183 return -EOPNOTSUPP;
184}
185
186static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
187
188static const struct pci_error_handlers qede_err_handler = {
189 .error_detected = qede_io_error_detected,
190};
191
192static struct pci_driver qede_pci_driver = {
193 .name = "qede",
194 .id_table = qede_pci_tbl,
195 .probe = qede_probe,
196 .remove = qede_remove,
197 .shutdown = qede_shutdown,
198#ifdef CONFIG_QED_SRIOV
199 .sriov_configure = qede_sriov_configure,
200#endif
201 .err_handler = &qede_err_handler,
202 .driver.pm = &qede_pm_ops,
203};
204
205static struct qed_eth_cb_ops qede_ll_ops = {
206 {
207#ifdef CONFIG_RFS_ACCEL
208 .arfs_filter_op = qede_arfs_filter_op,
209#endif
210 .link_update = qede_link_update,
211 .schedule_recovery_handler = qede_schedule_recovery_handler,
212 .schedule_hw_err_handler = qede_schedule_hw_err_handler,
213 .get_generic_tlv_data = qede_get_generic_tlv_data,
214 .get_protocol_tlv_data = qede_get_eth_tlv_data,
215 },
216 .force_mac = qede_force_mac,
217 .ports_update = qede_udp_ports_update,
218};
219
220static int qede_netdev_event(struct notifier_block *this, unsigned long event,
221 void *ptr)
222{
223 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
224 struct ethtool_drvinfo drvinfo;
225 struct qede_dev *edev;
226
227 if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
228 goto done;
229
230 /* Check whether this is a qede device */
231 if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
232 goto done;
233
234 memset(&drvinfo, 0, sizeof(drvinfo));
235 ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
236 if (strcmp(drvinfo.driver, "qede"))
237 goto done;
238 edev = netdev_priv(ndev);
239
240 switch (event) {
241 case NETDEV_CHANGENAME:
242 /* Notify qed of the name change */
243 if (!edev->ops || !edev->ops->common)
244 goto done;
245 edev->ops->common->set_name(edev->cdev, edev->ndev->name);
246 break;
247 case NETDEV_CHANGEADDR:
248 edev = netdev_priv(ndev);
249 qede_rdma_event_changeaddr(edev);
250 break;
251 }
252
253done:
254 return NOTIFY_DONE;
255}
256
257static struct notifier_block qede_netdev_notifier = {
258 .notifier_call = qede_netdev_event,
259};
260
261static
262int __init qede_init(void)
263{
264 int ret;
265
266 pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
267
268 qede_forced_speed_maps_init();
269
270 qed_ops = qed_get_eth_ops();
271 if (!qed_ops) {
272 pr_notice("Failed to get qed ethtool operations\n");
273 return -EINVAL;
274 }
275
276 /* Must register notifier before pci ops, since we might miss
277 * interface rename after pci probe and netdev registration.
278 */
279 ret = register_netdevice_notifier(&qede_netdev_notifier);
280 if (ret) {
281 pr_notice("Failed to register netdevice_notifier\n");
282 qed_put_eth_ops();
283 return -EINVAL;
284 }
285
286 ret = pci_register_driver(&qede_pci_driver);
287 if (ret) {
288 pr_notice("Failed to register driver\n");
289 unregister_netdevice_notifier(&qede_netdev_notifier);
290 qed_put_eth_ops();
291 return -EINVAL;
292 }
293
294 return 0;
295}
296
297static void __exit qede_cleanup(void)
298{
299 if (debug & QED_LOG_INFO_MASK)
300 pr_info("qede_cleanup called\n");
301
302 unregister_netdevice_notifier(&qede_netdev_notifier);
303 pci_unregister_driver(&qede_pci_driver);
304 qed_put_eth_ops();
305}
306
307module_init(qede_init);
308module_exit(qede_cleanup);
309
310static int qede_open(struct net_device *ndev);
311static int qede_close(struct net_device *ndev);
312
313void qede_fill_by_demand_stats(struct qede_dev *edev)
314{
315 struct qede_stats_common *p_common = &edev->stats.common;
316 struct qed_eth_stats stats;
317
318 edev->ops->get_vport_stats(edev->cdev, &stats);
319
320 spin_lock(&edev->stats_lock);
321
322 p_common->no_buff_discards = stats.common.no_buff_discards;
323 p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
324 p_common->ttl0_discard = stats.common.ttl0_discard;
325 p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
326 p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
327 p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
328 p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
329 p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
330 p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
331 p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
332 p_common->mac_filter_discards = stats.common.mac_filter_discards;
333 p_common->gft_filter_drop = stats.common.gft_filter_drop;
334
335 p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
336 p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
337 p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
338 p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
339 p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
340 p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
341 p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
342 p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
343 p_common->coalesced_events = stats.common.tpa_coalesced_events;
344 p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
345 p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
346 p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
347
348 p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
349 p_common->rx_65_to_127_byte_packets =
350 stats.common.rx_65_to_127_byte_packets;
351 p_common->rx_128_to_255_byte_packets =
352 stats.common.rx_128_to_255_byte_packets;
353 p_common->rx_256_to_511_byte_packets =
354 stats.common.rx_256_to_511_byte_packets;
355 p_common->rx_512_to_1023_byte_packets =
356 stats.common.rx_512_to_1023_byte_packets;
357 p_common->rx_1024_to_1518_byte_packets =
358 stats.common.rx_1024_to_1518_byte_packets;
359 p_common->rx_crc_errors = stats.common.rx_crc_errors;
360 p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
361 p_common->rx_pause_frames = stats.common.rx_pause_frames;
362 p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
363 p_common->rx_align_errors = stats.common.rx_align_errors;
364 p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
365 p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
366 p_common->rx_jabbers = stats.common.rx_jabbers;
367 p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
368 p_common->rx_fragments = stats.common.rx_fragments;
369 p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
370 p_common->tx_65_to_127_byte_packets =
371 stats.common.tx_65_to_127_byte_packets;
372 p_common->tx_128_to_255_byte_packets =
373 stats.common.tx_128_to_255_byte_packets;
374 p_common->tx_256_to_511_byte_packets =
375 stats.common.tx_256_to_511_byte_packets;
376 p_common->tx_512_to_1023_byte_packets =
377 stats.common.tx_512_to_1023_byte_packets;
378 p_common->tx_1024_to_1518_byte_packets =
379 stats.common.tx_1024_to_1518_byte_packets;
380 p_common->tx_pause_frames = stats.common.tx_pause_frames;
381 p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
382 p_common->brb_truncates = stats.common.brb_truncates;
383 p_common->brb_discards = stats.common.brb_discards;
384 p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
385 p_common->link_change_count = stats.common.link_change_count;
386 p_common->ptp_skip_txts = edev->ptp_skip_txts;
387
388 if (QEDE_IS_BB(edev)) {
389 struct qede_stats_bb *p_bb = &edev->stats.bb;
390
391 p_bb->rx_1519_to_1522_byte_packets =
392 stats.bb.rx_1519_to_1522_byte_packets;
393 p_bb->rx_1519_to_2047_byte_packets =
394 stats.bb.rx_1519_to_2047_byte_packets;
395 p_bb->rx_2048_to_4095_byte_packets =
396 stats.bb.rx_2048_to_4095_byte_packets;
397 p_bb->rx_4096_to_9216_byte_packets =
398 stats.bb.rx_4096_to_9216_byte_packets;
399 p_bb->rx_9217_to_16383_byte_packets =
400 stats.bb.rx_9217_to_16383_byte_packets;
401 p_bb->tx_1519_to_2047_byte_packets =
402 stats.bb.tx_1519_to_2047_byte_packets;
403 p_bb->tx_2048_to_4095_byte_packets =
404 stats.bb.tx_2048_to_4095_byte_packets;
405 p_bb->tx_4096_to_9216_byte_packets =
406 stats.bb.tx_4096_to_9216_byte_packets;
407 p_bb->tx_9217_to_16383_byte_packets =
408 stats.bb.tx_9217_to_16383_byte_packets;
409 p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
410 p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
411 } else {
412 struct qede_stats_ah *p_ah = &edev->stats.ah;
413
414 p_ah->rx_1519_to_max_byte_packets =
415 stats.ah.rx_1519_to_max_byte_packets;
416 p_ah->tx_1519_to_max_byte_packets =
417 stats.ah.tx_1519_to_max_byte_packets;
418 }
419
420 spin_unlock(&edev->stats_lock);
421}
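/* The snapshot filled above is protected by stats_lock and is consumed under
 * the same lock by qede_get_stats64() and other readers of edev->stats; the
 * periodic task (qede_periodic_task()) refreshes it in the background.
 */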
422
423static void qede_get_stats64(struct net_device *dev,
424 struct rtnl_link_stats64 *stats)
425{
426 struct qede_dev *edev = netdev_priv(dev);
427 struct qede_stats_common *p_common;
428
429 p_common = &edev->stats.common;
430
431 spin_lock(&edev->stats_lock);
432
433 stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
434 p_common->rx_bcast_pkts;
435 stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
436 p_common->tx_bcast_pkts;
437
438 stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
439 p_common->rx_bcast_bytes;
440 stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
441 p_common->tx_bcast_bytes;
442
443 stats->tx_errors = p_common->tx_err_drop_pkts;
444 stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
445
446 stats->rx_fifo_errors = p_common->no_buff_discards;
447
448 if (QEDE_IS_BB(edev))
449 stats->collisions = edev->stats.bb.tx_total_collisions;
450 stats->rx_crc_errors = p_common->rx_crc_errors;
451 stats->rx_frame_errors = p_common->rx_align_errors;
452
453 spin_unlock(&edev->stats_lock);
454}
455
456#ifdef CONFIG_QED_SRIOV
457static int qede_get_vf_config(struct net_device *dev, int vfidx,
458 struct ifla_vf_info *ivi)
459{
460 struct qede_dev *edev = netdev_priv(dev);
461
462 if (!edev->ops)
463 return -EINVAL;
464
465 return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
466}
467
468static int qede_set_vf_rate(struct net_device *dev, int vfidx,
469 int min_tx_rate, int max_tx_rate)
470{
471 struct qede_dev *edev = netdev_priv(dev);
472
473 return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
474 max_tx_rate);
475}
476
477static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
478{
479 struct qede_dev *edev = netdev_priv(dev);
480
481 if (!edev->ops)
482 return -EINVAL;
483
484 return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
485}
486
487static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
488 int link_state)
489{
490 struct qede_dev *edev = netdev_priv(dev);
491
492 if (!edev->ops)
493 return -EINVAL;
494
495 return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
496}
497
498static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
499{
500 struct qede_dev *edev = netdev_priv(dev);
501
502 if (!edev->ops)
503 return -EINVAL;
504
505 return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
506}
507#endif
508
509static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
510{
511 struct qede_dev *edev = netdev_priv(dev);
512
513 if (!netif_running(dev))
514 return -EAGAIN;
515
516 switch (cmd) {
517 case SIOCSHWTSTAMP:
518 return qede_ptp_hw_ts(edev, ifr);
519 default:
520 DP_VERBOSE(edev, QED_MSG_DEBUG,
521 "default IOCTL cmd 0x%x\n", cmd);
522 return -EOPNOTSUPP;
523 }
524
525 return 0;
526}
527
528static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
529{
530 char *p_sb = (char *)fp->sb_info->sb_virt;
531 u32 sb_size, i;
532
533 sb_size = sizeof(struct status_block);
534
535 for (i = 0; i < sb_size; i += 8)
536 DP_NOTICE(edev,
537 "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
538 p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
539 p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
540}
541
542static void
543qede_txq_fp_log_metadata(struct qede_dev *edev,
544 struct qede_fastpath *fp, struct qede_tx_queue *txq)
545{
546 struct qed_chain *p_chain = &txq->tx_pbl;
547
548 /* Dump txq/fp/sb ids and other metadata */
549 DP_NOTICE(edev,
550 "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
551 fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
552 p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
553
554 /* Dump all the relevant prod/cons indexes */
555 DP_NOTICE(edev,
556 "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
557 le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
558 qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
559}
560
561static void
562qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
563{
564 struct qed_sb_info_dbg sb_dbg;
565 int rc;
566
567 /* sb info */
568 qede_fp_sb_dump(edev, fp);
569
570 memset(&sb_dbg, 0, sizeof(sb_dbg));
571 rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
572
573 DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
574 sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
575
576 /* report to mfw */
577 edev->ops->common->mfw_report(edev->cdev,
578 "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
579 txq->index, le16_to_cpu(*txq->hw_cons_ptr),
580 qed_chain_get_cons_idx(&txq->tx_pbl),
581 qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
582 if (!rc)
583 edev->ops->common->mfw_report(edev->cdev,
584 "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
585 txq->index, fp->sb_info->igu_sb_id,
586 sb_dbg.igu_prod, sb_dbg.igu_cons,
587 sb_dbg.pi[TX_PI(txq->cos)]);
588}
589
590static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
591{
592 struct qede_dev *edev = netdev_priv(dev);
593 int i;
594
595 netif_carrier_off(dev);
596 DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
597
598 for_each_queue(i) {
599 struct qede_tx_queue *txq;
600 struct qede_fastpath *fp;
601 int cos;
602
603 fp = &edev->fp_array[i];
604 if (!(fp->type & QEDE_FASTPATH_TX))
605 continue;
606
607 for_each_cos_in_txq(edev, cos) {
608 txq = &fp->txq[cos];
609
610 /* Dump basic metadata for all queues */
611 qede_txq_fp_log_metadata(edev, fp, txq);
612
613 if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
614 qed_chain_get_prod_idx(&txq->tx_pbl))
615 qede_tx_log_print(edev, fp, txq);
616 }
617 }
618
619 if (IS_VF(edev))
620 return;
621
622 if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
623 edev->state == QEDE_STATE_RECOVERY) {
624 DP_INFO(edev,
625 "Avoid handling a Tx timeout while another HW error is being handled\n");
626 return;
627 }
628
629 set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
630 set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
631 schedule_delayed_work(&edev->sp_task, 0);
632}
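/* Note: for PFs the Tx timeout is escalated to the generic HW error handler
 * by setting QEDE_SP_HW_ERR above and scheduling sp_task (see qede_sp_task()).
 */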
633
634static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
635{
636 struct qede_dev *edev = netdev_priv(ndev);
637 int cos, count, offset;
638
639 if (num_tc > edev->dev_info.num_tc)
640 return -EINVAL;
641
642 netdev_reset_tc(ndev);
643 netdev_set_num_tc(ndev, num_tc);
644
645 for_each_cos_in_txq(edev, cos) {
646 count = QEDE_TSS_COUNT(edev);
647 offset = cos * QEDE_TSS_COUNT(edev);
648 netdev_set_tc_queue(ndev, cos, count, offset);
649 }
650
651 return 0;
652}
653
654static int
655qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
656 __be16 proto)
657{
658 switch (f->command) {
659 case FLOW_CLS_REPLACE:
660 return qede_add_tc_flower_fltr(edev, proto, f);
661 case FLOW_CLS_DESTROY:
662 return qede_delete_flow_filter(edev, f->cookie);
663 default:
664 return -EOPNOTSUPP;
665 }
666}
667
668static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
669 void *cb_priv)
670{
671 struct flow_cls_offload *f;
672 struct qede_dev *edev = cb_priv;
673
674 if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
675 return -EOPNOTSUPP;
676
677 switch (type) {
678 case TC_SETUP_CLSFLOWER:
679 f = type_data;
680 return qede_set_flower(edev, f, f->common.protocol);
681 default:
682 return -EOPNOTSUPP;
683 }
684}
685
686static LIST_HEAD(qede_block_cb_list);
687
688static int
689qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
690 void *type_data)
691{
692 struct qede_dev *edev = netdev_priv(dev);
693 struct tc_mqprio_qopt *mqprio;
694
695 switch (type) {
696 case TC_SETUP_BLOCK:
697 return flow_block_cb_setup_simple(type_data,
698 &qede_block_cb_list,
699 qede_setup_tc_block_cb,
700 edev, edev, true);
701 case TC_SETUP_QDISC_MQPRIO:
702 mqprio = type_data;
703
704 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
705 return qede_setup_tc(dev, mqprio->num_tc);
706 default:
707 return -EOPNOTSUPP;
708 }
709}
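/* Illustrative usage: an mqprio qdisc such as
 * "tc qdisc add dev <ifname> root mqprio num_tc 4 ... hw 1" reaches
 * qede_setup_tc() via TC_SETUP_QDISC_MQPRIO, while offloaded flower filters
 * are delivered to qede_set_flower() through the registered block callback.
 */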
710
711static const struct net_device_ops qede_netdev_ops = {
712 .ndo_open = qede_open,
713 .ndo_stop = qede_close,
714 .ndo_start_xmit = qede_start_xmit,
715 .ndo_select_queue = qede_select_queue,
716 .ndo_set_rx_mode = qede_set_rx_mode,
717 .ndo_set_mac_address = qede_set_mac_addr,
718 .ndo_validate_addr = eth_validate_addr,
719 .ndo_change_mtu = qede_change_mtu,
720 .ndo_eth_ioctl = qede_ioctl,
721 .ndo_tx_timeout = qede_tx_timeout,
722#ifdef CONFIG_QED_SRIOV
723 .ndo_set_vf_mac = qede_set_vf_mac,
724 .ndo_set_vf_vlan = qede_set_vf_vlan,
725 .ndo_set_vf_trust = qede_set_vf_trust,
726#endif
727 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
728 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
729 .ndo_fix_features = qede_fix_features,
730 .ndo_set_features = qede_set_features,
731 .ndo_get_stats64 = qede_get_stats64,
732#ifdef CONFIG_QED_SRIOV
733 .ndo_set_vf_link_state = qede_set_vf_link_state,
734 .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
735 .ndo_get_vf_config = qede_get_vf_config,
736 .ndo_set_vf_rate = qede_set_vf_rate,
737#endif
738 .ndo_features_check = qede_features_check,
739 .ndo_bpf = qede_xdp,
740#ifdef CONFIG_RFS_ACCEL
741 .ndo_rx_flow_steer = qede_rx_flow_steer,
742#endif
743 .ndo_xdp_xmit = qede_xdp_transmit,
744 .ndo_setup_tc = qede_setup_tc_offload,
745};
746
747static const struct net_device_ops qede_netdev_vf_ops = {
748 .ndo_open = qede_open,
749 .ndo_stop = qede_close,
750 .ndo_start_xmit = qede_start_xmit,
751 .ndo_select_queue = qede_select_queue,
752 .ndo_set_rx_mode = qede_set_rx_mode,
753 .ndo_set_mac_address = qede_set_mac_addr,
754 .ndo_validate_addr = eth_validate_addr,
755 .ndo_change_mtu = qede_change_mtu,
756 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
757 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
758 .ndo_fix_features = qede_fix_features,
759 .ndo_set_features = qede_set_features,
760 .ndo_get_stats64 = qede_get_stats64,
761 .ndo_features_check = qede_features_check,
762};
763
764static const struct net_device_ops qede_netdev_vf_xdp_ops = {
765 .ndo_open = qede_open,
766 .ndo_stop = qede_close,
767 .ndo_start_xmit = qede_start_xmit,
768 .ndo_select_queue = qede_select_queue,
769 .ndo_set_rx_mode = qede_set_rx_mode,
770 .ndo_set_mac_address = qede_set_mac_addr,
771 .ndo_validate_addr = eth_validate_addr,
772 .ndo_change_mtu = qede_change_mtu,
773 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
774 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
775 .ndo_fix_features = qede_fix_features,
776 .ndo_set_features = qede_set_features,
777 .ndo_get_stats64 = qede_get_stats64,
778 .ndo_features_check = qede_features_check,
779 .ndo_bpf = qede_xdp,
780 .ndo_xdp_xmit = qede_xdp_transmit,
781};
782
783/* -------------------------------------------------------------------------
784 * START OF PROBE / REMOVE
785 * -------------------------------------------------------------------------
786 */
787
788static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
789 struct pci_dev *pdev,
790 struct qed_dev_eth_info *info,
791 u32 dp_module, u8 dp_level)
792{
793 struct net_device *ndev;
794 struct qede_dev *edev;
795
796 ndev = alloc_etherdev_mqs(sizeof(*edev),
797 info->num_queues * info->num_tc,
798 info->num_queues);
799 if (!ndev) {
800 pr_err("etherdev allocation failed\n");
801 return NULL;
802 }
803
804 edev = netdev_priv(ndev);
805 edev->ndev = ndev;
806 edev->cdev = cdev;
807 edev->pdev = pdev;
808 edev->dp_module = dp_module;
809 edev->dp_level = dp_level;
810 edev->ops = qed_ops;
811
812 if (is_kdump_kernel()) {
813 edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
814 edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
815 } else {
816 edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
817 edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
818 }
819
820 DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
821 info->num_queues, info->num_queues);
822
823 SET_NETDEV_DEV(ndev, &pdev->dev);
824
825 memset(&edev->stats, 0, sizeof(edev->stats));
826 memcpy(&edev->dev_info, info, sizeof(*info));
827
828 /* As ethtool doesn't have the ability to show WoL behavior as
829 * 'default', declare it enabled if the device supports it.
830 */
831 if (edev->dev_info.common.wol_support)
832 edev->wol_enabled = true;
833
834 INIT_LIST_HEAD(&edev->vlan_list);
835
836 return edev;
837}
838
839static void qede_init_ndev(struct qede_dev *edev)
840{
841 struct net_device *ndev = edev->ndev;
842 struct pci_dev *pdev = edev->pdev;
843 bool udp_tunnel_enable = false;
844 netdev_features_t hw_features;
845
846 pci_set_drvdata(pdev, ndev);
847
848 ndev->mem_start = edev->dev_info.common.pci_mem_start;
849 ndev->base_addr = ndev->mem_start;
850 ndev->mem_end = edev->dev_info.common.pci_mem_end;
851 ndev->irq = edev->dev_info.common.pci_irq;
852
853 ndev->watchdog_timeo = TX_TIMEOUT;
854
855 if (IS_VF(edev)) {
856 if (edev->dev_info.xdp_supported)
857 ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
858 else
859 ndev->netdev_ops = &qede_netdev_vf_ops;
860 } else {
861 ndev->netdev_ops = &qede_netdev_ops;
862 }
863
864 qede_set_ethtool_ops(ndev);
865
866 ndev->priv_flags |= IFF_UNICAST_FLT;
867
868 /* user-changeable features */
869 hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
870 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
871 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
872
873 if (edev->dev_info.common.b_arfs_capable)
874 hw_features |= NETIF_F_NTUPLE;
875
876 if (edev->dev_info.common.vxlan_enable ||
877 edev->dev_info.common.geneve_enable)
878 udp_tunnel_enable = true;
879
880 if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
881 hw_features |= NETIF_F_TSO_ECN;
882 ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
883 NETIF_F_SG | NETIF_F_TSO |
884 NETIF_F_TSO_ECN | NETIF_F_TSO6 |
885 NETIF_F_RXCSUM;
886 }
887
888 if (udp_tunnel_enable) {
889 hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
890 NETIF_F_GSO_UDP_TUNNEL_CSUM);
891 ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
892 NETIF_F_GSO_UDP_TUNNEL_CSUM);
893
894 qede_set_udp_tunnels(edev);
895 }
896
897 if (edev->dev_info.common.gre_enable) {
898 hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
899 ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
900 NETIF_F_GSO_GRE_CSUM);
901 }
902
903 ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
904 NETIF_F_HIGHDMA;
905 ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
906 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
907 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
908
909 ndev->hw_features = hw_features;
910
911 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
912 NETDEV_XDP_ACT_NDO_XMIT;
913
914 /* MTU range: 46 - 9600 */
915 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
916 ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
917
918 /* Set network device HW mac */
919 eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
920
921 ndev->mtu = edev->dev_info.common.mtu;
922}
923
924/* This function converts the 32-bit 'debug' param into two params: level and module.
925 * Input 32b decoding:
926 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
927 * 'happy' flow, e.g. memory allocation failed.
928 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
929 * and provide important parameters.
930 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
931 * module. VERBOSE prints are for tracking the specific flow in low level.
932 *
933 * Notice that the level should be that of the lowest required logs.
934 */
935void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
936{
937 *p_dp_level = QED_LEVEL_NOTICE;
938 *p_dp_module = 0;
939
940 if (debug & QED_LOG_VERBOSE_MASK) {
941 *p_dp_level = QED_LEVEL_VERBOSE;
942 *p_dp_module = (debug & 0x3FFFFFFF);
943 } else if (debug & QED_LOG_INFO_MASK) {
944 *p_dp_level = QED_LEVEL_INFO;
945 } else if (debug & QED_LOG_NOTICE_MASK) {
946 *p_dp_level = QED_LEVEL_NOTICE;
947 }
948}
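/* Example decoding (follows directly from the bit layout documented above):
 * debug = 0x80000000 -> NOTICE level only;
 * debug = 0x40000000 -> INFO level;
 * any bits set in 0x3FFFFFFF -> VERBOSE level, with dp_module set to those bits.
 */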
949
950static void qede_free_fp_array(struct qede_dev *edev)
951{
952 if (edev->fp_array) {
953 struct qede_fastpath *fp;
954 int i;
955
956 for_each_queue(i) {
957 fp = &edev->fp_array[i];
958
959 kfree(fp->sb_info);
960 /* Handle the mem-alloc failure case where qede_init_fp()
961 * didn't register xdp_rxq_info yet. This implicitly applies
962 * only to Rx fastpaths (fp->type & QEDE_FASTPATH_RX).
963 */
964 if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
965 xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
966 kfree(fp->rxq);
967 kfree(fp->xdp_tx);
968 kfree(fp->txq);
969 }
970 kfree(edev->fp_array);
971 }
972
973 edev->num_queues = 0;
974 edev->fp_num_tx = 0;
975 edev->fp_num_rx = 0;
976}
977
978static int qede_alloc_fp_array(struct qede_dev *edev)
979{
980 u8 fp_combined, fp_rx = edev->fp_num_rx;
981 struct qede_fastpath *fp;
982 int i;
983
984 edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
985 sizeof(*edev->fp_array), GFP_KERNEL);
986 if (!edev->fp_array) {
987 DP_NOTICE(edev, "fp array allocation failed\n");
988 goto err;
989 }
990
991 if (!edev->coal_entry) {
992 edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
993 sizeof(*edev->coal_entry),
994 GFP_KERNEL);
995 if (!edev->coal_entry) {
996 DP_ERR(edev, "coalesce entry allocation failed\n");
997 goto err;
998 }
999 }
1000
1001 fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
1002
1003 /* Allocate the FP elements for Rx queues followed by combined and then
1004 * the Tx. This ordering should be maintained so that the respective
1005 * queues (Rx or Tx) will be together in the fastpath array and the
1006 * associated ids will be sequential.
1007 */
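	/* For example, with 2 Rx-only and 2 Tx-only queues out of 8 total,
	 * entries 0-1 become QEDE_FASTPATH_RX, 2-5 QEDE_FASTPATH_COMBINED
	 * and 6-7 QEDE_FASTPATH_TX.
	 */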
1008 for_each_queue(i) {
1009 fp = &edev->fp_array[i];
1010
1011 fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
1012 if (!fp->sb_info) {
1013 DP_NOTICE(edev, "sb info struct allocation failed\n");
1014 goto err;
1015 }
1016
1017 if (fp_rx) {
1018 fp->type = QEDE_FASTPATH_RX;
1019 fp_rx--;
1020 } else if (fp_combined) {
1021 fp->type = QEDE_FASTPATH_COMBINED;
1022 fp_combined--;
1023 } else {
1024 fp->type = QEDE_FASTPATH_TX;
1025 }
1026
1027 if (fp->type & QEDE_FASTPATH_TX) {
1028 fp->txq = kcalloc(edev->dev_info.num_tc,
1029 sizeof(*fp->txq), GFP_KERNEL);
1030 if (!fp->txq)
1031 goto err;
1032 }
1033
1034 if (fp->type & QEDE_FASTPATH_RX) {
1035 fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
1036 if (!fp->rxq)
1037 goto err;
1038
1039 if (edev->xdp_prog) {
1040 fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
1041 GFP_KERNEL);
1042 if (!fp->xdp_tx)
1043 goto err;
1044 fp->type |= QEDE_FASTPATH_XDP;
1045 }
1046 }
1047 }
1048
1049 return 0;
1050err:
1051 qede_free_fp_array(edev);
1052 return -ENOMEM;
1053}
1054
1055/* The qede lock is used to protect driver state changes and driver flows that
1056 * are not reentrant.
1057 */
1058void __qede_lock(struct qede_dev *edev)
1059{
1060 mutex_lock(&edev->qede_lock);
1061}
1062
1063void __qede_unlock(struct qede_dev *edev)
1064{
1065 mutex_unlock(&edev->qede_lock);
1066}
1067
1068/* This version of the lock should be used when acquiring the RTNL lock is also
1069 * needed in addition to the internal qede lock.
1070 */
1071static void qede_lock(struct qede_dev *edev)
1072{
1073 rtnl_lock();
1074 __qede_lock(edev);
1075}
1076
1077static void qede_unlock(struct qede_dev *edev)
1078{
1079 __qede_unlock(edev);
1080 rtnl_unlock();
1081}
1082
1083static void qede_periodic_task(struct work_struct *work)
1084{
1085 struct qede_dev *edev = container_of(work, struct qede_dev,
1086 periodic_task.work);
1087
1088 qede_fill_by_demand_stats(edev);
1089 schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
1090}
1091
1092static void qede_init_periodic_task(struct qede_dev *edev)
1093{
1094 INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
1095 spin_lock_init(&edev->stats_lock);
1096 edev->stats_coal_usecs = USEC_PER_SEC;
1097 edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
1098}
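/* With the defaults set here, the periodic task refreshes the by-demand
 * statistics roughly once per second (USEC_PER_SEC converted to jiffies).
 */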
1099
1100static void qede_sp_task(struct work_struct *work)
1101{
1102 struct qede_dev *edev = container_of(work, struct qede_dev,
1103 sp_task.work);
1104
1105 /* Disable execution of this deferred work once
1106 * qede removal is in progress; this stops any future
1107 * scheduling of sp_task.
1108 */
1109 if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
1110 return;
1111
1112 /* The locking scheme depends on the specific flag:
1113 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
1114 * ensure that ongoing flows are ended and new ones are not started.
1115 * In other cases - only the internal qede lock should be acquired.
1116 */
1117
1118 if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
1119 cancel_delayed_work_sync(&edev->periodic_task);
1120#ifdef CONFIG_QED_SRIOV
1121 /* SRIOV must be disabled outside the lock to avoid a deadlock.
1122 * The recovery of the active VFs is currently not supported.
1123 */
1124 if (pci_num_vf(edev->pdev))
1125 qede_sriov_configure(edev->pdev, 0);
1126#endif
1127 qede_lock(edev);
1128 qede_recovery_handler(edev);
1129 qede_unlock(edev);
1130 }
1131
1132 __qede_lock(edev);
1133
1134 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
1135 if (edev->state == QEDE_STATE_OPEN)
1136 qede_config_rx_mode(edev->ndev);
1137
1138#ifdef CONFIG_RFS_ACCEL
1139 if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
1140 if (edev->state == QEDE_STATE_OPEN)
1141 qede_process_arfs_filters(edev, false);
1142 }
1143#endif
1144 if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
1145 qede_generic_hw_err_handler(edev);
1146 __qede_unlock(edev);
1147
1148 if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
1149#ifdef CONFIG_QED_SRIOV
1150 /* SRIOV must be disabled outside the lock to avoid a deadlock.
1151 * The recovery of the active VFs is currently not supported.
1152 */
1153 if (pci_num_vf(edev->pdev))
1154 qede_sriov_configure(edev->pdev, 0);
1155#endif
1156 edev->ops->common->recovery_process(edev->cdev);
1157 }
1158}
1159
1160static void qede_update_pf_params(struct qed_dev *cdev)
1161{
1162 struct qed_pf_params pf_params;
1163 u16 num_cons;
1164
1165 /* 64 rx + 64 tx + 64 XDP */
1166 memset(&pf_params, 0, sizeof(struct qed_pf_params));
1167
1168 /* 1 rx + 1 xdp + max tx cos */
1169 num_cons = QED_MIN_L2_CONS;
1170
1171 pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
1172
1173 /* Same for VFs - make sure they'll have sufficient connections
1174 * to support XDP Tx queues.
1175 */
1176 pf_params.eth_pf_params.num_vf_cons = 48;
1177
1178 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
1179 qed_ops->common->update_pf_params(cdev, &pf_params);
1180}
1181
1182#define QEDE_FW_VER_STR_SIZE 80
1183
1184static void qede_log_probe(struct qede_dev *edev)
1185{
1186 struct qed_dev_info *p_dev_info = &edev->dev_info.common;
1187 u8 buf[QEDE_FW_VER_STR_SIZE];
1188 size_t left_size;
1189
1190 snprintf(buf, QEDE_FW_VER_STR_SIZE,
1191 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
1192 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
1193 p_dev_info->fw_eng,
1194 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
1195 QED_MFW_VERSION_3_OFFSET,
1196 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
1197 QED_MFW_VERSION_2_OFFSET,
1198 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
1199 QED_MFW_VERSION_1_OFFSET,
1200 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
1201 QED_MFW_VERSION_0_OFFSET);
1202
1203 left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
1204 if (p_dev_info->mbi_version && left_size)
1205 snprintf(buf + strlen(buf), left_size,
1206 " [MBI %d.%d.%d]",
1207 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
1208 QED_MBI_VERSION_2_OFFSET,
1209 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
1210 QED_MBI_VERSION_1_OFFSET,
1211 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
1212 QED_MBI_VERSION_0_OFFSET);
1213
1214 pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
1215 PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
1216 buf, edev->ndev->name);
1217}
1218
1219enum qede_probe_mode {
1220 QEDE_PROBE_NORMAL,
1221 QEDE_PROBE_RECOVERY,
1222};
1223
1224static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1225 bool is_vf, enum qede_probe_mode mode)
1226{
1227 struct qed_probe_params probe_params;
1228 struct qed_slowpath_params sp_params;
1229 struct qed_dev_eth_info dev_info;
1230 struct qede_dev *edev;
1231 struct qed_dev *cdev;
1232 int rc;
1233
1234 if (unlikely(dp_level & QED_LEVEL_INFO))
1235 pr_notice("Starting qede probe\n");
1236
1237 memset(&probe_params, 0, sizeof(probe_params));
1238 probe_params.protocol = QED_PROTOCOL_ETH;
1239 probe_params.dp_module = dp_module;
1240 probe_params.dp_level = dp_level;
1241 probe_params.is_vf = is_vf;
1242 probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
1243 cdev = qed_ops->common->probe(pdev, &probe_params);
1244 if (!cdev) {
1245 rc = -ENODEV;
1246 goto err0;
1247 }
1248
1249 qede_update_pf_params(cdev);
1250
1251 /* Start the Slowpath-process */
1252 memset(&sp_params, 0, sizeof(sp_params));
1253 sp_params.int_mode = QED_INT_MODE_MSIX;
1254 strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1255 rc = qed_ops->common->slowpath_start(cdev, &sp_params);
1256 if (rc) {
1257 pr_notice("Cannot start slowpath\n");
1258 goto err1;
1259 }
1260
1261 /* Learn information crucial for qede to progress */
1262 rc = qed_ops->fill_dev_info(cdev, &dev_info);
1263 if (rc)
1264 goto err2;
1265
1266 if (mode != QEDE_PROBE_RECOVERY) {
1267 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1268 dp_level);
1269 if (!edev) {
1270 rc = -ENOMEM;
1271 goto err2;
1272 }
1273
1274 edev->devlink = qed_ops->common->devlink_register(cdev);
1275 if (IS_ERR(edev->devlink)) {
1276 DP_NOTICE(edev, "Cannot register devlink\n");
1277 rc = PTR_ERR(edev->devlink);
1278 edev->devlink = NULL;
1279 goto err3;
1280 }
1281 } else {
1282 struct net_device *ndev = pci_get_drvdata(pdev);
1283 struct qed_devlink *qdl;
1284
1285 edev = netdev_priv(ndev);
1286 qdl = devlink_priv(edev->devlink);
1287 qdl->cdev = cdev;
1288 edev->cdev = cdev;
1289 memset(&edev->stats, 0, sizeof(edev->stats));
1290 memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
1291 }
1292
1293 if (is_vf)
1294 set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
1295
1296 qede_init_ndev(edev);
1297
1298 rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
1299 if (rc)
1300 goto err3;
1301
1302 if (mode != QEDE_PROBE_RECOVERY) {
1303 /* Prepare the lock prior to the registration of the netdev,
1304 * as once it's registered we might reach flows requiring it
1305 * [it's even possible to reach a flow needing it directly
1306 * from there, although it's unlikely].
1307 */
1308 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
1309 mutex_init(&edev->qede_lock);
1310 qede_init_periodic_task(edev);
1311
1312 rc = register_netdev(edev->ndev);
1313 if (rc) {
1314 DP_NOTICE(edev, "Cannot register net-device\n");
1315 goto err4;
1316 }
1317 }
1318
1319 edev->ops->common->set_name(cdev, edev->ndev->name);
1320
1321 /* PTP not supported on VFs */
1322 if (!is_vf)
1323 qede_ptp_enable(edev);
1324
1325 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1326
1327#ifdef CONFIG_DCB
1328 if (!IS_VF(edev))
1329 qede_set_dcbnl_ops(edev->ndev);
1330#endif
1331
1332 edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1333
1334 qede_log_probe(edev);
1335
1336 /* retain user config (for example - after recovery) */
1337 if (edev->stats_coal_usecs)
1338 schedule_delayed_work(&edev->periodic_task, 0);
1339
1340 return 0;
1341
1342err4:
1343 qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
1344err3:
1345 if (mode != QEDE_PROBE_RECOVERY)
1346 free_netdev(edev->ndev);
1347 else
1348 edev->cdev = NULL;
1349err2:
1350 qed_ops->common->slowpath_stop(cdev);
1351err1:
1352 qed_ops->common->remove(cdev);
1353err0:
1354 return rc;
1355}
1356
1357static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1358{
1359 bool is_vf = false;
1360 u32 dp_module = 0;
1361 u8 dp_level = 0;
1362
1363 switch ((enum qede_pci_private)id->driver_data) {
1364 case QEDE_PRIVATE_VF:
1365 if (debug & QED_LOG_VERBOSE_MASK)
1366 dev_err(&pdev->dev, "Probing a VF\n");
1367 is_vf = true;
1368 break;
1369 default:
1370 if (debug & QED_LOG_VERBOSE_MASK)
1371 dev_err(&pdev->dev, "Probing a PF\n");
1372 }
1373
1374 qede_config_debug(debug, &dp_module, &dp_level);
1375
1376 return __qede_probe(pdev, dp_module, dp_level, is_vf,
1377 QEDE_PROBE_NORMAL);
1378}
1379
1380enum qede_remove_mode {
1381 QEDE_REMOVE_NORMAL,
1382 QEDE_REMOVE_RECOVERY,
1383};
1384
1385static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1386{
1387 struct net_device *ndev = pci_get_drvdata(pdev);
1388 struct qede_dev *edev;
1389 struct qed_dev *cdev;
1390
1391 if (!ndev) {
1392 dev_info(&pdev->dev, "Device has already been removed\n");
1393 return;
1394 }
1395
1396 edev = netdev_priv(ndev);
1397 cdev = edev->cdev;
1398
1399 DP_INFO(edev, "Starting qede_remove\n");
1400
1401 qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
1402
1403 if (mode != QEDE_REMOVE_RECOVERY) {
1404 set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
1405 unregister_netdev(ndev);
1406
1407 cancel_delayed_work_sync(&edev->sp_task);
1408 cancel_delayed_work_sync(&edev->periodic_task);
1409
1410 edev->ops->common->set_power_state(cdev, PCI_D0);
1411
1412 pci_set_drvdata(pdev, NULL);
1413 }
1414
1415 qede_ptp_disable(edev);
1416
1417 /* Use global ops since we've freed edev */
1418 qed_ops->common->slowpath_stop(cdev);
1419 if (system_state == SYSTEM_POWER_OFF)
1420 return;
1421
1422 if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
1423 qed_ops->common->devlink_unregister(edev->devlink);
1424 edev->devlink = NULL;
1425 }
1426 qed_ops->common->remove(cdev);
1427 edev->cdev = NULL;
1428
1429 /* Since this can happen out-of-sync with other flows,
1430 * don't release the netdevice until after slowpath stop
1431 * has been called to guarantee various other contexts
1432 * [e.g., QED register callbacks] won't break anything when
1433 * accessing the netdevice.
1434 */
1435 if (mode != QEDE_REMOVE_RECOVERY) {
1436 kfree(edev->coal_entry);
1437 free_netdev(ndev);
1438 }
1439
1440 dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1441}
1442
1443static void qede_remove(struct pci_dev *pdev)
1444{
1445 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1446}
1447
1448static void qede_shutdown(struct pci_dev *pdev)
1449{
1450 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1451}
1452
1453/* -------------------------------------------------------------------------
1454 * START OF LOAD / UNLOAD
1455 * -------------------------------------------------------------------------
1456 */
1457
1458static int qede_set_num_queues(struct qede_dev *edev)
1459{
1460 int rc;
1461 u16 rss_num;
1462
1463 /* Set up queues according to available resources */
1464 if (edev->req_queues)
1465 rss_num = edev->req_queues;
1466 else
1467 rss_num = netif_get_num_default_rss_queues() *
1468 edev->dev_info.common.num_hwfns;
1469
1470 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1471
1472 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1473 if (rc > 0) {
1474 /* Managed to request interrupts for our queues */
1475 edev->num_queues = rc;
1476 DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1477 QEDE_QUEUE_CNT(edev), rss_num);
1478 rc = 0;
1479 }
1480
1481 edev->fp_num_tx = edev->req_num_tx;
1482 edev->fp_num_rx = edev->req_num_rx;
1483
1484 return rc;
1485}
1486
1487static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1488 u16 sb_id)
1489{
1490 if (sb_info->sb_virt) {
1491 edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
1492 QED_SB_TYPE_L2_QUEUE);
1493 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1494 (void *)sb_info->sb_virt, sb_info->sb_phys);
1495 memset(sb_info, 0, sizeof(*sb_info));
1496 }
1497}
1498
1499/* This function allocates fast-path status block memory */
1500static int qede_alloc_mem_sb(struct qede_dev *edev,
1501 struct qed_sb_info *sb_info, u16 sb_id)
1502{
1503 struct status_block *sb_virt;
1504 dma_addr_t sb_phys;
1505 int rc;
1506
1507 sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1508 sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1509 if (!sb_virt) {
1510 DP_ERR(edev, "Status block allocation failed\n");
1511 return -ENOMEM;
1512 }
1513
1514 rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1515 sb_virt, sb_phys, sb_id,
1516 QED_SB_TYPE_L2_QUEUE);
1517 if (rc) {
1518 DP_ERR(edev, "Status block initialization failed\n");
1519 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1520 sb_virt, sb_phys);
1521 return rc;
1522 }
1523
1524 return 0;
1525}
1526
1527static void qede_free_rx_buffers(struct qede_dev *edev,
1528 struct qede_rx_queue *rxq)
1529{
1530 u16 i;
1531
1532 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1533 struct sw_rx_data *rx_buf;
1534 struct page *data;
1535
1536 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1537 data = rx_buf->data;
1538
1539 dma_unmap_page(&edev->pdev->dev,
1540 rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1541
1542 rx_buf->data = NULL;
1543 __free_page(data);
1544 }
1545}
1546
1547static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1548{
1549 /* Free rx buffers */
1550 qede_free_rx_buffers(edev, rxq);
1551
1552 /* Free the parallel SW ring */
1553 kfree(rxq->sw_rx_ring);
1554
1555 /* Free the real RQ ring used by FW */
1556 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1557 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1558}
1559
1560static void qede_set_tpa_param(struct qede_rx_queue *rxq)
1561{
1562 int i;
1563
1564 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1565 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1566
1567 tpa_info->state = QEDE_AGG_STATE_NONE;
1568 }
1569}
1570
1571/* This function allocates all memory needed per Rx queue */
1572static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1573{
1574 struct qed_chain_init_params params = {
1575 .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1576 .num_elems = RX_RING_SIZE,
1577 };
1578 struct qed_dev *cdev = edev->cdev;
1579 int i, rc, size;
1580
1581 rxq->num_rx_buffers = edev->q_num_rx_buffers;
1582
1583 rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1584
1585 rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
1586 size = rxq->rx_headroom +
1587 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1588
1589 /* Make sure that the headroom and payload fit in a single page */
1590 if (rxq->rx_buf_size + size > PAGE_SIZE)
1591 rxq->rx_buf_size = PAGE_SIZE - size;
1592
1593 /* Segment size used to split a page into multiple equal parts,
1594 * unless XDP is used, in which case the entire page is used.
1595 */
1596 if (!edev->xdp_prog) {
1597 size = size + rxq->rx_buf_size;
1598 rxq->rx_buf_seg_size = roundup_pow_of_two(size);
1599 } else {
1600 rxq->rx_buf_seg_size = PAGE_SIZE;
1601 edev->ndev->features &= ~NETIF_F_GRO_HW;
1602 }
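	/* Illustrative sizing: on a 4 KiB-page system with the default
	 * 1500-byte MTU, headroom + buffer typically rounds up to a
	 * 2048-byte segment, i.e. two Rx buffers per page; with XDP the
	 * whole page backs a single buffer.
	 */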
1603
1604 /* Allocate the parallel driver ring for Rx buffers */
1605 size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1606 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1607 if (!rxq->sw_rx_ring) {
1608 DP_ERR(edev, "Rx buffers ring allocation failed\n");
1609 rc = -ENOMEM;
1610 goto err;
1611 }
1612
1613 /* Allocate FW Rx ring */
1614 params.mode = QED_CHAIN_MODE_NEXT_PTR;
1615 params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1616 params.elem_size = sizeof(struct eth_rx_bd);
1617
1618 rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms);
1619 if (rc)
1620 goto err;
1621
1622 /* Allocate FW completion ring */
1623 params.mode = QED_CHAIN_MODE_PBL;
1624 params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1625 params.elem_size = sizeof(union eth_rx_cqe);
1626
1627 rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms);
1628 if (rc)
1629 goto err;
1630
1631 /* Allocate buffers for the Rx ring */
1632 rxq->filled_buffers = 0;
1633 for (i = 0; i < rxq->num_rx_buffers; i++) {
1634 rc = qede_alloc_rx_buffer(rxq, false);
1635 if (rc) {
1636 DP_ERR(edev,
1637 "Rx buffers allocation failed at index %d\n", i);
1638 goto err;
1639 }
1640 }
1641
1642 edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1643 if (!edev->gro_disable)
1644 qede_set_tpa_param(rxq);
1645err:
1646 return rc;
1647}
1648
1649static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1650{
1651 /* Free the parallel SW ring */
1652 if (txq->is_xdp)
1653 kfree(txq->sw_tx_ring.xdp);
1654 else
1655 kfree(txq->sw_tx_ring.skbs);
1656
1657 /* Free the real RQ ring used by FW */
1658 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1659}
1660
1661/* This function allocates all memory needed per Tx queue */
1662static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1663{
1664 struct qed_chain_init_params params = {
1665 .mode = QED_CHAIN_MODE_PBL,
1666 .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1667 .cnt_type = QED_CHAIN_CNT_TYPE_U16,
1668 .num_elems = edev->q_num_tx_buffers,
1669 .elem_size = sizeof(union eth_tx_bd_types),
1670 };
1671 int size, rc;
1672
1673 txq->num_tx_buffers = edev->q_num_tx_buffers;
1674
1675 /* Allocate the parallel driver ring for Tx buffers */
1676 if (txq->is_xdp) {
1677 size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1678 txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1679 if (!txq->sw_tx_ring.xdp)
1680 goto err;
1681 } else {
1682 size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1683 txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1684 if (!txq->sw_tx_ring.skbs)
1685 goto err;
1686 }
1687
1688 rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms);
1689 if (rc)
1690 goto err;
1691
1692 return 0;
1693
1694err:
1695 qede_free_mem_txq(edev, txq);
1696 return -ENOMEM;
1697}
1698
1699/* This function frees all memory of a single fp */
1700static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1701{
1702 qede_free_mem_sb(edev, fp->sb_info, fp->id);
1703
1704 if (fp->type & QEDE_FASTPATH_RX)
1705 qede_free_mem_rxq(edev, fp->rxq);
1706
1707 if (fp->type & QEDE_FASTPATH_XDP)
1708 qede_free_mem_txq(edev, fp->xdp_tx);
1709
1710 if (fp->type & QEDE_FASTPATH_TX) {
1711 int cos;
1712
1713 for_each_cos_in_txq(edev, cos)
1714 qede_free_mem_txq(edev, &fp->txq[cos]);
1715 }
1716}
1717
1718/* This function allocates all memory needed for a single fp (i.e. an entity
1719 * which contains a status block, one Rx queue and/or multiple per-TC Tx queues).
1720 */
1721static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1722{
1723 int rc = 0;
1724
1725 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1726 if (rc)
1727 goto out;
1728
1729 if (fp->type & QEDE_FASTPATH_RX) {
1730 rc = qede_alloc_mem_rxq(edev, fp->rxq);
1731 if (rc)
1732 goto out;
1733 }
1734
1735 if (fp->type & QEDE_FASTPATH_XDP) {
1736 rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1737 if (rc)
1738 goto out;
1739 }
1740
1741 if (fp->type & QEDE_FASTPATH_TX) {
1742 int cos;
1743
1744 for_each_cos_in_txq(edev, cos) {
1745 rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
1746 if (rc)
1747 goto out;
1748 }
1749 }
1750
1751out:
1752 return rc;
1753}
1754
1755static void qede_free_mem_load(struct qede_dev *edev)
1756{
1757 int i;
1758
1759 for_each_queue(i) {
1760 struct qede_fastpath *fp = &edev->fp_array[i];
1761
1762 qede_free_mem_fp(edev, fp);
1763 }
1764}
1765
1766/* This function allocates all qede memory at NIC load. */
1767static int qede_alloc_mem_load(struct qede_dev *edev)
1768{
1769 int rc = 0, queue_id;
1770
1771 for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1772 struct qede_fastpath *fp = &edev->fp_array[queue_id];
1773
1774 rc = qede_alloc_mem_fp(edev, fp);
1775 if (rc) {
1776 DP_ERR(edev,
1777 "Failed to allocate memory for fastpath - rss id = %d\n",
1778 queue_id);
1779 qede_free_mem_load(edev);
1780 return rc;
1781 }
1782 }
1783
1784 return 0;
1785}
1786
1787static void qede_empty_tx_queue(struct qede_dev *edev,
1788 struct qede_tx_queue *txq)
1789{
1790 unsigned int pkts_compl = 0, bytes_compl = 0;
1791 struct netdev_queue *netdev_txq;
1792 int rc, len = 0;
1793
1794 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
1795
1796 while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
1797 qed_chain_get_prod_idx(&txq->tx_pbl)) {
1798 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1799 "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1800 txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
1801 qed_chain_get_prod_idx(&txq->tx_pbl));
1802
1803 rc = qede_free_tx_pkt(edev, txq, &len);
1804 if (rc) {
1805 DP_NOTICE(edev,
1806 "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1807 txq->index,
1808 qed_chain_get_cons_idx(&txq->tx_pbl),
1809 qed_chain_get_prod_idx(&txq->tx_pbl));
1810 break;
1811 }
1812
1813 bytes_compl += len;
1814 pkts_compl++;
1815 txq->sw_tx_cons++;
1816 }
1817
1818 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
1819}
1820
1821static void qede_empty_tx_queues(struct qede_dev *edev)
1822{
1823 int i;
1824
1825 for_each_queue(i)
1826 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1827 int cos;
1828
1829 for_each_cos_in_txq(edev, cos) {
1830 struct qede_fastpath *fp;
1831
1832 fp = &edev->fp_array[i];
1833 qede_empty_tx_queue(edev,
1834 &fp->txq[cos]);
1835 }
1836 }
1837}
1838
1839/* This function inits fp content and resets the SB, RXQ and TXQ structures */
1840static void qede_init_fp(struct qede_dev *edev)
1841{
1842 int queue_id, rxq_index = 0, txq_index = 0;
1843 struct qede_fastpath *fp;
1844 bool init_xdp = false;
1845
1846 for_each_queue(queue_id) {
1847 fp = &edev->fp_array[queue_id];
1848
1849 fp->edev = edev;
1850 fp->id = queue_id;
1851
1852 if (fp->type & QEDE_FASTPATH_XDP) {
1853 fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1854 rxq_index);
1855 fp->xdp_tx->is_xdp = 1;
1856
1857 spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
1858 init_xdp = true;
1859 }
1860
1861 if (fp->type & QEDE_FASTPATH_RX) {
1862 fp->rxq->rxq_id = rxq_index++;
1863
1864 /* Determine how to map buffers for this queue */
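			/* XDP_TX may re-post the same page to the device, so
			 * XDP Rx buffers must be mapped DMA_BIDIRECTIONAL;
			 * otherwise device-to-host only is enough.
			 */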
1865 if (fp->type & QEDE_FASTPATH_XDP)
1866 fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1867 else
1868 fp->rxq->data_direction = DMA_FROM_DEVICE;
1869 fp->rxq->dev = &edev->pdev->dev;
1870
1871			/* Driver has no error path from here */
1872 WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1873 fp->rxq->rxq_id, 0) < 0);
1874
1875 if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
1876 MEM_TYPE_PAGE_ORDER0,
1877 NULL)) {
1878 DP_NOTICE(edev,
1879 "Failed to register XDP memory model\n");
1880 }
1881 }
1882
1883 if (fp->type & QEDE_FASTPATH_TX) {
1884 int cos;
1885
1886 for_each_cos_in_txq(edev, cos) {
1887 struct qede_tx_queue *txq = &fp->txq[cos];
1888 u16 ndev_tx_id;
1889
1890 txq->cos = cos;
1891 txq->index = txq_index;
1892 ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
1893 txq->ndev_txq_id = ndev_tx_id;
1894
1895 if (edev->dev_info.is_legacy)
1896 txq->is_legacy = true;
1897 txq->dev = &edev->pdev->dev;
1898 }
1899
1900 txq_index++;
1901 }
1902
1903 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1904 edev->ndev->name, queue_id);
1905 }
1906
1907 if (init_xdp) {
1908 edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
1909 DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
1910 }
1911}
1912
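/* Tell the stack the real Rx/Tx queue counts (Tx is scaled by the TC count) */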
1913static int qede_set_real_num_queues(struct qede_dev *edev)
1914{
1915 int rc = 0;
1916
1917 rc = netif_set_real_num_tx_queues(edev->ndev,
1918 QEDE_TSS_COUNT(edev) *
1919 edev->dev_info.num_tc);
1920 if (rc) {
1921 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1922 return rc;
1923 }
1924
1925 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1926 if (rc) {
1927 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1928 return rc;
1929 }
1930
1931 return 0;
1932}
1933
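/* Disable and delete the per-queue NAPI contexts */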
1934static void qede_napi_disable_remove(struct qede_dev *edev)
1935{
1936 int i;
1937
1938 for_each_queue(i) {
1939 napi_disable(&edev->fp_array[i].napi);
1940
1941 netif_napi_del(&edev->fp_array[i].napi);
1942 }
1943}
1944
1945static void qede_napi_add_enable(struct qede_dev *edev)
1946{
1947 int i;
1948
1949 /* Add NAPI objects */
1950 for_each_queue(i) {
1951 netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
1952 napi_enable(&edev->fp_array[i].napi);
1953 }
1954}
1955
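/* Release all fastpath interrupts - MSI-X vectors or simd handlers */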
1956static void qede_sync_free_irqs(struct qede_dev *edev)
1957{
1958 int i;
1959
1960 for (i = 0; i < edev->int_info.used_cnt; i++) {
1961 if (edev->int_info.msix_cnt) {
1962 free_irq(edev->int_info.msix[i].vector,
1963 &edev->fp_array[i]);
1964 } else {
1965 edev->ops->common->simd_handler_clean(edev->cdev, i);
1966 }
1967 }
1968
1969 edev->int_info.used_cnt = 0;
1970 edev->int_info.msix_cnt = 0;
1971}
1972
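/* Request one MSI-X vector per fastpath queue; with CONFIG_RFS_ACCEL the
 * Rx vectors are also added to the aRFS CPU reverse-map.
 */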
1973static int qede_req_msix_irqs(struct qede_dev *edev)
1974{
1975 int i, rc;
1976
1977 /* Sanitize number of interrupts == number of prepared RSS queues */
1978 if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1979 DP_ERR(edev,
1980 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1981 QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1982 return -EINVAL;
1983 }
1984
1985 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1986#ifdef CONFIG_RFS_ACCEL
1987 struct qede_fastpath *fp = &edev->fp_array[i];
1988
1989 if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1990 rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1991 edev->int_info.msix[i].vector);
1992 if (rc) {
1993 DP_ERR(edev, "Failed to add CPU rmap\n");
1994 qede_free_arfs(edev);
1995 }
1996 }
1997#endif
1998 rc = request_irq(edev->int_info.msix[i].vector,
1999 qede_msix_fp_int, 0, edev->fp_array[i].name,
2000 &edev->fp_array[i]);
2001 if (rc) {
2002 DP_ERR(edev, "Request fp %d irq failed\n", i);
2003#ifdef CONFIG_RFS_ACCEL
2004 if (edev->ndev->rx_cpu_rmap)
2005 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2006
2007 edev->ndev->rx_cpu_rmap = NULL;
2008#endif
2009 qede_sync_free_irqs(edev);
2010 return rc;
2011 }
2012 DP_VERBOSE(edev, NETIF_MSG_INTR,
2013 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
2014 edev->fp_array[i].name, i,
2015 &edev->fp_array[i]);
2016 edev->int_info.used_cnt++;
2017 }
2018
2019 return 0;
2020}
2021
2022static void qede_simd_fp_handler(void *cookie)
2023{
2024 struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
2025
2026 napi_schedule_irqoff(&fp->napi);
2027}
2028
2029static int qede_setup_irqs(struct qede_dev *edev)
2030{
2031 int i, rc = 0;
2032
2033 /* Learn Interrupt configuration */
2034 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
2035 if (rc)
2036 return rc;
2037
2038 if (edev->int_info.msix_cnt) {
2039 rc = qede_req_msix_irqs(edev);
2040 if (rc)
2041 return rc;
2042 edev->ndev->irq = edev->int_info.msix[0].vector;
2043 } else {
2044 const struct qed_common_ops *ops;
2045
2046		/* qed should learn to receive the RSS ids and callbacks */
2047 ops = edev->ops->common;
2048 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
2049 ops->simd_handler_config(edev->cdev,
2050 &edev->fp_array[i], i,
2051 qede_simd_fp_handler);
2052 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
2053 }
2054 return 0;
2055}
2056
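/* Busy-wait for a Tx queue to empty; if it is stuck, optionally request an
 * MCP drain and retry once.
 */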
2057static int qede_drain_txq(struct qede_dev *edev,
2058 struct qede_tx_queue *txq, bool allow_drain)
2059{
2060 int rc, cnt = 1000;
2061
2062 while (txq->sw_tx_cons != txq->sw_tx_prod) {
2063 if (!cnt) {
2064 if (allow_drain) {
2065 DP_NOTICE(edev,
2066 "Tx queue[%d] is stuck, requesting MCP to drain\n",
2067 txq->index);
2068 rc = edev->ops->common->drain(edev->cdev);
2069 if (rc)
2070 return rc;
2071 return qede_drain_txq(edev, txq, false);
2072 }
2073 DP_NOTICE(edev,
2074 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2075 txq->index, txq->sw_tx_prod,
2076 txq->sw_tx_cons);
2077 return -ENODEV;
2078 }
2079 cnt--;
2080 usleep_range(1000, 2000);
2081 barrier();
2082 }
2083
2084 /* FW finished processing, wait for HW to transmit all tx packets */
2085 usleep_range(1000, 2000);
2086
2087 return 0;
2088}
2089
2090static int qede_stop_txq(struct qede_dev *edev,
2091 struct qede_tx_queue *txq, int rss_id)
2092{
2093 /* delete doorbell from doorbell recovery mechanism */
2094 edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
2095 &txq->tx_db);
2096
2097 return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
2098}
2099
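/* Deactivate the vport, drain all Tx/XDP queues and then stop every queue in
 * reverse order before stopping the vport itself.
 */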
2100static int qede_stop_queues(struct qede_dev *edev)
2101{
2102 struct qed_update_vport_params *vport_update_params;
2103 struct qed_dev *cdev = edev->cdev;
2104 struct qede_fastpath *fp;
2105 int rc, i;
2106
2107 /* Disable the vport */
2108 vport_update_params = vzalloc(sizeof(*vport_update_params));
2109 if (!vport_update_params)
2110 return -ENOMEM;
2111
2112 vport_update_params->vport_id = 0;
2113 vport_update_params->update_vport_active_flg = 1;
2114 vport_update_params->vport_active_flg = 0;
2115 vport_update_params->update_rss_flg = 0;
2116
2117 rc = edev->ops->vport_update(cdev, vport_update_params);
2118 vfree(vport_update_params);
2119
2120 if (rc) {
2121 DP_ERR(edev, "Failed to update vport\n");
2122 return rc;
2123 }
2124
2125 /* Flush Tx queues. If needed, request drain from MCP */
2126 for_each_queue(i) {
2127 fp = &edev->fp_array[i];
2128
2129 if (fp->type & QEDE_FASTPATH_TX) {
2130 int cos;
2131
2132 for_each_cos_in_txq(edev, cos) {
2133 rc = qede_drain_txq(edev, &fp->txq[cos], true);
2134 if (rc)
2135 return rc;
2136 }
2137 }
2138
2139 if (fp->type & QEDE_FASTPATH_XDP) {
2140 rc = qede_drain_txq(edev, fp->xdp_tx, true);
2141 if (rc)
2142 return rc;
2143 }
2144 }
2145
2146 /* Stop all Queues in reverse order */
2147 for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
2148 fp = &edev->fp_array[i];
2149
2150 /* Stop the Tx Queue(s) */
2151 if (fp->type & QEDE_FASTPATH_TX) {
2152 int cos;
2153
2154 for_each_cos_in_txq(edev, cos) {
2155 rc = qede_stop_txq(edev, &fp->txq[cos], i);
2156 if (rc)
2157 return rc;
2158 }
2159 }
2160
2161 /* Stop the Rx Queue */
2162 if (fp->type & QEDE_FASTPATH_RX) {
2163 rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
2164 if (rc) {
2165 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
2166 return rc;
2167 }
2168 }
2169
2170 /* Stop the XDP forwarding queue */
2171 if (fp->type & QEDE_FASTPATH_XDP) {
2172 rc = qede_stop_txq(edev, fp->xdp_tx, i);
2173 if (rc)
2174 return rc;
2175
2176 bpf_prog_put(fp->rxq->xdp_prog);
2177 }
2178 }
2179
2180 /* Stop the vport */
2181 rc = edev->ops->vport_stop(cdev, 0);
2182 if (rc)
2183 DP_ERR(edev, "Failed to stop VPORT\n");
2184
2185 return rc;
2186}
2187
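/* Start a Tx (or XDP forwarding) queue in FW and register its doorbell with
 * the doorbell recovery mechanism.
 */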
2188static int qede_start_txq(struct qede_dev *edev,
2189 struct qede_fastpath *fp,
2190 struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
2191{
2192 dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
2193 u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
2194 struct qed_queue_start_common_params params;
2195 struct qed_txq_start_ret_params ret_params;
2196 int rc;
2197
2198	memset(&params, 0, sizeof(params));
2199 memset(&ret_params, 0, sizeof(ret_params));
2200
2201 /* Let the XDP queue share the queue-zone with one of the regular txq.
2202 * We don't really care about its coalescing.
2203 */
2204 if (txq->is_xdp)
2205 params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2206 else
2207 params.queue_id = txq->index;
2208
2209 params.p_sb = fp->sb_info;
2210 params.sb_idx = sb_idx;
2211 params.tc = txq->cos;
2212
2213	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
2214 page_cnt, &ret_params);
2215 if (rc) {
2216 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
2217 return rc;
2218 }
2219
2220 txq->doorbell_addr = ret_params.p_doorbell;
2221 txq->handle = ret_params.p_handle;
2222
2223 /* Determine the FW consumer address associated */
2224 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
2225
2226 /* Prepare the doorbell parameters */
2227 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
2228 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
2229 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
2230 DQ_XCM_ETH_TX_BD_PROD_CMD);
2231 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2232
2233 /* register doorbell with doorbell recovery mechanism */
2234 rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2235 &txq->tx_db, DB_REC_WIDTH_32B,
2236 DB_REC_KERNEL);
2237
2238 return rc;
2239}
2240
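/* Start the vport and every Rx/Tx/XDP queue, then enable the vport with the
 * current RSS configuration.
 */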
2241static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
2242{
2243 int vlan_removal_en = 1;
2244 struct qed_dev *cdev = edev->cdev;
2245 struct qed_dev_info *qed_info = &edev->dev_info.common;
2246 struct qed_update_vport_params *vport_update_params;
2247 struct qed_queue_start_common_params q_params;
2248 struct qed_start_vport_params start = {0};
2249 int rc, i;
2250
2251 if (!edev->num_queues) {
2252 DP_ERR(edev,
2253 "Cannot update V-VPORT as active as there are no Rx queues\n");
2254 return -EINVAL;
2255 }
2256
2257 vport_update_params = vzalloc(sizeof(*vport_update_params));
2258 if (!vport_update_params)
2259 return -ENOMEM;
2260
2261 start.handle_ptp_pkts = !!(edev->ptp);
2262 start.gro_enable = !edev->gro_disable;
2263 start.mtu = edev->ndev->mtu;
2264 start.vport_id = 0;
2265 start.drop_ttl0 = true;
2266 start.remove_inner_vlan = vlan_removal_en;
2267 start.clear_stats = clear_stats;
2268
2269 rc = edev->ops->vport_start(cdev, &start);
2270
2271 if (rc) {
2272 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2273 goto out;
2274 }
2275
2276 DP_VERBOSE(edev, NETIF_MSG_IFUP,
2277 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2278 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2279
2280 for_each_queue(i) {
2281 struct qede_fastpath *fp = &edev->fp_array[i];
2282 dma_addr_t p_phys_table;
2283 u32 page_cnt;
2284
2285 if (fp->type & QEDE_FASTPATH_RX) {
2286 struct qed_rxq_start_ret_params ret_params;
2287 struct qede_rx_queue *rxq = fp->rxq;
2288 __le16 *val;
2289
2290 memset(&ret_params, 0, sizeof(ret_params));
2291 memset(&q_params, 0, sizeof(q_params));
2292 q_params.queue_id = rxq->rxq_id;
2293 q_params.vport_id = 0;
2294 q_params.p_sb = fp->sb_info;
2295 q_params.sb_idx = RX_PI;
2296
2297 p_phys_table =
2298 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2299 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2300
2301 rc = edev->ops->q_rx_start(cdev, i, &q_params,
2302 rxq->rx_buf_size,
2303 rxq->rx_bd_ring.p_phys_addr,
2304 p_phys_table,
2305 page_cnt, &ret_params);
2306 if (rc) {
2307 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2308 rc);
2309 goto out;
2310 }
2311
2312 /* Use the return parameters */
2313 rxq->hw_rxq_prod_addr = ret_params.p_prod;
2314 rxq->handle = ret_params.p_handle;
2315
2316 val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2317 rxq->hw_cons_ptr = val;
2318
2319 qede_update_rx_prod(edev, rxq);
2320 }
2321
2322 if (fp->type & QEDE_FASTPATH_XDP) {
2323 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2324 if (rc)
2325 goto out;
2326
2327 bpf_prog_add(edev->xdp_prog, 1);
2328 fp->rxq->xdp_prog = edev->xdp_prog;
2329 }
2330
2331 if (fp->type & QEDE_FASTPATH_TX) {
2332 int cos;
2333
2334 for_each_cos_in_txq(edev, cos) {
2335 rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2336 TX_PI(cos));
2337 if (rc)
2338 goto out;
2339 }
2340 }
2341 }
2342
2343 /* Prepare and send the vport enable */
2344 vport_update_params->vport_id = start.vport_id;
2345 vport_update_params->update_vport_active_flg = 1;
2346 vport_update_params->vport_active_flg = 1;
2347
2348 if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2349 qed_info->tx_switching) {
2350 vport_update_params->update_tx_switching_flg = 1;
2351 vport_update_params->tx_switching_flg = 1;
2352 }
2353
2354 qede_fill_rss_params(edev, &vport_update_params->rss_params,
2355 &vport_update_params->update_rss_flg);
2356
2357 rc = edev->ops->vport_update(cdev, vport_update_params);
2358 if (rc)
2359 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2360
2361out:
2362 vfree(vport_update_params);
2363 return rc;
2364}
2365
2366enum qede_unload_mode {
2367 QEDE_UNLOAD_NORMAL,
2368 QEDE_UNLOAD_RECOVERY,
2369};
2370
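/* Bring the data path down: stop the queues, release IRQs and NAPI, and free
 * the fastpath memory.
 */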
2371static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2372 bool is_locked)
2373{
2374 struct qed_link_params link_params;
2375 int rc;
2376
2377 DP_INFO(edev, "Starting qede unload\n");
2378
2379 if (!is_locked)
2380 __qede_lock(edev);
2381
2382 clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2383
2384 if (mode != QEDE_UNLOAD_RECOVERY)
2385 edev->state = QEDE_STATE_CLOSED;
2386
2387 qede_rdma_dev_event_close(edev);
2388
2389 /* Close OS Tx */
2390 netif_tx_disable(edev->ndev);
2391 netif_carrier_off(edev->ndev);
2392
2393 if (mode != QEDE_UNLOAD_RECOVERY) {
2394 /* Reset the link */
2395 memset(&link_params, 0, sizeof(link_params));
2396 link_params.link_up = false;
2397 edev->ops->common->set_link(edev->cdev, &link_params);
2398
2399 rc = qede_stop_queues(edev);
2400 if (rc) {
2401#ifdef CONFIG_RFS_ACCEL
2402 if (edev->dev_info.common.b_arfs_capable) {
2403 qede_poll_for_freeing_arfs_filters(edev);
2404 if (edev->ndev->rx_cpu_rmap)
2405 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2406
2407 edev->ndev->rx_cpu_rmap = NULL;
2408 }
2409#endif
2410 qede_sync_free_irqs(edev);
2411 goto out;
2412 }
2413
2414 DP_INFO(edev, "Stopped Queues\n");
2415 }
2416
2417 qede_vlan_mark_nonconfigured(edev);
2418 edev->ops->fastpath_stop(edev->cdev);
2419
2420 if (edev->dev_info.common.b_arfs_capable) {
2421 qede_poll_for_freeing_arfs_filters(edev);
2422 qede_free_arfs(edev);
2423 }
2424
2425 /* Release the interrupts */
2426 qede_sync_free_irqs(edev);
2427 edev->ops->common->set_fp_int(edev->cdev, 0);
2428
2429 qede_napi_disable_remove(edev);
2430
2431 if (mode == QEDE_UNLOAD_RECOVERY)
2432 qede_empty_tx_queues(edev);
2433
2434 qede_free_mem_load(edev);
2435 qede_free_fp_array(edev);
2436
2437out:
2438 if (!is_locked)
2439 __qede_unlock(edev);
2440
2441 if (mode != QEDE_UNLOAD_RECOVERY)
2442 DP_NOTICE(edev, "Link is down\n");
2443
2444 edev->ptp_skip_txts = 0;
2445
2446 DP_INFO(edev, "Ending qede unload\n");
2447}
2448
2449enum qede_load_mode {
2450 QEDE_LOAD_NORMAL,
2451 QEDE_LOAD_RELOAD,
2452 QEDE_LOAD_RECOVERY,
2453};
2454
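/* Bring the data path up: allocate fastpath resources, request IRQs, start
 * the vport/queues and request link-up.
 */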
2455static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2456 bool is_locked)
2457{
2458 struct qed_link_params link_params;
2459 struct ethtool_coalesce coal = {};
2460 u8 num_tc;
2461 int rc, i;
2462
2463 DP_INFO(edev, "Starting qede load\n");
2464
2465 if (!is_locked)
2466 __qede_lock(edev);
2467
2468 rc = qede_set_num_queues(edev);
2469 if (rc)
2470 goto out;
2471
2472 rc = qede_alloc_fp_array(edev);
2473 if (rc)
2474 goto out;
2475
2476 qede_init_fp(edev);
2477
2478 rc = qede_alloc_mem_load(edev);
2479 if (rc)
2480 goto err1;
2481 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2482 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2483
2484 rc = qede_set_real_num_queues(edev);
2485 if (rc)
2486 goto err2;
2487
2488 if (qede_alloc_arfs(edev)) {
2489 edev->ndev->features &= ~NETIF_F_NTUPLE;
2490 edev->dev_info.common.b_arfs_capable = false;
2491 }
2492
2493 qede_napi_add_enable(edev);
2494 DP_INFO(edev, "Napi added and enabled\n");
2495
2496 rc = qede_setup_irqs(edev);
2497 if (rc)
2498 goto err3;
2499 DP_INFO(edev, "Setup IRQs succeeded\n");
2500
2501 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2502 if (rc)
2503 goto err4;
2504 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2505
2506 num_tc = netdev_get_num_tc(edev->ndev);
2507 num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2508 qede_setup_tc(edev->ndev, num_tc);
2509
2510 /* Program un-configured VLANs */
2511 qede_configure_vlan_filters(edev);
2512
2513 set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2514
2515 /* Ask for link-up using current configuration */
2516 memset(&link_params, 0, sizeof(link_params));
2517 link_params.link_up = true;
2518 edev->ops->common->set_link(edev->cdev, &link_params);
2519
2520 edev->state = QEDE_STATE_OPEN;
2521
2522 coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2523 coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2524
2525 for_each_queue(i) {
2526 if (edev->coal_entry[i].isvalid) {
2527 coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2528 coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2529 }
2530 __qede_unlock(edev);
2531 qede_set_per_coalesce(edev->ndev, i, &coal);
2532 __qede_lock(edev);
2533 }
2534	DP_INFO(edev, "Ending qede load successfully\n");
2535
2536 goto out;
2537err4:
2538 qede_sync_free_irqs(edev);
2539err3:
2540 qede_napi_disable_remove(edev);
2541err2:
2542 qede_free_mem_load(edev);
2543err1:
2544 edev->ops->common->set_fp_int(edev->cdev, 0);
2545 qede_free_fp_array(edev);
2546 edev->num_queues = 0;
2547 edev->fp_num_tx = 0;
2548 edev->fp_num_rx = 0;
2549out:
2550 if (!is_locked)
2551 __qede_unlock(edev);
2552
2553 return rc;
2554}
2555
2556/* 'func' should be able to run between unload and reload assuming interface
2557 * is actually running, or afterwards in case it's currently DOWN.
2558 */
2559void qede_reload(struct qede_dev *edev,
2560 struct qede_reload_args *args, bool is_locked)
2561{
2562 if (!is_locked)
2563 __qede_lock(edev);
2564
2565 /* Since qede_lock is held, internal state wouldn't change even
2566 * if netdev state would start transitioning. Check whether current
2567 * internal configuration indicates device is up, then reload.
2568 */
2569 if (edev->state == QEDE_STATE_OPEN) {
2570 qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2571 if (args)
2572 args->func(edev, args);
2573 qede_load(edev, QEDE_LOAD_RELOAD, true);
2574
2575 /* Since no one is going to do it for us, re-configure */
2576 qede_config_rx_mode(edev->ndev);
2577 } else if (args) {
2578 args->func(edev, args);
2579 }
2580
2581 if (!is_locked)
2582 __qede_unlock(edev);
2583}
2584
2585/* called with rtnl_lock */
2586static int qede_open(struct net_device *ndev)
2587{
2588 struct qede_dev *edev = netdev_priv(ndev);
2589 int rc;
2590
2591 netif_carrier_off(ndev);
2592
2593 edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2594
2595 rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2596 if (rc)
2597 return rc;
2598
2599 udp_tunnel_nic_reset_ntf(ndev);
2600
2601 edev->ops->common->update_drv_state(edev->cdev, true);
2602
2603 return 0;
2604}
2605
2606static int qede_close(struct net_device *ndev)
2607{
2608 struct qede_dev *edev = netdev_priv(ndev);
2609
2610 qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2611
2612 if (edev->cdev)
2613 edev->ops->common->update_drv_state(edev->cdev, false);
2614
2615 return 0;
2616}
2617
2618static void qede_link_update(void *dev, struct qed_link_output *link)
2619{
2620 struct qede_dev *edev = dev;
2621
2622 if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2623 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2624 return;
2625 }
2626
2627 if (link->link_up) {
2628 if (!netif_carrier_ok(edev->ndev)) {
2629 DP_NOTICE(edev, "Link is up\n");
2630 netif_tx_start_all_queues(edev->ndev);
2631 netif_carrier_on(edev->ndev);
2632 qede_rdma_dev_event_open(edev);
2633 }
2634 } else {
2635 if (netif_carrier_ok(edev->ndev)) {
2636 DP_NOTICE(edev, "Link is down\n");
2637 netif_tx_disable(edev->ndev);
2638 netif_carrier_off(edev->ndev);
2639 qede_rdma_dev_event_close(edev);
2640 }
2641 }
2642}
2643
2644static void qede_schedule_recovery_handler(void *dev)
2645{
2646 struct qede_dev *edev = dev;
2647
2648 if (edev->state == QEDE_STATE_RECOVERY) {
2649 DP_NOTICE(edev,
2650 "Avoid scheduling a recovery handling since already in recovery state\n");
2651 return;
2652 }
2653
2654 set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2655 schedule_delayed_work(&edev->sp_task, 0);
2656
2657 DP_INFO(edev, "Scheduled a recovery handler\n");
2658}
2659
2660static void qede_recovery_failed(struct qede_dev *edev)
2661{
2662 netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2663
2664 netif_device_detach(edev->ndev);
2665
2666 if (edev->cdev)
2667 edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2668}
2669
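/* Recover from a fatal HW error by tearing the device down, re-probing it and
 * reloading the data path if it was previously open.
 */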
2670static void qede_recovery_handler(struct qede_dev *edev)
2671{
2672 u32 curr_state = edev->state;
2673 int rc;
2674
2675 DP_NOTICE(edev, "Starting a recovery process\n");
2676
2677	/* No need to acquire the qede_lock first since that is done by qede_sp_task
2678 * before calling this function.
2679 */
2680 edev->state = QEDE_STATE_RECOVERY;
2681
2682 edev->ops->common->recovery_prolog(edev->cdev);
2683
2684 if (curr_state == QEDE_STATE_OPEN)
2685 qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2686
2687 __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2688
2689 rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2690 IS_VF(edev), QEDE_PROBE_RECOVERY);
2691 if (rc) {
2692 edev->cdev = NULL;
2693 goto err;
2694 }
2695
2696 if (curr_state == QEDE_STATE_OPEN) {
2697 rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2698 if (rc)
2699 goto err;
2700
2701 qede_config_rx_mode(edev->ndev);
2702 udp_tunnel_nic_reset_ntf(edev->ndev);
2703 }
2704
2705 edev->state = curr_state;
2706
2707 DP_NOTICE(edev, "Recovery handling is done\n");
2708
2709 return;
2710
2711err:
2712 qede_recovery_failed(edev);
2713}
2714
2715static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2716{
2717 struct qed_dev *cdev = edev->cdev;
2718
2719 DP_NOTICE(edev,
2720 "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2721 edev->err_flags);
2722
2723 /* Get a call trace of the flow that led to the error */
2724 WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2725
2726 /* Prevent HW attentions from being reasserted */
2727 if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2728 edev->ops->common->attn_clr_enable(cdev, true);
2729
2730 DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2731}
2732
2733static void qede_generic_hw_err_handler(struct qede_dev *edev)
2734{
2735 DP_NOTICE(edev,
2736 "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2737 edev->err_flags);
2738
2739 if (edev->devlink) {
2740 DP_NOTICE(edev, "Reporting fatal error to devlink\n");
2741 edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2742 }
2743
2744 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2745
2746 DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2747}
2748
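/* Translate a qed HW error type into the driver's err_flags bits */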
2749static void qede_set_hw_err_flags(struct qede_dev *edev,
2750 enum qed_hw_err_type err_type)
2751{
2752 unsigned long err_flags = 0;
2753
2754 switch (err_type) {
2755 case QED_HW_ERR_DMAE_FAIL:
2756 set_bit(QEDE_ERR_WARN, &err_flags);
2757 fallthrough;
2758 case QED_HW_ERR_MFW_RESP_FAIL:
2759 case QED_HW_ERR_HW_ATTN:
2760 case QED_HW_ERR_RAMROD_FAIL:
2761 case QED_HW_ERR_FW_ASSERT:
2762 set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2763 set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2764		/* Mark this error as recoverable and start recovery */
2765 set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2766 break;
2767
2768 default:
2769 DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2770 break;
2771 }
2772
2773 edev->err_flags |= err_flags;
2774}
2775
2776static void qede_schedule_hw_err_handler(void *dev,
2777 enum qed_hw_err_type err_type)
2778{
2779 struct qede_dev *edev = dev;
2780
2781 /* Fan failure cannot be masked by handling of another HW error or by a
2782 * concurrent recovery process.
2783 */
2784 if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2785 edev->state == QEDE_STATE_RECOVERY) &&
2786 err_type != QED_HW_ERR_FAN_FAIL) {
2787 DP_INFO(edev,
2788 "Avoid scheduling an error handling while another HW error is being handled\n");
2789 return;
2790 }
2791
2792 if (err_type >= QED_HW_ERR_LAST) {
2793 DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2794 clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2795 return;
2796 }
2797
2798 edev->last_err_type = err_type;
2799 qede_set_hw_err_flags(edev, err_type);
2800 qede_atomic_hw_err_handler(edev);
2801 set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2802 schedule_delayed_work(&edev->sp_task, 0);
2803
2804	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2805}
2806
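/* Report whether the netdev Tx queue backing this Tx queue is stopped */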
2807static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2808{
2809 struct netdev_queue *netdev_txq;
2810
2811 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2812 if (netif_xmit_stopped(netdev_txq))
2813 return true;
2814
2815 return false;
2816}
2817
2818static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2819{
2820 struct qede_dev *edev = dev;
2821 struct netdev_hw_addr *ha;
2822 int i;
2823
2824 if (edev->ndev->features & NETIF_F_IP_CSUM)
2825 data->feat_flags |= QED_TLV_IP_CSUM;
2826 if (edev->ndev->features & NETIF_F_TSO)
2827 data->feat_flags |= QED_TLV_LSO;
2828
2829 ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2830 eth_zero_addr(data->mac[1]);
2831 eth_zero_addr(data->mac[2]);
2832 /* Copy the first two UC macs */
2833 netif_addr_lock_bh(edev->ndev);
2834 i = 1;
2835 netdev_for_each_uc_addr(ha, edev->ndev) {
2836 ether_addr_copy(data->mac[i++], ha->addr);
2837 if (i == QED_TLV_MAC_COUNT)
2838 break;
2839 }
2840
2841 netif_addr_unlock_bh(edev->ndev);
2842}
2843
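/* Fill the Ethernet MFW TLV report: LSO limits, promiscuity, queue counts and
 * an approximation of queue emptiness/fullness.
 */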
2844static void qede_get_eth_tlv_data(void *dev, void *data)
2845{
2846 struct qed_mfw_tlv_eth *etlv = data;
2847 struct qede_dev *edev = dev;
2848 struct qede_fastpath *fp;
2849 int i;
2850
2851 etlv->lso_maxoff_size = 0XFFFF;
2852 etlv->lso_maxoff_size_set = true;
2853 etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2854 etlv->lso_minseg_size_set = true;
2855 etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2856 etlv->prom_mode_set = true;
2857 etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2858 etlv->tx_descr_size_set = true;
2859 etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2860 etlv->rx_descr_size_set = true;
2861 etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2862 etlv->iov_offload_set = true;
2863
2864 /* Fill information regarding queues; Should be done under the qede
2865 * lock to guarantee those don't change beneath our feet.
2866 */
2867 etlv->txqs_empty = true;
2868 etlv->rxqs_empty = true;
2869 etlv->num_txqs_full = 0;
2870 etlv->num_rxqs_full = 0;
2871
2872 __qede_lock(edev);
2873 for_each_queue(i) {
2874 fp = &edev->fp_array[i];
2875 if (fp->type & QEDE_FASTPATH_TX) {
2876 struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2877
2878 if (txq->sw_tx_cons != txq->sw_tx_prod)
2879 etlv->txqs_empty = false;
2880 if (qede_is_txq_full(edev, txq))
2881 etlv->num_txqs_full++;
2882 }
2883 if (fp->type & QEDE_FASTPATH_RX) {
2884 if (qede_has_rx_work(fp->rxq))
2885 etlv->rxqs_empty = false;
2886
2887 /* This one is a bit tricky; Firmware might stop
2888 * placing packets if ring is not yet full.
2889 * Give an approximation.
2890 */
2891 if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2892 qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2893 RX_RING_SIZE - 100)
2894 etlv->num_rxqs_full++;
2895 }
2896 }
2897 __qede_unlock(edev);
2898
2899 etlv->txqs_empty_set = true;
2900 etlv->rxqs_empty_set = true;
2901 etlv->num_txqs_full_set = true;
2902 etlv->num_rxqs_full_set = true;
2903}
2904
2905/**
2906 * qede_io_error_detected(): Called when PCI error is detected
2907 *
2908 * @pdev: Pointer to PCI device
2909 * @state: The current pci connection state
2910 *
2911 * Return: pci_ers_result_t.
2912 *
2913 * This function is called after a PCI bus error affecting
2914 * this device has been detected.
2915 */
2916static pci_ers_result_t
2917qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2918{
2919 struct net_device *dev = pci_get_drvdata(pdev);
2920 struct qede_dev *edev = netdev_priv(dev);
2921
2922 if (!edev)
2923 return PCI_ERS_RESULT_NONE;
2924
2925 DP_NOTICE(edev, "IO error detected [%d]\n", state);
2926
2927 __qede_lock(edev);
2928 if (edev->state == QEDE_STATE_RECOVERY) {
2929 DP_NOTICE(edev, "Device already in the recovery state\n");
2930 __qede_unlock(edev);
2931 return PCI_ERS_RESULT_NONE;
2932 }
2933
2934 /* PF handles the recovery of its VFs */
2935 if (IS_VF(edev)) {
2936 DP_VERBOSE(edev, QED_MSG_IOV,
2937 "VF recovery is handled by its PF\n");
2938 __qede_unlock(edev);
2939 return PCI_ERS_RESULT_RECOVERED;
2940 }
2941
2942 /* Close OS Tx */
2943 netif_tx_disable(edev->ndev);
2944 netif_carrier_off(edev->ndev);
2945
2946 set_bit(QEDE_SP_AER, &edev->sp_flags);
2947 schedule_delayed_work(&edev->sp_task, 0);
2948
2949 __qede_unlock(edev);
2950
2951 return PCI_ERS_RESULT_CAN_RECOVER;
2952}
1/* QLogic qede NIC Driver
2* Copyright (c) 2015 QLogic Corporation
3*
4* This software is available under the terms of the GNU General Public License
5* (GPL) Version 2, available from the file COPYING in the main directory of
6* this source tree.
7*/
8
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/version.h>
12#include <linux/device.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/skbuff.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/string.h>
19#include <linux/dma-mapping.h>
20#include <linux/interrupt.h>
21#include <asm/byteorder.h>
22#include <asm/param.h>
23#include <linux/io.h>
24#include <linux/netdev_features.h>
25#include <linux/udp.h>
26#include <linux/tcp.h>
27#include <net/vxlan.h>
28#include <linux/ip.h>
29#include <net/ipv6.h>
30#include <net/tcp.h>
31#include <linux/if_ether.h>
32#include <linux/if_vlan.h>
33#include <linux/pkt_sched.h>
34#include <linux/ethtool.h>
35#include <linux/in.h>
36#include <linux/random.h>
37#include <net/ip6_checksum.h>
38#include <linux/bitops.h>
39
40#include "qede.h"
41
42static char version[] =
43 "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
44
45MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49static uint debug;
50module_param(debug, uint, 0);
51MODULE_PARM_DESC(debug, " Default debug msglevel");
52
53static const struct qed_eth_ops *qed_ops;
54
55#define CHIP_NUM_57980S_40 0x1634
56#define CHIP_NUM_57980S_10 0x1666
57#define CHIP_NUM_57980S_MF 0x1636
58#define CHIP_NUM_57980S_100 0x1644
59#define CHIP_NUM_57980S_50 0x1654
60#define CHIP_NUM_57980S_25 0x1656
61
62#ifndef PCI_DEVICE_ID_NX2_57980E
63#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
64#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
65#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
66#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
67#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
68#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
69#endif
70
71static const struct pci_device_id qede_pci_tbl[] = {
72 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
73 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
74 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
75 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
76 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
77 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
78 { 0 }
79};
80
81MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
82
83static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
84
85#define TX_TIMEOUT (5 * HZ)
86
87static void qede_remove(struct pci_dev *pdev);
88static int qede_alloc_rx_buffer(struct qede_dev *edev,
89 struct qede_rx_queue *rxq);
90static void qede_link_update(void *dev, struct qed_link_output *link);
91
92static struct pci_driver qede_pci_driver = {
93 .name = "qede",
94 .id_table = qede_pci_tbl,
95 .probe = qede_probe,
96 .remove = qede_remove,
97};
98
99static struct qed_eth_cb_ops qede_ll_ops = {
100 {
101 .link_update = qede_link_update,
102 },
103};
104
105static int qede_netdev_event(struct notifier_block *this, unsigned long event,
106 void *ptr)
107{
108 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
109 struct ethtool_drvinfo drvinfo;
110 struct qede_dev *edev;
111
112 /* Currently only support name change */
113 if (event != NETDEV_CHANGENAME)
114 goto done;
115
116 /* Check whether this is a qede device */
117 if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
118 goto done;
119
120 memset(&drvinfo, 0, sizeof(drvinfo));
121 ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
122 if (strcmp(drvinfo.driver, "qede"))
123 goto done;
124 edev = netdev_priv(ndev);
125
126 /* Notify qed of the name change */
127 if (!edev->ops || !edev->ops->common)
128 goto done;
129 edev->ops->common->set_id(edev->cdev, edev->ndev->name,
130 "qede");
131
132done:
133 return NOTIFY_DONE;
134}
135
136static struct notifier_block qede_netdev_notifier = {
137 .notifier_call = qede_netdev_event,
138};
139
140static
141int __init qede_init(void)
142{
143 int ret;
144 u32 qed_ver;
145
146 pr_notice("qede_init: %s\n", version);
147
148 qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
149 if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
150 pr_notice("Version mismatch [%08x != %08x]\n",
151 qed_ver,
152 QEDE_ETH_INTERFACE_VERSION);
153 return -EINVAL;
154 }
155
156 qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
157 if (!qed_ops) {
158 pr_notice("Failed to get qed ethtool operations\n");
159 return -EINVAL;
160 }
161
162 /* Must register notifier before pci ops, since we might miss
163	 * interface rename after pci probe and netdev registration.
164 */
165 ret = register_netdevice_notifier(&qede_netdev_notifier);
166 if (ret) {
167 pr_notice("Failed to register netdevice_notifier\n");
168 qed_put_eth_ops();
169 return -EINVAL;
170 }
171
172 ret = pci_register_driver(&qede_pci_driver);
173 if (ret) {
174 pr_notice("Failed to register driver\n");
175 unregister_netdevice_notifier(&qede_netdev_notifier);
176 qed_put_eth_ops();
177 return -EINVAL;
178 }
179
180 return 0;
181}
182
183static void __exit qede_cleanup(void)
184{
185 pr_notice("qede_cleanup called\n");
186
187 unregister_netdevice_notifier(&qede_netdev_notifier);
188 pci_unregister_driver(&qede_pci_driver);
189 qed_put_eth_ops();
190}
191
192module_init(qede_init);
193module_exit(qede_cleanup);
194
195/* -------------------------------------------------------------------------
196 * START OF FAST-PATH
197 * -------------------------------------------------------------------------
198 */
199
200/* Unmap the data and free skb */
201static int qede_free_tx_pkt(struct qede_dev *edev,
202 struct qede_tx_queue *txq,
203 int *len)
204{
205 u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
206 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
207 struct eth_tx_1st_bd *first_bd;
208 struct eth_tx_bd *tx_data_bd;
209 int bds_consumed = 0;
210 int nbds;
211 bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
212 int i, split_bd_len = 0;
213
214 if (unlikely(!skb)) {
215 DP_ERR(edev,
216 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
217 idx, txq->sw_tx_cons, txq->sw_tx_prod);
218 return -1;
219 }
220
221 *len = skb->len;
222
223 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
224
225 bds_consumed++;
226
227 nbds = first_bd->data.nbds;
228
229 if (data_split) {
230 struct eth_tx_bd *split = (struct eth_tx_bd *)
231 qed_chain_consume(&txq->tx_pbl);
232 split_bd_len = BD_UNMAP_LEN(split);
233 bds_consumed++;
234 }
235 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
236 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
237
238 /* Unmap the data of the skb frags */
239 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
240 tx_data_bd = (struct eth_tx_bd *)
241 qed_chain_consume(&txq->tx_pbl);
242 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
243 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
244 }
245
246 while (bds_consumed++ < nbds)
247 qed_chain_consume(&txq->tx_pbl);
248
249 /* Free skb */
250 dev_kfree_skb_any(skb);
251 txq->sw_tx_ring[idx].skb = NULL;
252 txq->sw_tx_ring[idx].flags = 0;
253
254 return 0;
255}
256
257/* Unmap the data and free skb when mapping failed during start_xmit */
258static void qede_free_failed_tx_pkt(struct qede_dev *edev,
259 struct qede_tx_queue *txq,
260 struct eth_tx_1st_bd *first_bd,
261 int nbd,
262 bool data_split)
263{
264 u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
265 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
266 struct eth_tx_bd *tx_data_bd;
267 int i, split_bd_len = 0;
268
269 /* Return prod to its position before this skb was handled */
270 qed_chain_set_prod(&txq->tx_pbl,
271 le16_to_cpu(txq->tx_db.data.bd_prod),
272 first_bd);
273
274 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
275
276 if (data_split) {
277 struct eth_tx_bd *split = (struct eth_tx_bd *)
278 qed_chain_produce(&txq->tx_pbl);
279 split_bd_len = BD_UNMAP_LEN(split);
280 nbd--;
281 }
282
283 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
284 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
285
286 /* Unmap the data of the skb frags */
287 for (i = 0; i < nbd; i++) {
288 tx_data_bd = (struct eth_tx_bd *)
289 qed_chain_produce(&txq->tx_pbl);
290 if (tx_data_bd->nbytes)
291 dma_unmap_page(&edev->pdev->dev,
292 BD_UNMAP_ADDR(tx_data_bd),
293 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
294 }
295
296 /* Return again prod to its position before this skb was handled */
297 qed_chain_set_prod(&txq->tx_pbl,
298 le16_to_cpu(txq->tx_db.data.bd_prod),
299 first_bd);
300
301 /* Free skb */
302 dev_kfree_skb_any(skb);
303 txq->sw_tx_ring[idx].skb = NULL;
304 txq->sw_tx_ring[idx].flags = 0;
305}
306
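/* Classify an skb for transmission: plain, L4-csum offload and/or LSO, and
 * flag IPv6 packets that carry extension headers.
 */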
307static u32 qede_xmit_type(struct qede_dev *edev,
308 struct sk_buff *skb,
309 int *ipv6_ext)
310{
311 u32 rc = XMIT_L4_CSUM;
312 __be16 l3_proto;
313
314 if (skb->ip_summed != CHECKSUM_PARTIAL)
315 return XMIT_PLAIN;
316
317 l3_proto = vlan_get_protocol(skb);
318 if (l3_proto == htons(ETH_P_IPV6) &&
319 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
320 *ipv6_ext = 1;
321
322 if (skb_is_gso(skb))
323 rc |= XMIT_LSO;
324
325 return rc;
326}
327
328static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
329 struct eth_tx_2nd_bd *second_bd,
330 struct eth_tx_3rd_bd *third_bd)
331{
332 u8 l4_proto;
333 u16 bd2_bits1 = 0, bd2_bits2 = 0;
334
335 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
336
337 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
338 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
339 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
340
341 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
342 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
343
344 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
345 l4_proto = ipv6_hdr(skb)->nexthdr;
346 else
347 l4_proto = ip_hdr(skb)->protocol;
348
349 if (l4_proto == IPPROTO_UDP)
350 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
351
352 if (third_bd)
353 third_bd->data.bitfields |=
354 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
355 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
356 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
357
358 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
359 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
360}
361
362static int map_frag_to_bd(struct qede_dev *edev,
363 skb_frag_t *frag,
364 struct eth_tx_bd *bd)
365{
366 dma_addr_t mapping;
367
368 /* Map skb non-linear frag data for DMA */
369 mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
370 skb_frag_size(frag),
371 DMA_TO_DEVICE);
372 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
373 DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
374 return -ENOMEM;
375 }
376
377 /* Setup the data pointer of the frag data */
378 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
379
380 return 0;
381}
382
383/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
384#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
385static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
386 u8 xmit_type)
387{
388 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
389
390 if (xmit_type & XMIT_LSO) {
391 int hlen;
392
393 hlen = skb_transport_header(skb) +
394 tcp_hdrlen(skb) - skb->data;
395
396 /* linear payload would require its own BD */
397 if (skb_headlen(skb) > hlen)
398 allowed_frags--;
399 }
400
401 return (skb_shinfo(skb)->nr_frags > allowed_frags);
402}
403#endif
404
405/* Main transmit function */
406static
407netdev_tx_t qede_start_xmit(struct sk_buff *skb,
408 struct net_device *ndev)
409{
410 struct qede_dev *edev = netdev_priv(ndev);
411 struct netdev_queue *netdev_txq;
412 struct qede_tx_queue *txq;
413 struct eth_tx_1st_bd *first_bd;
414 struct eth_tx_2nd_bd *second_bd = NULL;
415 struct eth_tx_3rd_bd *third_bd = NULL;
416 struct eth_tx_bd *tx_data_bd = NULL;
417 u16 txq_index;
418 u8 nbd = 0;
419 dma_addr_t mapping;
420 int rc, frag_idx = 0, ipv6_ext = 0;
421 u8 xmit_type;
422 u16 idx;
423 u16 hlen;
424 bool data_split = false;
425
426 /* Get tx-queue context and netdev index */
427 txq_index = skb_get_queue_mapping(skb);
428 WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
429 txq = QEDE_TX_QUEUE(edev, txq_index);
430 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
431
432 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
433 (MAX_SKB_FRAGS + 1));
434
435 xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
436
437#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
438 if (qede_pkt_req_lin(edev, skb, xmit_type)) {
439 if (skb_linearize(skb)) {
440 DP_NOTICE(edev,
441 "SKB linearization failed - silently dropping this SKB\n");
442 dev_kfree_skb_any(skb);
443 return NETDEV_TX_OK;
444 }
445 }
446#endif
447
448 /* Fill the entry in the SW ring and the BDs in the FW ring */
449 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
450 txq->sw_tx_ring[idx].skb = skb;
451 first_bd = (struct eth_tx_1st_bd *)
452 qed_chain_produce(&txq->tx_pbl);
453 memset(first_bd, 0, sizeof(*first_bd));
454 first_bd->data.bd_flags.bitfields =
455 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
456
457 /* Map skb linear data for DMA and set in the first BD */
458 mapping = dma_map_single(&edev->pdev->dev, skb->data,
459 skb_headlen(skb), DMA_TO_DEVICE);
460 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
461 DP_NOTICE(edev, "SKB mapping failed\n");
462 qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
463 return NETDEV_TX_OK;
464 }
465 nbd++;
466 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
467
468 /* In case there is IPv6 with extension headers or LSO we need 2nd and
469 * 3rd BDs.
470 */
471 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
472 second_bd = (struct eth_tx_2nd_bd *)
473 qed_chain_produce(&txq->tx_pbl);
474 memset(second_bd, 0, sizeof(*second_bd));
475
476 nbd++;
477 third_bd = (struct eth_tx_3rd_bd *)
478 qed_chain_produce(&txq->tx_pbl);
479 memset(third_bd, 0, sizeof(*third_bd));
480
481 nbd++;
482 /* We need to fill in additional data in second_bd... */
483 tx_data_bd = (struct eth_tx_bd *)second_bd;
484 }
485
486 if (skb_vlan_tag_present(skb)) {
487 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
488 first_bd->data.bd_flags.bitfields |=
489 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
490 }
491
492 /* Fill the parsing flags & params according to the requested offload */
493 if (xmit_type & XMIT_L4_CSUM) {
494 u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
495
496 /* We don't re-calculate IP checksum as it is already done by
497 * the upper stack
498 */
499 first_bd->data.bd_flags.bitfields |=
500 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
501
502 first_bd->data.bitfields |= cpu_to_le16(temp);
503
504 /* If the packet is IPv6 with extension header, indicate that
505 * to FW and pass few params, since the device cracker doesn't
506 * support parsing IPv6 with extension header/s.
507 */
508 if (unlikely(ipv6_ext))
509 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
510 }
511
512 if (xmit_type & XMIT_LSO) {
513 first_bd->data.bd_flags.bitfields |=
514 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
515 third_bd->data.lso_mss =
516 cpu_to_le16(skb_shinfo(skb)->gso_size);
517
518 first_bd->data.bd_flags.bitfields |=
519 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
520 hlen = skb_transport_header(skb) +
521 tcp_hdrlen(skb) - skb->data;
522
523 /* @@@TBD - if will not be removed need to check */
524 third_bd->data.bitfields |=
525 cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
526
527 /* Make life easier for FW guys who can't deal with header and
528 * data on same BD. If we need to split, use the second bd...
529 */
530 if (unlikely(skb_headlen(skb) > hlen)) {
531 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
532 "TSO split header size is %d (%x:%x)\n",
533 first_bd->nbytes, first_bd->addr.hi,
534 first_bd->addr.lo);
535
536 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
537 le32_to_cpu(first_bd->addr.lo)) +
538 hlen;
539
540 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
541 le16_to_cpu(first_bd->nbytes) -
542 hlen);
543
544 /* this marks the BD as one that has no
545 * individual mapping
546 */
547 txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
548
549 first_bd->nbytes = cpu_to_le16(hlen);
550
551 tx_data_bd = (struct eth_tx_bd *)third_bd;
552 data_split = true;
553 }
554 }
555
556 /* Handle fragmented skb */
557 /* special handle for frags inside 2nd and 3rd bds.. */
558 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
559 rc = map_frag_to_bd(edev,
560 &skb_shinfo(skb)->frags[frag_idx],
561 tx_data_bd);
562 if (rc) {
563 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
564 data_split);
565 return NETDEV_TX_OK;
566 }
567
568 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
569 tx_data_bd = (struct eth_tx_bd *)third_bd;
570 else
571 tx_data_bd = NULL;
572
573 frag_idx++;
574 }
575
576 /* map last frags into 4th, 5th .... */
577 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
578 tx_data_bd = (struct eth_tx_bd *)
579 qed_chain_produce(&txq->tx_pbl);
580
581 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
582
583 rc = map_frag_to_bd(edev,
584 &skb_shinfo(skb)->frags[frag_idx],
585 tx_data_bd);
586 if (rc) {
587 qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
588 data_split);
589 return NETDEV_TX_OK;
590 }
591 }
592
593 /* update the first BD with the actual num BDs */
594 first_bd->data.nbds = nbd;
595
596 netdev_tx_sent_queue(netdev_txq, skb->len);
597
598 skb_tx_timestamp(skb);
599
600 /* Advance packet producer only before sending the packet since mapping
601 * of pages may fail.
602 */
603 txq->sw_tx_prod++;
604
605 /* 'next page' entries are counted in the producer value */
606 txq->tx_db.data.bd_prod =
607 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
608
609 /* wmb makes sure that the BDs data is updated before updating the
610 * producer, otherwise FW may read old data from the BDs.
611 */
612 wmb();
613 barrier();
614 writel(txq->tx_db.raw, txq->doorbell_addr);
615
616 /* mmiowb is needed to synchronize doorbell writes from more than one
617 * processor. It guarantees that the write arrives to the device before
618 * the queue lock is released and another start_xmit is called (possibly
619 * on another CPU). Without this barrier, the next doorbell can bypass
620 * this doorbell. This is applicable to IA64/Altix systems.
621 */
622 mmiowb();
623
624 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
625 < (MAX_SKB_FRAGS + 1))) {
626 netif_tx_stop_queue(netdev_txq);
627 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
628 "Stop queue was called\n");
629 /* paired memory barrier is in qede_tx_int(), we have to keep
630 * ordering of set_bit() in netif_tx_stop_queue() and read of
631 * fp->bd_tx_cons
632 */
633 smp_mb();
634
635 if (qed_chain_get_elem_left(&txq->tx_pbl)
636 >= (MAX_SKB_FRAGS + 1) &&
637 (edev->state == QEDE_STATE_OPEN)) {
638 netif_tx_wake_queue(netdev_txq);
639 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
640 "Wake queue was called\n");
641 }
642 }
643
644 return NETDEV_TX_OK;
645}
646
647static int qede_txq_has_work(struct qede_tx_queue *txq)
648{
649 u16 hw_bd_cons;
650
651 /* Tell compiler that consumer and producer can change */
652 barrier();
653 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
654 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
655 return 0;
656
657 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
658}
659
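/* Tx completion handler: free completed packets and re-wake the netdev queue
 * if it was stopped and enough BDs are free again.
 */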
660static int qede_tx_int(struct qede_dev *edev,
661 struct qede_tx_queue *txq)
662{
663 struct netdev_queue *netdev_txq;
664 u16 hw_bd_cons;
665 unsigned int pkts_compl = 0, bytes_compl = 0;
666 int rc;
667
668 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
669
670 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
671 barrier();
672
673 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
674 int len = 0;
675
676 rc = qede_free_tx_pkt(edev, txq, &len);
677 if (rc) {
678 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
679 hw_bd_cons,
680 qed_chain_get_cons_idx(&txq->tx_pbl));
681 break;
682 }
683
684 bytes_compl += len;
685 pkts_compl++;
686 txq->sw_tx_cons++;
687 }
688
689 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
690
691 /* Need to make the tx_bd_cons update visible to start_xmit()
692 * before checking for netif_tx_queue_stopped(). Without the
693 * memory barrier, there is a small possibility that
694 * start_xmit() will miss it and cause the queue to be stopped
695 * forever.
696 * On the other hand we need an rmb() here to ensure the proper
697 * ordering of bit testing in the following
698 * netif_tx_queue_stopped(txq) call.
699 */
700 smp_mb();
701
702 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
703 /* Taking tx_lock is needed to prevent reenabling the queue
704	 * while it's empty. This could have happened if rx_action() gets
705 * suspended in qede_tx_int() after the condition before
706 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
707 *
708 * stops the queue->sees fresh tx_bd_cons->releases the queue->
709 * sends some packets consuming the whole queue again->
710 * stops the queue
711 */
712
713 __netif_tx_lock(netdev_txq, smp_processor_id());
714
715 if ((netif_tx_queue_stopped(netdev_txq)) &&
716 (edev->state == QEDE_STATE_OPEN) &&
717 (qed_chain_get_elem_left(&txq->tx_pbl)
718 >= (MAX_SKB_FRAGS + 1))) {
719 netif_tx_wake_queue(netdev_txq);
720 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
721 "Wake queue was called\n");
722 }
723
724 __netif_tx_unlock(netdev_txq);
725 }
726
727 return 0;
728}
729
730static bool qede_has_rx_work(struct qede_rx_queue *rxq)
731{
732 u16 hw_comp_cons, sw_comp_cons;
733
734 /* Tell compiler that status block fields can change */
735 barrier();
736
737 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
738 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
739
740 return hw_comp_cons != sw_comp_cons;
741}
742
743static bool qede_has_tx_work(struct qede_fastpath *fp)
744{
745 u8 tc;
746
747 for (tc = 0; tc < fp->edev->num_tc; tc++)
748 if (qede_txq_has_work(&fp->txqs[tc]))
749 return true;
750 return false;
751}
752
753static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
754{
755 qed_chain_consume(&rxq->rx_bd_ring);
756 rxq->sw_rx_cons++;
757}
758
759/* This function reuses the buffer(from an offset) from
760 * consumer index to producer index in the bd ring
761 */
762static inline void qede_reuse_page(struct qede_dev *edev,
763 struct qede_rx_queue *rxq,
764 struct sw_rx_data *curr_cons)
765{
766 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
767 struct sw_rx_data *curr_prod;
768 dma_addr_t new_mapping;
769
770 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
771 *curr_prod = *curr_cons;
772
773 new_mapping = curr_prod->mapping + curr_prod->page_offset;
774
775 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
776 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
777
778 rxq->sw_rx_prod++;
779 curr_cons->data = NULL;
780}
781
782/* In case of allocation failures reuse buffers
783 * from consumer index to produce buffers for firmware
784 */
785static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
786 struct qede_dev *edev, u8 count)
787{
788 struct sw_rx_data *curr_cons;
789
790 for (; count > 0; count--) {
791 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
792 qede_reuse_page(edev, rxq, curr_cons);
793 qede_rx_bd_ring_consume(rxq);
794 }
795}
796
797static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
798 struct qede_rx_queue *rxq,
799 struct sw_rx_data *curr_cons)
800{
801 /* Move to the next segment in the page */
802 curr_cons->page_offset += rxq->rx_buf_seg_size;
803
804 if (curr_cons->page_offset == PAGE_SIZE) {
805 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
806 /* Since we failed to allocate new buffer
807 * current buffer can be used again.
808 */
809 curr_cons->page_offset -= rxq->rx_buf_seg_size;
810
811 return -ENOMEM;
812 }
813
814 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
815 PAGE_SIZE, DMA_FROM_DEVICE);
816 } else {
817 /* Increment refcount of the page as we don't want
818 * network stack to take the ownership of the page
819 * which can be recycled multiple times by the driver.
820 */
821 atomic_inc(&curr_cons->data->_count);
822 qede_reuse_page(edev, rxq, curr_cons);
823 }
824
825 return 0;
826}
827
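/* Publish the Rx BD and CQE producer values to the device's internal RAM */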
828static inline void qede_update_rx_prod(struct qede_dev *edev,
829 struct qede_rx_queue *rxq)
830{
831 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
832 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
833 struct eth_rx_prod_data rx_prods = {0};
834
835 /* Update producers */
836 rx_prods.bd_prod = cpu_to_le16(bd_prod);
837 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
838
839 /* Make sure that the BD and SGE data is updated before updating the
840 * producers since FW might read the BD/SGE right after the producer
841 * is updated.
842 */
843 wmb();
844
845 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
846 (u32 *)&rx_prods);
847
848 /* mmiowb is needed to synchronize doorbell writes from more than one
849 * processor. It guarantees that the write arrives to the device before
850 * the napi lock is released and another qede_poll is called (possibly
851 * on another CPU). Without this barrier, the next doorbell can bypass
852 * this doorbell. This is applicable to IA64/Altix systems.
853 */
854 mmiowb();
855}
856
857static u32 qede_get_rxhash(struct qede_dev *edev,
858 u8 bitfields,
859 __le32 rss_hash,
860 enum pkt_hash_types *rxhash_type)
861{
862 enum rss_hash_type htype;
863
864 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
865
866 if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
867 *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
868 (htype == RSS_HASH_TYPE_IPV6)) ?
869 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
870 return le32_to_cpu(rss_hash);
871 }
872 *rxhash_type = PKT_HASH_TYPE_NONE;
873 return 0;
874}
875
876static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
877{
878 skb_checksum_none_assert(skb);
879
880 if (csum_flag & QEDE_CSUM_UNNECESSARY)
881 skb->ip_summed = CHECKSUM_UNNECESSARY;
882}
883
884static inline void qede_skb_receive(struct qede_dev *edev,
885 struct qede_fastpath *fp,
886 struct sk_buff *skb,
887 u16 vlan_tag)
888{
889 if (vlan_tag)
890 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
891 vlan_tag);
892
893 napi_gro_receive(&fp->napi, skb);
894}
895
896static void qede_set_gro_params(struct qede_dev *edev,
897 struct sk_buff *skb,
898 struct eth_fast_path_rx_tpa_start_cqe *cqe)
899{
900 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
901
902 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
903 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
904 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
905 else
906 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
907
908 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
909 cqe->header_len;
910}
911
912static int qede_fill_frag_skb(struct qede_dev *edev,
913 struct qede_rx_queue *rxq,
914 u8 tpa_agg_index,
915 u16 len_on_bd)
916{
917 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
918 NUM_RX_BDS_MAX];
919 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
920 struct sk_buff *skb = tpa_info->skb;
921
922 if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
923 goto out;
924
925 /* Add one frag and update the appropriate fields in the skb */
926 skb_fill_page_desc(skb, tpa_info->frag_id++,
927 current_bd->data, current_bd->page_offset,
928 len_on_bd);
929
930 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
931 /* Incr page ref count to reuse on allocation failure
932 * so that it doesn't get freed while freeing SKB.
933 */
934 atomic_inc(&current_bd->data->_count);
935 goto out;
936 }
937
938 qed_chain_consume(&rxq->rx_bd_ring);
939 rxq->sw_rx_cons++;
940
941 skb->data_len += len_on_bd;
942 skb->truesize += rxq->rx_buf_seg_size;
943 skb->len += len_on_bd;
944
945 return 0;
946
947out:
948 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
949 qede_recycle_rx_bd_ring(rxq, edev, 1);
950 return -ENOMEM;
951}
952
953static void qede_tpa_start(struct qede_dev *edev,
954 struct qede_rx_queue *rxq,
955 struct eth_fast_path_rx_tpa_start_cqe *cqe)
956{
957 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
958 struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
959 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
960 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
961 dma_addr_t mapping = tpa_info->replace_buf_mapping;
962 struct sw_rx_data *sw_rx_data_cons;
963 struct sw_rx_data *sw_rx_data_prod;
964 enum pkt_hash_types rxhash_type;
965 u32 rxhash;
966
967 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
968 sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
969
970 /* Use the pre-allocated replacement buffer - we can't release the agg.
971 * start buffer until the aggregation is over, and we don't want to risk
972 * an allocation failure here, so re-allocate once the aggregation ends.
973 */
974 dma_unmap_addr_set(sw_rx_data_prod, mapping,
975 dma_unmap_addr(replace_buf, mapping));
976
977 sw_rx_data_prod->data = replace_buf->data;
978 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
979 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
980 sw_rx_data_prod->page_offset = replace_buf->page_offset;
981
982 rxq->sw_rx_prod++;
983
984 /* Move the partial skb from cons to the pool (don't unmap yet);
985 * save the mapping in case we drop the packet later on.
986 */
987 tpa_info->start_buf = *sw_rx_data_cons;
988 mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
989 le32_to_cpu(rx_bd_cons->addr.lo));
990
991 tpa_info->start_buf_mapping = mapping;
992 rxq->sw_rx_cons++;
993
994 /* Set the TPA state to 'start' only if we are able to allocate an skb
995 * for this aggregation; otherwise mark it as error and the aggregation
996 * will be dropped.
997 */
998 tpa_info->skb = netdev_alloc_skb(edev->ndev,
999 le16_to_cpu(cqe->len_on_first_bd));
1000 if (unlikely(!tpa_info->skb)) {
1001 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
1002 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
1003 goto cons_buf;
1004 }
1005
1006 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
1007 memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
1008
1009 /* Start filling in the aggregation info */
1010 tpa_info->frag_id = 0;
1011 tpa_info->agg_state = QEDE_AGG_STATE_START;
1012
1013 rxhash = qede_get_rxhash(edev, cqe->bitfields,
1014 cqe->rss_hash, &rxhash_type);
1015 skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
1016 if ((le16_to_cpu(cqe->pars_flags.flags) >>
1017 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
1018 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
1019 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
1020 else
1021 tpa_info->vlan_tag = 0;
1022
1023 /* This is needed in order to enable forwarding support */
1024 qede_set_gro_params(edev, tpa_info->skb, cqe);
1025
1026cons_buf: /* We still need to handle bd_len_list to consume buffers */
1027 if (likely(cqe->ext_bd_len_list[0]))
1028 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1029 le16_to_cpu(cqe->ext_bd_len_list[0]));
1030
1031 if (unlikely(cqe->ext_bd_len_list[1])) {
1032 DP_ERR(edev,
1033 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
1034 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
1035 }
1036}
1037
1038#ifdef CONFIG_INET
1039static void qede_gro_ip_csum(struct sk_buff *skb)
1040{
1041 const struct iphdr *iph = ip_hdr(skb);
1042 struct tcphdr *th;
1043
1044 skb_set_transport_header(skb, sizeof(struct iphdr));
1045 th = tcp_hdr(skb);
1046
1047 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
1048 iph->saddr, iph->daddr, 0);
1049
1050 tcp_gro_complete(skb);
1051}
1052
1053static void qede_gro_ipv6_csum(struct sk_buff *skb)
1054{
1055 struct ipv6hdr *iph = ipv6_hdr(skb);
1056 struct tcphdr *th;
1057
1058 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1059 th = tcp_hdr(skb);
1060
1061 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
1062 &iph->saddr, &iph->daddr, 0);
1063 tcp_gro_complete(skb);
1064}
1065#endif
1066
1067static void qede_gro_receive(struct qede_dev *edev,
1068 struct qede_fastpath *fp,
1069 struct sk_buff *skb,
1070 u16 vlan_tag)
1071{
1072 /* FW can send a single MTU-sized packet from the GRO flow
1073 * due to aggregation timeout, last segment, etc., which is
1074 * not expected to be a GRO packet. If the skb has zero frags
1075 * then simply push it up the stack as a non-GSO skb.
1076 */
1077 if (unlikely(!skb->data_len)) {
1078 skb_shinfo(skb)->gso_type = 0;
1079 skb_shinfo(skb)->gso_size = 0;
1080 goto send_skb;
1081 }
1082
1083#ifdef CONFIG_INET
1084 if (skb_shinfo(skb)->gso_size) {
1085 skb_set_network_header(skb, 0);
1086
1087 switch (skb->protocol) {
1088 case htons(ETH_P_IP):
1089 qede_gro_ip_csum(skb);
1090 break;
1091 case htons(ETH_P_IPV6):
1092 qede_gro_ipv6_csum(skb);
1093 break;
1094 default:
1095 DP_ERR(edev,
1096 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
1097 ntohs(skb->protocol));
1098 }
1099 }
1100#endif
1101
1102send_skb:
1103 skb_record_rx_queue(skb, fp->rss_id);
1104 qede_skb_receive(edev, fp, skb, vlan_tag);
1105}
1106
1107static inline void qede_tpa_cont(struct qede_dev *edev,
1108 struct qede_rx_queue *rxq,
1109 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1110{
1111 int i;
1112
1113 for (i = 0; cqe->len_list[i]; i++)
1114 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1115 le16_to_cpu(cqe->len_list[i]));
1116
1117 if (unlikely(i > 1))
1118 DP_ERR(edev,
1119 "Strange - TPA cont with more than a single len_list entry\n");
1120}
1121
1122static void qede_tpa_end(struct qede_dev *edev,
1123 struct qede_fastpath *fp,
1124 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1125{
1126 struct qede_rx_queue *rxq = fp->rxq;
1127 struct qede_agg_info *tpa_info;
1128 struct sk_buff *skb;
1129 int i;
1130
1131 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
1132 skb = tpa_info->skb;
1133
1134 for (i = 0; cqe->len_list[i]; i++)
1135 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1136 le16_to_cpu(cqe->len_list[i]));
1137 if (unlikely(i > 1))
1138 DP_ERR(edev,
1139 "Strange - TPA end with more than a single len_list entry\n");
1140
1141 if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
1142 goto err;
1143
1144 /* Sanity */
1145 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
1146 DP_ERR(edev,
1147 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1148 cqe->num_of_bds, tpa_info->frag_id);
1149 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1150 DP_ERR(edev,
1151 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1152 le16_to_cpu(cqe->total_packet_len), skb->len);
1153
1154 memcpy(skb->data,
1155 page_address(tpa_info->start_buf.data) +
1156 tpa_info->start_cqe.placement_offset +
1157 tpa_info->start_buf.page_offset,
1158 le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
1159
1160 /* Recycle [mapped] start buffer for the next replacement */
1161 tpa_info->replace_buf = tpa_info->start_buf;
1162 tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
1163
1164 /* Finalize the SKB */
1165 skb->protocol = eth_type_trans(skb, edev->ndev);
1166 skb->ip_summed = CHECKSUM_UNNECESSARY;
1167
1168 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1169 * to skb_shinfo(skb)->gso_segs
1170 */
1171 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1172
1173 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1174
1175 tpa_info->agg_state = QEDE_AGG_STATE_NONE;
1176
1177 return;
1178err:
1179 /* The BD starting the aggregation is still mapped; Re-use it for
1180 * future aggregations [as replacement buffer]
1181 */
1182 memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
1183 sizeof(struct sw_rx_data));
1184 tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
1185 tpa_info->start_buf.data = NULL;
1186 tpa_info->agg_state = QEDE_AGG_STATE_NONE;
1187 dev_kfree_skb_any(tpa_info->skb);
1188 tpa_info->skb = NULL;
1189}
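
/* TPA (HW aggregation) flow handled by the three routines above: a TPA_START
 * CQE allocates the skb and parks the first buffer (nothing is copied yet),
 * TPA_CONT CQEs append page fragments via qede_fill_frag_skb(), and the
 * TPA_END CQE copies the headers from the start buffer into the linear area,
 * sets the coalesced-segment count for tcp_gro_complete() and hands the skb
 * to qede_gro_receive(). Any failure along the way moves agg_state to
 * QEDE_AGG_STATE_ERROR and the whole aggregation is dropped here.
 */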
1190
1191static u8 qede_check_csum(u16 flag)
1192{
1193 u16 csum_flag = 0;
1194 u8 csum = 0;
1195
1196 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1197 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
1198 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1199 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1200 csum = QEDE_CSUM_UNNECESSARY;
1201 }
1202
1203 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1204 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1205
1206 if (csum_flag & flag)
1207 return QEDE_CSUM_ERROR;
1208
1209 return csum;
1210}
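
/* Example: a CQE with L4CHKSMWASCALCULATED set and neither L4CHKSMERROR nor
 * IPHDRERROR set yields QEDE_CSUM_UNNECESSARY; if either error bit is set the
 * packet is reported as QEDE_CSUM_ERROR and dropped in qede_rx_int(); with no
 * L4 checksum calculated and no errors, 0 is returned and the skb is left as
 * CHECKSUM_NONE by qede_set_skb_csum().
 */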
1211
1212static int qede_rx_int(struct qede_fastpath *fp, int budget)
1213{
1214 struct qede_dev *edev = fp->edev;
1215 struct qede_rx_queue *rxq = fp->rxq;
1216
1217 u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
1218 int rx_pkt = 0;
1219 u8 csum_flag;
1220
1221 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1222 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1223
1224 /* Memory barrier to prevent the CPU from speculatively reading the
1225 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE were
1226 * read before FW writes it, and FW then writes the CQE and SB before the
1227 * CPU reads hw_comp_cons, the CPU would process a stale CQE.
1228 */
1229 rmb();
1230
1231 /* Loop to complete all indicated BDs */
1232 while (sw_comp_cons != hw_comp_cons) {
1233 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1234 enum pkt_hash_types rxhash_type;
1235 enum eth_rx_cqe_type cqe_type;
1236 struct sw_rx_data *sw_rx_data;
1237 union eth_rx_cqe *cqe;
1238 struct sk_buff *skb;
1239 struct page *data;
1240 __le16 flags;
1241 u16 len, pad;
1242 u32 rx_hash;
1243
1244 /* Get the CQE from the completion ring */
1245 cqe = (union eth_rx_cqe *)
1246 qed_chain_consume(&rxq->rx_comp_ring);
1247 cqe_type = cqe->fast_path_regular.type;
1248
1249 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1250 edev->ops->eth_cqe_completion(
1251 edev->cdev, fp->rss_id,
1252 (struct eth_slow_path_rx_cqe *)cqe);
1253 goto next_cqe;
1254 }
1255
1256 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
1257 switch (cqe_type) {
1258 case ETH_RX_CQE_TYPE_TPA_START:
1259 qede_tpa_start(edev, rxq,
1260 &cqe->fast_path_tpa_start);
1261 goto next_cqe;
1262 case ETH_RX_CQE_TYPE_TPA_CONT:
1263 qede_tpa_cont(edev, rxq,
1264 &cqe->fast_path_tpa_cont);
1265 goto next_cqe;
1266 case ETH_RX_CQE_TYPE_TPA_END:
1267 qede_tpa_end(edev, fp,
1268 &cqe->fast_path_tpa_end);
1269 goto next_rx_only;
1270 default:
1271 break;
1272 }
1273 }
1274
1275 /* Get the data from the SW ring */
1276 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1277 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1278 data = sw_rx_data->data;
1279
1280 fp_cqe = &cqe->fast_path_regular;
1281 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1282 pad = fp_cqe->placement_offset;
1283 flags = cqe->fast_path_regular.pars_flags.flags;
1284
1285 /* If this is an error packet then drop it */
1286 parse_flag = le16_to_cpu(flags);
1287
1288 csum_flag = qede_check_csum(parse_flag);
1289 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1290 DP_NOTICE(edev,
1291 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1292 sw_comp_cons, parse_flag);
1293 rxq->rx_hw_errors++;
1294 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1295 goto next_cqe;
1296 }
1297
1298 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1299 if (unlikely(!skb)) {
1300 DP_NOTICE(edev,
1301 "SKB allocation failed, dropping incoming packet\n");
1302 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1303 rxq->rx_alloc_errors++;
1304 goto next_cqe;
1305 }
1306
1307 /* Copy data into SKB */
1308 if (len + pad <= QEDE_RX_HDR_SIZE) {
1309 memcpy(skb_put(skb, len),
1310 page_address(data) + pad +
1311 sw_rx_data->page_offset, len);
1312 qede_reuse_page(edev, rxq, sw_rx_data);
1313 } else {
1314 struct skb_frag_struct *frag;
1315 unsigned int pull_len;
1316 unsigned char *va;
1317
1318 frag = &skb_shinfo(skb)->frags[0];
1319
1320 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
1321 pad + sw_rx_data->page_offset,
1322 len, rxq->rx_buf_seg_size);
1323
1324 va = skb_frag_address(frag);
1325 pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
1326
1327 /* Align the pull_len to optimize memcpy */
1328 memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
1329
1330 skb_frag_size_sub(frag, pull_len);
1331 frag->page_offset += pull_len;
1332 skb->data_len -= pull_len;
1333 skb->tail += pull_len;
1334
1335 if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1336 sw_rx_data))) {
1337 DP_ERR(edev, "Failed to allocate rx buffer\n");
1338 /* Incr page ref count to reuse on allocation
1339 * failure so that it doesn't get freed while
1340 * freeing SKB.
1341 */
1342
1343 atomic_inc(&sw_rx_data->data->_count);
1344 rxq->rx_alloc_errors++;
1345 qede_recycle_rx_bd_ring(rxq, edev,
1346 fp_cqe->bd_num);
1347 dev_kfree_skb_any(skb);
1348 goto next_cqe;
1349 }
1350 }
1351
1352 qede_rx_bd_ring_consume(rxq);
1353
1354 if (fp_cqe->bd_num != 1) {
1355 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1356 u8 num_frags;
1357
1358 pkt_len -= len;
1359
1360 for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
1361 num_frags--) {
1362 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1363 rxq->rx_buf_size : pkt_len;
1364 if (unlikely(!cur_size)) {
1365 DP_ERR(edev,
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1367 num_frags);
1368 qede_recycle_rx_bd_ring(rxq, edev,
1369 num_frags);
1370 dev_kfree_skb_any(skb);
1371 goto next_cqe;
1372 }
1373
1374 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1375 qede_recycle_rx_bd_ring(rxq, edev,
1376 num_frags);
1377 dev_kfree_skb_any(skb);
1378 goto next_cqe;
1379 }
1380
1381 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1382 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1383 qede_rx_bd_ring_consume(rxq);
1384
1385 dma_unmap_page(&edev->pdev->dev,
1386 sw_rx_data->mapping,
1387 PAGE_SIZE, DMA_FROM_DEVICE);
1388
1389 skb_fill_page_desc(skb,
1390 skb_shinfo(skb)->nr_frags++,
1391 sw_rx_data->data, 0,
1392 cur_size);
1393
1394 skb->truesize += PAGE_SIZE;
1395 skb->data_len += cur_size;
1396 skb->len += cur_size;
1397 pkt_len -= cur_size;
1398 }
1399
1400 if (unlikely(pkt_len))
1401 DP_ERR(edev,
1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1403 pkt_len);
1404 }
1405
1406 skb->protocol = eth_type_trans(skb, edev->ndev);
1407
1408 rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
1409 fp_cqe->rss_hash,
1410 &rxhash_type);
1411
1412 skb_set_hash(skb, rx_hash, rxhash_type);
1413
1414 qede_set_skb_csum(skb, csum_flag);
1415
1416 skb_record_rx_queue(skb, fp->rss_id);
1417
1418 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1419next_rx_only:
1420 rx_pkt++;
1421
1422next_cqe: /* don't consume bd rx buffer */
1423 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1424 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1425 /* TODO (TPA): revisit how the budget is handled for TPA;
1426 * perhaps increase it on "end".
1427 */
1428 if (rx_pkt == budget)
1429 break;
1430 } /* repeat while sw_comp_cons != hw_comp_cons... */
1431
1432 /* Update producers */
1433 qede_update_rx_prod(edev, rxq);
1434
1435 return rx_pkt;
1436}
1437
1438static int qede_poll(struct napi_struct *napi, int budget)
1439{
1440 int work_done = 0;
1441 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1442 napi);
1443 struct qede_dev *edev = fp->edev;
1444
1445 while (1) {
1446 u8 tc;
1447
1448 for (tc = 0; tc < edev->num_tc; tc++)
1449 if (qede_txq_has_work(&fp->txqs[tc]))
1450 qede_tx_int(edev, &fp->txqs[tc]);
1451
1452 if (qede_has_rx_work(fp->rxq)) {
1453 work_done += qede_rx_int(fp, budget - work_done);
1454
1455 /* must not complete if we consumed full budget */
1456 if (work_done >= budget)
1457 break;
1458 }
1459
1460 /* Fall out from the NAPI loop if needed */
1461 if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
1462 qed_sb_update_sb_idx(fp->sb_info);
1463 /* *_has_*_work() reads the status block,
1464 * thus we need to ensure that status block indices
1465 * have been actually read (qed_sb_update_sb_idx)
1466 * prior to this check (*_has_*_work) so that
1467 * we won't write the "newer" value of the status block
1468 * to HW (if there was a DMA right after
1469 * qede_has_rx_work and if there is no rmb, the memory
1470 * reading (qed_sb_update_sb_idx) may be postponed
1471 * to right before *_ack_sb). In this case there
1472 * will never be another interrupt until there is
1473 * another update of the status block, while there
1474 * is still unhandled work.
1475 */
1476 rmb();
1477
1478 if (!(qede_has_rx_work(fp->rxq) ||
1479 qede_has_tx_work(fp))) {
1480 napi_complete(napi);
1481 /* Update and reenable interrupts */
1482 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
1483 1 /*update*/);
1484 break;
1485 }
1486 }
1487 }
1488
1489 return work_done;
1490}
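
/* Budgeting note: only Rx work counts against the NAPI budget. If
 * qede_rx_int() consumes the entire budget, the loop breaks without calling
 * napi_complete() and the core will re-invoke the poll; the status block is
 * acked and interrupts re-enabled only once both the Rx and Tx rings are
 * observed empty after the status-block index update.
 */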
1491
1492static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1493{
1494 struct qede_fastpath *fp = fp_cookie;
1495
1496 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1497
1498 napi_schedule_irqoff(&fp->napi);
1499 return IRQ_HANDLED;
1500}
1501
1502/* -------------------------------------------------------------------------
1503 * END OF FAST-PATH
1504 * -------------------------------------------------------------------------
1505 */
1506
1507static int qede_open(struct net_device *ndev);
1508static int qede_close(struct net_device *ndev);
1509static int qede_set_mac_addr(struct net_device *ndev, void *p);
1510static void qede_set_rx_mode(struct net_device *ndev);
1511static void qede_config_rx_mode(struct net_device *ndev);
1512
1513static int qede_set_ucast_rx_mac(struct qede_dev *edev,
1514 enum qed_filter_xcast_params_type opcode,
1515 unsigned char mac[ETH_ALEN])
1516{
1517 struct qed_filter_params filter_cmd;
1518
1519 memset(&filter_cmd, 0, sizeof(filter_cmd));
1520 filter_cmd.type = QED_FILTER_TYPE_UCAST;
1521 filter_cmd.filter.ucast.type = opcode;
1522 filter_cmd.filter.ucast.mac_valid = 1;
1523 ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
1524
1525 return edev->ops->filter_config(edev->cdev, &filter_cmd);
1526}
1527
1528static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
1529 enum qed_filter_xcast_params_type opcode,
1530 u16 vid)
1531{
1532 struct qed_filter_params filter_cmd;
1533
1534 memset(&filter_cmd, 0, sizeof(filter_cmd));
1535 filter_cmd.type = QED_FILTER_TYPE_UCAST;
1536 filter_cmd.filter.ucast.type = opcode;
1537 filter_cmd.filter.ucast.vlan_valid = 1;
1538 filter_cmd.filter.ucast.vlan = vid;
1539
1540 return edev->ops->filter_config(edev->cdev, &filter_cmd);
1541}
1542
1543void qede_fill_by_demand_stats(struct qede_dev *edev)
1544{
1545 struct qed_eth_stats stats;
1546
1547 edev->ops->get_vport_stats(edev->cdev, &stats);
1548 edev->stats.no_buff_discards = stats.no_buff_discards;
1549 edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
1550 edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
1551 edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
1552 edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
1553 edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
1554 edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
1555 edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
1556 edev->stats.mac_filter_discards = stats.mac_filter_discards;
1557
1558 edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
1559 edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
1560 edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
1561 edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
1562 edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
1563 edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
1564 edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
1565 edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
1566 edev->stats.coalesced_events = stats.tpa_coalesced_events;
1567 edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
1568 edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
1569 edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
1570
1571 edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
1572 edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
1573 edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
1574 edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
1575 edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
1576 edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
1577 edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
1578 edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
1579 edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
1580 edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
1581 edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
1582 edev->stats.rx_crc_errors = stats.rx_crc_errors;
1583 edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
1584 edev->stats.rx_pause_frames = stats.rx_pause_frames;
1585 edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
1586 edev->stats.rx_align_errors = stats.rx_align_errors;
1587 edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
1588 edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
1589 edev->stats.rx_jabbers = stats.rx_jabbers;
1590 edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
1591 edev->stats.rx_fragments = stats.rx_fragments;
1592 edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
1593 edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
1594 edev->stats.tx_128_to_255_byte_packets =
1595 stats.tx_128_to_255_byte_packets;
1596 edev->stats.tx_256_to_511_byte_packets =
1597 stats.tx_256_to_511_byte_packets;
1598 edev->stats.tx_512_to_1023_byte_packets =
1599 stats.tx_512_to_1023_byte_packets;
1600 edev->stats.tx_1024_to_1518_byte_packets =
1601 stats.tx_1024_to_1518_byte_packets;
1602 edev->stats.tx_1519_to_2047_byte_packets =
1603 stats.tx_1519_to_2047_byte_packets;
1604 edev->stats.tx_2048_to_4095_byte_packets =
1605 stats.tx_2048_to_4095_byte_packets;
1606 edev->stats.tx_4096_to_9216_byte_packets =
1607 stats.tx_4096_to_9216_byte_packets;
1608 edev->stats.tx_9217_to_16383_byte_packets =
1609 stats.tx_9217_to_16383_byte_packets;
1610 edev->stats.tx_pause_frames = stats.tx_pause_frames;
1611 edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
1612 edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
1613 edev->stats.tx_total_collisions = stats.tx_total_collisions;
1614 edev->stats.brb_truncates = stats.brb_truncates;
1615 edev->stats.brb_discards = stats.brb_discards;
1616 edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
1617}
1618
1619static struct rtnl_link_stats64 *qede_get_stats64(
1620 struct net_device *dev,
1621 struct rtnl_link_stats64 *stats)
1622{
1623 struct qede_dev *edev = netdev_priv(dev);
1624
1625 qede_fill_by_demand_stats(edev);
1626
1627 stats->rx_packets = edev->stats.rx_ucast_pkts +
1628 edev->stats.rx_mcast_pkts +
1629 edev->stats.rx_bcast_pkts;
1630 stats->tx_packets = edev->stats.tx_ucast_pkts +
1631 edev->stats.tx_mcast_pkts +
1632 edev->stats.tx_bcast_pkts;
1633
1634 stats->rx_bytes = edev->stats.rx_ucast_bytes +
1635 edev->stats.rx_mcast_bytes +
1636 edev->stats.rx_bcast_bytes;
1637
1638 stats->tx_bytes = edev->stats.tx_ucast_bytes +
1639 edev->stats.tx_mcast_bytes +
1640 edev->stats.tx_bcast_bytes;
1641
1642 stats->tx_errors = edev->stats.tx_err_drop_pkts;
1643 stats->multicast = edev->stats.rx_mcast_pkts +
1644 edev->stats.rx_bcast_pkts;
1645
1646 stats->rx_fifo_errors = edev->stats.no_buff_discards;
1647
1648 stats->collisions = edev->stats.tx_total_collisions;
1649 stats->rx_crc_errors = edev->stats.rx_crc_errors;
1650 stats->rx_frame_errors = edev->stats.rx_align_errors;
1651
1652 return stats;
1653}
1654
1655static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
1656{
1657 struct qed_update_vport_params params;
1658 int rc;
1659
1660 /* Proceed only if action actually needs to be performed */
1661 if (edev->accept_any_vlan == action)
1662 return;
1663
1664 memset(&params, 0, sizeof(params));
1665
1666 params.vport_id = 0;
1667 params.accept_any_vlan = action;
1668 params.update_accept_any_vlan_flg = 1;
1669
1670 rc = edev->ops->vport_update(edev->cdev, &params);
1671 if (rc) {
1672 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
1673 action ? "enable" : "disable");
1674 } else {
1675 DP_INFO(edev, "%s accept-any-vlan\n",
1676 action ? "enabled" : "disabled");
1677 edev->accept_any_vlan = action;
1678 }
1679}
1680
1681static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1682{
1683 struct qede_dev *edev = netdev_priv(dev);
1684 struct qede_vlan *vlan, *tmp;
1685 int rc;
1686
1687 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
1688
1689 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1690 if (!vlan) {
1691 DP_INFO(edev, "Failed to allocate struct for vlan\n");
1692 return -ENOMEM;
1693 }
1694 INIT_LIST_HEAD(&vlan->list);
1695 vlan->vid = vid;
1696 vlan->configured = false;
1697
1698 /* Verify vlan isn't already configured */
1699 list_for_each_entry(tmp, &edev->vlan_list, list) {
1700 if (tmp->vid == vlan->vid) {
1701 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1702 "vlan already configured\n");
1703 kfree(vlan);
1704 return -EEXIST;
1705 }
1706 }
1707
1708 /* If interface is down, cache this VLAN ID and return */
1709 if (edev->state != QEDE_STATE_OPEN) {
1710 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1711 "Interface is down, VLAN %d will be configured when interface is up\n",
1712 vid);
1713 if (vid != 0)
1714 edev->non_configured_vlans++;
1715 list_add(&vlan->list, &edev->vlan_list);
1716
1717 return 0;
1718 }
1719
1720 /* Check for the filter limit.
1721 * Note - vlan0 has a reserved filter and can be added without
1722 * worrying about quota
1723 */
1724 if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
1725 (vlan->vid == 0)) {
1726 rc = qede_set_ucast_rx_vlan(edev,
1727 QED_FILTER_XCAST_TYPE_ADD,
1728 vlan->vid);
1729 if (rc) {
1730 DP_ERR(edev, "Failed to configure VLAN %d\n",
1731 vlan->vid);
1732 kfree(vlan);
1733 return -EINVAL;
1734 }
1735 vlan->configured = true;
1736
1737 /* The vlan0 filter doesn't consume from our quota */
1738 if (vlan->vid != 0)
1739 edev->configured_vlans++;
1740 } else {
1741 /* Out of quota; Activate accept-any-VLAN mode */
1742 if (!edev->non_configured_vlans)
1743 qede_config_accept_any_vlan(edev, true);
1744
1745 edev->non_configured_vlans++;
1746 }
1747
1748 list_add(&vlan->list, &edev->vlan_list);
1749
1750 return 0;
1751}
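
/* Example: on a device reporting num_vlan_filters == 2, the first two
 * non-zero VIDs added while the interface is up get real HW filters; a third
 * VID exceeds the quota and switches the vport to accept-any-VLAN mode, which
 * is turned off again by qede_configure_vlan_filters() once enough VLANs have
 * been removed for every remaining VID to be backed by a HW filter.
 */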
1752
1753static void qede_del_vlan_from_list(struct qede_dev *edev,
1754 struct qede_vlan *vlan)
1755{
1756 /* The vlan0 filter doesn't consume from our quota */
1757 if (vlan->vid != 0) {
1758 if (vlan->configured)
1759 edev->configured_vlans--;
1760 else
1761 edev->non_configured_vlans--;
1762 }
1763
1764 list_del(&vlan->list);
1765 kfree(vlan);
1766}
1767
1768static int qede_configure_vlan_filters(struct qede_dev *edev)
1769{
1770 int rc = 0, real_rc = 0, accept_any_vlan = 0;
1771 struct qed_dev_eth_info *dev_info;
1772 struct qede_vlan *vlan = NULL;
1773
1774 if (list_empty(&edev->vlan_list))
1775 return 0;
1776
1777 dev_info = &edev->dev_info;
1778
1779 /* Configure non-configured vlans */
1780 list_for_each_entry(vlan, &edev->vlan_list, list) {
1781 if (vlan->configured)
1782 continue;
1783
1784 /* We have used all our credits, now enable accept_any_vlan */
1785 if ((vlan->vid != 0) &&
1786 (edev->configured_vlans == dev_info->num_vlan_filters)) {
1787 accept_any_vlan = 1;
1788 continue;
1789 }
1790
1791 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
1792
1793 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
1794 vlan->vid);
1795 if (rc) {
1796 DP_ERR(edev, "Failed to configure VLAN %u\n",
1797 vlan->vid);
1798 real_rc = rc;
1799 continue;
1800 }
1801
1802 vlan->configured = true;
1803 /* vlan0 filter doesn't consume our VLAN filter's quota */
1804 if (vlan->vid != 0) {
1805 edev->non_configured_vlans--;
1806 edev->configured_vlans++;
1807 }
1808 }
1809
1810 /* enable accept_any_vlan mode if we have more VLANs than credits,
1811 * or remove accept_any_vlan mode if we've actually removed
1812 * a non-configured vlan, and all remaining vlans are truly configured.
1813 */
1814
1815 if (accept_any_vlan)
1816 qede_config_accept_any_vlan(edev, true);
1817 else if (!edev->non_configured_vlans)
1818 qede_config_accept_any_vlan(edev, false);
1819
1820 return real_rc;
1821}
1822
1823static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1824{
1825 struct qede_dev *edev = netdev_priv(dev);
1826 struct qede_vlan *vlan = NULL;
1827 int rc;
1828
1829 DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
1830
1831 /* Find whether entry exists */
1832 list_for_each_entry(vlan, &edev->vlan_list, list)
1833 if (vlan->vid == vid)
1834 break;
1835
1836 if (!vlan || (vlan->vid != vid)) {
1837 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
1838 "Vlan isn't configured\n");
1839 return 0;
1840 }
1841
1842 if (edev->state != QEDE_STATE_OPEN) {
1843 /* As the interface is already down, there's no vport instance
1844 * from which to remove the vlan filter; just update the vlan list.
1845 */
1846 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1847 "Interface is down, removing VLAN from list only\n");
1848 qede_del_vlan_from_list(edev, vlan);
1849 return 0;
1850 }
1851
1852 /* Remove vlan */
1853 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
1854 if (rc) {
1855 DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
1856 return -EINVAL;
1857 }
1858
1859 qede_del_vlan_from_list(edev, vlan);
1860
1861 /* We have removed a VLAN - see if a previously non-configured
1862 * VLAN from the list can now be configured.
1863 */
1864 rc = qede_configure_vlan_filters(edev);
1865
1866 return rc;
1867}
1868
1869static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
1870{
1871 struct qede_vlan *vlan = NULL;
1872
1873 if (list_empty(&edev->vlan_list))
1874 return;
1875
1876 list_for_each_entry(vlan, &edev->vlan_list, list) {
1877 if (!vlan->configured)
1878 continue;
1879
1880 vlan->configured = false;
1881
1882 /* The vlan0 filter doesn't consume from our quota */
1883 if (vlan->vid != 0) {
1884 edev->non_configured_vlans++;
1885 edev->configured_vlans--;
1886 }
1887
1888 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1889 "marked vlan %d as non-configured\n",
1890 vlan->vid);
1891 }
1892
1893 edev->accept_any_vlan = false;
1894}
1895
1896static const struct net_device_ops qede_netdev_ops = {
1897 .ndo_open = qede_open,
1898 .ndo_stop = qede_close,
1899 .ndo_start_xmit = qede_start_xmit,
1900 .ndo_set_rx_mode = qede_set_rx_mode,
1901 .ndo_set_mac_address = qede_set_mac_addr,
1902 .ndo_validate_addr = eth_validate_addr,
1903 .ndo_change_mtu = qede_change_mtu,
1904 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
1905 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
1906 .ndo_get_stats64 = qede_get_stats64,
1907};
1908
1909/* -------------------------------------------------------------------------
1910 * START OF PROBE / REMOVE
1911 * -------------------------------------------------------------------------
1912 */
1913
1914static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
1915 struct pci_dev *pdev,
1916 struct qed_dev_eth_info *info,
1917 u32 dp_module,
1918 u8 dp_level)
1919{
1920 struct net_device *ndev;
1921 struct qede_dev *edev;
1922
1923 ndev = alloc_etherdev_mqs(sizeof(*edev),
1924 info->num_queues,
1925 info->num_queues);
1926 if (!ndev) {
1927 pr_err("etherdev allocation failed\n");
1928 return NULL;
1929 }
1930
1931 edev = netdev_priv(ndev);
1932 edev->ndev = ndev;
1933 edev->cdev = cdev;
1934 edev->pdev = pdev;
1935 edev->dp_module = dp_module;
1936 edev->dp_level = dp_level;
1937 edev->ops = qed_ops;
1938 edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
1939 edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
1940
1941 SET_NETDEV_DEV(ndev, &pdev->dev);
1942
1943 memset(&edev->stats, 0, sizeof(edev->stats));
1944 memcpy(&edev->dev_info, info, sizeof(*info));
1945
1946 edev->num_tc = edev->dev_info.num_tc;
1947
1948 INIT_LIST_HEAD(&edev->vlan_list);
1949
1950 return edev;
1951}
1952
1953static void qede_init_ndev(struct qede_dev *edev)
1954{
1955 struct net_device *ndev = edev->ndev;
1956 struct pci_dev *pdev = edev->pdev;
1957 u32 hw_features;
1958
1959 pci_set_drvdata(pdev, ndev);
1960
1961 ndev->mem_start = edev->dev_info.common.pci_mem_start;
1962 ndev->base_addr = ndev->mem_start;
1963 ndev->mem_end = edev->dev_info.common.pci_mem_end;
1964 ndev->irq = edev->dev_info.common.pci_irq;
1965
1966 ndev->watchdog_timeo = TX_TIMEOUT;
1967
1968 ndev->netdev_ops = &qede_netdev_ops;
1969
1970 qede_set_ethtool_ops(ndev);
1971
1972 /* user-changeable features */
1973 hw_features = NETIF_F_GRO | NETIF_F_SG |
1974 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1975 NETIF_F_TSO | NETIF_F_TSO6;
1976
1977 ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
1978 NETIF_F_HIGHDMA;
1979 ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
1980 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
1981 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
1982
1983 ndev->hw_features = hw_features;
1984
1985 /* Set network device HW mac */
1986 ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
1987}
1988
1989/* This function converts from 32b param to two params of level and module
1990 * Input 32b decoding:
1991 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
1992 * 'happy' flow, e.g. memory allocation failed.
1993 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
1994 * and provide important parameters.
1995 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
1996 * module. VERBOSE prints are for tracking the specific flow in low level.
1997 *
1998 * Notice that the level should be that of the lowest required logs.
1999 */
2000void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
2001{
2002 *p_dp_level = QED_LEVEL_NOTICE;
2003 *p_dp_module = 0;
2004
2005 if (debug & QED_LOG_VERBOSE_MASK) {
2006 *p_dp_level = QED_LEVEL_VERBOSE;
2007 *p_dp_module = (debug & 0x3FFFFFFF);
2008 } else if (debug & QED_LOG_INFO_MASK) {
2009 *p_dp_level = QED_LEVEL_INFO;
2010 } else if (debug & QED_LOG_NOTICE_MASK) {
2011 *p_dp_level = QED_LEVEL_NOTICE;
2012 }
2013}
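
/* Example decodings, assuming the QED_LOG_*_MASK values follow the bit layout
 * documented above: debug=0x40000000 (b30) selects QED_LEVEL_INFO with no
 * per-module verbosity, debug=0x3 selects QED_LEVEL_VERBOSE for the first two
 * modules only, and debug=0 keeps the default QED_LEVEL_NOTICE.
 */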
2014
2015static void qede_free_fp_array(struct qede_dev *edev)
2016{
2017 if (edev->fp_array) {
2018 struct qede_fastpath *fp;
2019 int i;
2020
2021 for_each_rss(i) {
2022 fp = &edev->fp_array[i];
2023
2024 kfree(fp->sb_info);
2025 kfree(fp->rxq);
2026 kfree(fp->txqs);
2027 }
2028 kfree(edev->fp_array);
2029 }
2030 edev->num_rss = 0;
2031}
2032
2033static int qede_alloc_fp_array(struct qede_dev *edev)
2034{
2035 struct qede_fastpath *fp;
2036 int i;
2037
2038 edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
2039 sizeof(*edev->fp_array), GFP_KERNEL);
2040 if (!edev->fp_array) {
2041 DP_NOTICE(edev, "fp array allocation failed\n");
2042 goto err;
2043 }
2044
2045 for_each_rss(i) {
2046 fp = &edev->fp_array[i];
2047
2048 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
2049 if (!fp->sb_info) {
2050 DP_NOTICE(edev, "sb info struct allocation failed\n");
2051 goto err;
2052 }
2053
2054 fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
2055 if (!fp->rxq) {
2056 DP_NOTICE(edev, "RXQ struct allocation failed\n");
2057 goto err;
2058 }
2059
2060 fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
2061 if (!fp->txqs) {
2062 DP_NOTICE(edev, "TXQ array allocation failed\n");
2063 goto err;
2064 }
2065 }
2066
2067 return 0;
2068err:
2069 qede_free_fp_array(edev);
2070 return -ENOMEM;
2071}
2072
2073static void qede_sp_task(struct work_struct *work)
2074{
2075 struct qede_dev *edev = container_of(work, struct qede_dev,
2076 sp_task.work);
2077 mutex_lock(&edev->qede_lock);
2078
2079 if (edev->state == QEDE_STATE_OPEN) {
2080 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
2081 qede_config_rx_mode(edev->ndev);
2082 }
2083
2084 mutex_unlock(&edev->qede_lock);
2085}
2086
2087static void qede_update_pf_params(struct qed_dev *cdev)
2088{
2089 struct qed_pf_params pf_params;
2090
2091 /* 64 rx + 64 tx */
2092 memset(&pf_params, 0, sizeof(struct qed_pf_params));
2093 pf_params.eth_pf_params.num_cons = 128;
2094 qed_ops->common->update_pf_params(cdev, &pf_params);
2095}
2096
2097enum qede_probe_mode {
2098 QEDE_PROBE_NORMAL,
2099};
2100
2101static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2102 enum qede_probe_mode mode)
2103{
2104 struct qed_slowpath_params params;
2105 struct qed_dev_eth_info dev_info;
2106 struct qede_dev *edev;
2107 struct qed_dev *cdev;
2108 int rc;
2109
2110 if (unlikely(dp_level & QED_LEVEL_INFO))
2111 pr_notice("Starting qede probe\n");
2112
2113 cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
2114 dp_module, dp_level);
2115 if (!cdev) {
2116 rc = -ENODEV;
2117 goto err0;
2118 }
2119
2120 qede_update_pf_params(cdev);
2121
2122 /* Start the Slowpath-process */
2123 memset(&params, 0, sizeof(struct qed_slowpath_params));
2124 params.int_mode = QED_INT_MODE_MSIX;
2125 params.drv_major = QEDE_MAJOR_VERSION;
2126 params.drv_minor = QEDE_MINOR_VERSION;
2127 params.drv_rev = QEDE_REVISION_VERSION;
2128 params.drv_eng = QEDE_ENGINEERING_VERSION;
2129 strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
2130 rc = qed_ops->common->slowpath_start(cdev, &params);
2131 if (rc) {
2132 pr_notice("Cannot start slowpath\n");
2133 goto err1;
2134 }
2135
2136 /* Learn information crucial for qede to progress */
2137 rc = qed_ops->fill_dev_info(cdev, &dev_info);
2138 if (rc)
2139 goto err2;
2140
2141 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
2142 dp_level);
2143 if (!edev) {
2144 rc = -ENOMEM;
2145 goto err2;
2146 }
2147
2148 qede_init_ndev(edev);
2149
2150 rc = register_netdev(edev->ndev);
2151 if (rc) {
2152 DP_NOTICE(edev, "Cannot register net-device\n");
2153 goto err3;
2154 }
2155
2156 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
2157
2158 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2159
2160 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
2161 mutex_init(&edev->qede_lock);
2162
2163 DP_INFO(edev, "Ending successfully qede probe\n");
2164
2165 return 0;
2166
2167err3:
2168 free_netdev(edev->ndev);
2169err2:
2170 qed_ops->common->slowpath_stop(cdev);
2171err1:
2172 qed_ops->common->remove(cdev);
2173err0:
2174 return rc;
2175}
2176
2177static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2178{
2179 u32 dp_module = 0;
2180 u8 dp_level = 0;
2181
2182 qede_config_debug(debug, &dp_module, &dp_level);
2183
2184 return __qede_probe(pdev, dp_module, dp_level,
2185 QEDE_PROBE_NORMAL);
2186}
2187
2188enum qede_remove_mode {
2189 QEDE_REMOVE_NORMAL,
2190};
2191
2192static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
2193{
2194 struct net_device *ndev = pci_get_drvdata(pdev);
2195 struct qede_dev *edev = netdev_priv(ndev);
2196 struct qed_dev *cdev = edev->cdev;
2197
2198 DP_INFO(edev, "Starting qede_remove\n");
2199
2200 cancel_delayed_work_sync(&edev->sp_task);
2201 unregister_netdev(ndev);
2202
2203 edev->ops->common->set_power_state(cdev, PCI_D0);
2204
2205 pci_set_drvdata(pdev, NULL);
2206
2207 free_netdev(ndev);
2208
2209 /* Use global ops since we've freed edev */
2210 qed_ops->common->slowpath_stop(cdev);
2211 qed_ops->common->remove(cdev);
2212
2213 pr_notice("Ending successfully qede_remove\n");
2214}
2215
2216static void qede_remove(struct pci_dev *pdev)
2217{
2218 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
2219}
2220
2221/* -------------------------------------------------------------------------
2222 * START OF LOAD / UNLOAD
2223 * -------------------------------------------------------------------------
2224 */
2225
2226static int qede_set_num_queues(struct qede_dev *edev)
2227{
2228 int rc;
2229 u16 rss_num;
2230
2231 /* Set up queues according to available resources */
2232 if (edev->req_rss)
2233 rss_num = edev->req_rss;
2234 else
2235 rss_num = netif_get_num_default_rss_queues() *
2236 edev->dev_info.common.num_hwfns;
2237
2238 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
2239
2240 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
2241 if (rc > 0) {
2242 /* Managed to request interrupts for our queues */
2243 edev->num_rss = rc;
2244 DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
2245 QEDE_RSS_CNT(edev), rss_num);
2246 rc = 0;
2247 }
2248 return rc;
2249}
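
/* Example: with edev->req_rss left at zero on a dual-hwfn (CMT) adapter
 * where netif_get_num_default_rss_queues() evaluates to 8, the driver asks
 * for 16 fastpath interrupts; the request is clamped to QEDE_MAX_RSS_CNT()
 * and the final queue count is whatever set_fp_int() manages to reserve.
 */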
2250
2251static void qede_free_mem_sb(struct qede_dev *edev,
2252 struct qed_sb_info *sb_info)
2253{
2254 if (sb_info->sb_virt)
2255 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
2256 (void *)sb_info->sb_virt, sb_info->sb_phys);
2257}
2258
2259/* This function allocates fast-path status block memory */
2260static int qede_alloc_mem_sb(struct qede_dev *edev,
2261 struct qed_sb_info *sb_info,
2262 u16 sb_id)
2263{
2264 struct status_block *sb_virt;
2265 dma_addr_t sb_phys;
2266 int rc;
2267
2268 sb_virt = dma_alloc_coherent(&edev->pdev->dev,
2269 sizeof(*sb_virt),
2270 &sb_phys, GFP_KERNEL);
2271 if (!sb_virt) {
2272 DP_ERR(edev, "Status block allocation failed\n");
2273 return -ENOMEM;
2274 }
2275
2276 rc = edev->ops->common->sb_init(edev->cdev, sb_info,
2277 sb_virt, sb_phys, sb_id,
2278 QED_SB_TYPE_L2_QUEUE);
2279 if (rc) {
2280 DP_ERR(edev, "Status block initialization failed\n");
2281 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
2282 sb_virt, sb_phys);
2283 return rc;
2284 }
2285
2286 return 0;
2287}
2288
2289static void qede_free_rx_buffers(struct qede_dev *edev,
2290 struct qede_rx_queue *rxq)
2291{
2292 u16 i;
2293
2294 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
2295 struct sw_rx_data *rx_buf;
2296 struct page *data;
2297
2298 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
2299 data = rx_buf->data;
2300
2301 dma_unmap_page(&edev->pdev->dev,
2302 rx_buf->mapping,
2303 PAGE_SIZE, DMA_FROM_DEVICE);
2304
2305 rx_buf->data = NULL;
2306 __free_page(data);
2307 }
2308}
2309
2310static void qede_free_sge_mem(struct qede_dev *edev,
2311 struct qede_rx_queue *rxq) {
2312 int i;
2313
2314 if (edev->gro_disable)
2315 return;
2316
2317 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
2318 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2319 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2320
2321 if (replace_buf->data) {
2322 dma_unmap_page(&edev->pdev->dev,
2323 dma_unmap_addr(replace_buf, mapping),
2324 PAGE_SIZE, DMA_FROM_DEVICE);
2325 __free_page(replace_buf->data);
2326 }
2327 }
2328}
2329
2330static void qede_free_mem_rxq(struct qede_dev *edev,
2331 struct qede_rx_queue *rxq)
2332{
2333 qede_free_sge_mem(edev, rxq);
2334
2335 /* Free rx buffers */
2336 qede_free_rx_buffers(edev, rxq);
2337
2338 /* Free the parallel SW ring */
2339 kfree(rxq->sw_rx_ring);
2340
2341 /* Free the real RQ ring used by FW */
2342 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
2343 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
2344}
2345
2346static int qede_alloc_rx_buffer(struct qede_dev *edev,
2347 struct qede_rx_queue *rxq)
2348{
2349 struct sw_rx_data *sw_rx_data;
2350 struct eth_rx_bd *rx_bd;
2351 dma_addr_t mapping;
2352 struct page *data;
2353 u16 rx_buf_size;
2354
2355 rx_buf_size = rxq->rx_buf_size;
2356
2357 data = alloc_pages(GFP_ATOMIC, 0);
2358 if (unlikely(!data)) {
2359 DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
2360 return -ENOMEM;
2361 }
2362
2363 /* Map the entire page, as it will be split into
2364 * multiple Rx buffer segments.
2365 */
2366 mapping = dma_map_page(&edev->pdev->dev, data, 0,
2367 PAGE_SIZE, DMA_FROM_DEVICE);
2368 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2369 __free_page(data);
2370 DP_NOTICE(edev, "Failed to map Rx buffer\n");
2371 return -ENOMEM;
2372 }
2373
2374 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
2375 sw_rx_data->page_offset = 0;
2376 sw_rx_data->data = data;
2377 sw_rx_data->mapping = mapping;
2378
2379 /* Advance PROD and get BD pointer */
2380 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
2381 WARN_ON(!rx_bd);
2382 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
2383 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
2384
2385 rxq->sw_rx_prod++;
2386
2387 return 0;
2388}
2389
2390static int qede_alloc_sge_mem(struct qede_dev *edev,
2391 struct qede_rx_queue *rxq)
2392{
2393 dma_addr_t mapping;
2394 int i;
2395
2396 if (edev->gro_disable)
2397 return 0;
2398
2399 if (edev->ndev->mtu > PAGE_SIZE) {
2400 edev->gro_disable = 1;
2401 return 0;
2402 }
2403
2404 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
2405 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2406 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2407
2408 replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
2409 if (unlikely(!replace_buf->data)) {
2410 DP_NOTICE(edev,
2411 "Failed to allocate TPA skb pool [replacement buffer]\n");
2412 goto err;
2413 }
2414
2415 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
2416 rxq->rx_buf_size, DMA_FROM_DEVICE);
2417 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2418 DP_NOTICE(edev,
2419 "Failed to map TPA replacement buffer\n");
2420 goto err;
2421 }
2422
2423 dma_unmap_addr_set(replace_buf, mapping, mapping);
2424 tpa_info->replace_buf.page_offset = 0;
2425
2426 tpa_info->replace_buf_mapping = mapping;
2427 tpa_info->agg_state = QEDE_AGG_STATE_NONE;
2428 }
2429
2430 return 0;
2431err:
2432 qede_free_sge_mem(edev, rxq);
2433 edev->gro_disable = 1;
2434 return -ENOMEM;
2435}
2436
2437/* This function allocates all memory needed per Rx queue */
2438static int qede_alloc_mem_rxq(struct qede_dev *edev,
2439 struct qede_rx_queue *rxq)
2440{
2441 int i, rc, size;
2442
2443 rxq->num_rx_buffers = edev->q_num_rx_buffers;
2444
2445 rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
2446 edev->ndev->mtu;
2447 if (rxq->rx_buf_size > PAGE_SIZE)
2448 rxq->rx_buf_size = PAGE_SIZE;
2449
2450 /* Segment size to split a page into multiple equal parts */
2451 rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
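/* Example: a standard 1500-byte MTU gives an rx_buf_size of roughly 1.5K
 * (MTU plus alignment and Ethernet overhead), which rounds up to a 2K
 * segment so each 4K page backs two buffers; an MTU above ~2K minus the
 * overhead rounds up to a full page, and the page-reuse scheme in
 * qede_realloc_rx_buffer() degenerates to one buffer per page.
 */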
2452
2453 /* Allocate the parallel driver ring for Rx buffers */
2454 size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
2455 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2456 if (!rxq->sw_rx_ring) {
2457 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2458 rc = -ENOMEM;
2459 goto err;
2460 }
2461
2462 /* Allocate FW Rx ring */
2463 rc = edev->ops->common->chain_alloc(edev->cdev,
2464 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2465 QED_CHAIN_MODE_NEXT_PTR,
2466 RX_RING_SIZE,
2467 sizeof(struct eth_rx_bd),
2468 &rxq->rx_bd_ring);
2469
2470 if (rc)
2471 goto err;
2472
2473 /* Allocate FW completion ring */
2474 rc = edev->ops->common->chain_alloc(edev->cdev,
2475 QED_CHAIN_USE_TO_CONSUME,
2476 QED_CHAIN_MODE_PBL,
2477 RX_RING_SIZE,
2478 sizeof(union eth_rx_cqe),
2479 &rxq->rx_comp_ring);
2480 if (rc)
2481 goto err;
2482
2483 /* Allocate buffers for the Rx ring */
2484 for (i = 0; i < rxq->num_rx_buffers; i++) {
2485 rc = qede_alloc_rx_buffer(edev, rxq);
2486 if (rc) {
2487 DP_ERR(edev,
2488 "Rx buffers allocation failed at index %d\n", i);
2489 goto err;
2490 }
2491 }
2492
2493 rc = qede_alloc_sge_mem(edev, rxq);
2494err:
2495 return rc;
2496}
2497
2498static void qede_free_mem_txq(struct qede_dev *edev,
2499 struct qede_tx_queue *txq)
2500{
2501 /* Free the parallel SW ring */
2502 kfree(txq->sw_tx_ring);
2503
2504 /* Free the real Tx ring (PBL) used by FW */
2505 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
2506}
2507
2508/* This function allocates all memory needed per Tx queue */
2509static int qede_alloc_mem_txq(struct qede_dev *edev,
2510 struct qede_tx_queue *txq)
2511{
2512 int size, rc;
2513 union eth_tx_bd_types *p_virt;
2514
2515 txq->num_tx_buffers = edev->q_num_tx_buffers;
2516
2517 /* Allocate the parallel driver ring for Tx buffers */
2518 size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
2519 txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
2520 if (!txq->sw_tx_ring) {
2521 DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
2522 goto err;
2523 }
2524
2525 rc = edev->ops->common->chain_alloc(edev->cdev,
2526 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
2527 QED_CHAIN_MODE_PBL,
2528 NUM_TX_BDS_MAX,
2529 sizeof(*p_virt),
2530 &txq->tx_pbl);
2531 if (rc)
2532 goto err;
2533
2534 return 0;
2535
2536err:
2537 qede_free_mem_txq(edev, txq);
2538 return -ENOMEM;
2539}
2540
2541/* This function frees all memory of a single fp */
2542static void qede_free_mem_fp(struct qede_dev *edev,
2543 struct qede_fastpath *fp)
2544{
2545 int tc;
2546
2547 qede_free_mem_sb(edev, fp->sb_info);
2548
2549 qede_free_mem_rxq(edev, fp->rxq);
2550
2551 for (tc = 0; tc < edev->num_tc; tc++)
2552 qede_free_mem_txq(edev, &fp->txqs[tc]);
2553}
2554
2555 /* This function allocates all memory needed for a single fp (i.e. an entity
2556 * which contains a status block, one Rx queue and multiple per-TC Tx queues).
2557 */
2558static int qede_alloc_mem_fp(struct qede_dev *edev,
2559 struct qede_fastpath *fp)
2560{
2561 int rc, tc;
2562
2563 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
2564 if (rc)
2565 goto err;
2566
2567 rc = qede_alloc_mem_rxq(edev, fp->rxq);
2568 if (rc)
2569 goto err;
2570
2571 for (tc = 0; tc < edev->num_tc; tc++) {
2572 rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
2573 if (rc)
2574 goto err;
2575 }
2576
2577 return 0;
2578err:
2579 return rc;
2580}
2581
2582static void qede_free_mem_load(struct qede_dev *edev)
2583{
2584 int i;
2585
2586 for_each_rss(i) {
2587 struct qede_fastpath *fp = &edev->fp_array[i];
2588
2589 qede_free_mem_fp(edev, fp);
2590 }
2591}
2592
2593/* This function allocates all qede memory at NIC load. */
2594static int qede_alloc_mem_load(struct qede_dev *edev)
2595{
2596 int rc = 0, rss_id;
2597
2598 for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
2599 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2600
2601 rc = qede_alloc_mem_fp(edev, fp);
2602 if (rc) {
2603 DP_ERR(edev,
2604 "Failed to allocate memory for fastpath - rss id = %d\n",
2605 rss_id);
2606 qede_free_mem_load(edev);
2607 return rc;
2608 }
2609 }
2610
2611 return 0;
2612}
2613
2614/* This function inits fp content and resets the SB, RXQ and TXQ structures */
2615static void qede_init_fp(struct qede_dev *edev)
2616{
2617 int rss_id, txq_index, tc;
2618 struct qede_fastpath *fp;
2619
2620 for_each_rss(rss_id) {
2621 fp = &edev->fp_array[rss_id];
2622
2623 fp->edev = edev;
2624 fp->rss_id = rss_id;
2625
2626 memset((void *)&fp->napi, 0, sizeof(fp->napi));
2627
2628 memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
2629
2630 memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
2631 fp->rxq->rxq_id = rss_id;
2632
2633 memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
2634 for (tc = 0; tc < edev->num_tc; tc++) {
2635 txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
2636 fp->txqs[tc].index = txq_index;
2637 }
2638
2639 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
2640 edev->ndev->name, rss_id);
2641 }
2642
2643 edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
2644}
2645
2646static int qede_set_real_num_queues(struct qede_dev *edev)
2647{
2648 int rc = 0;
2649
2650 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
2651 if (rc) {
2652 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
2653 return rc;
2654 }
2655 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
2656 if (rc) {
2657 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
2658 return rc;
2659 }
2660
2661 return 0;
2662}
2663
2664static void qede_napi_disable_remove(struct qede_dev *edev)
2665{
2666 int i;
2667
2668 for_each_rss(i) {
2669 napi_disable(&edev->fp_array[i].napi);
2670
2671 netif_napi_del(&edev->fp_array[i].napi);
2672 }
2673}
2674
2675static void qede_napi_add_enable(struct qede_dev *edev)
2676{
2677 int i;
2678
2679 /* Add NAPI objects */
2680 for_each_rss(i) {
2681 netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
2682 qede_poll, NAPI_POLL_WEIGHT);
2683 napi_enable(&edev->fp_array[i].napi);
2684 }
2685}
2686
2687static void qede_sync_free_irqs(struct qede_dev *edev)
2688{
2689 int i;
2690
2691 for (i = 0; i < edev->int_info.used_cnt; i++) {
2692 if (edev->int_info.msix_cnt) {
2693 synchronize_irq(edev->int_info.msix[i].vector);
2694 free_irq(edev->int_info.msix[i].vector,
2695 &edev->fp_array[i]);
2696 } else {
2697 edev->ops->common->simd_handler_clean(edev->cdev, i);
2698 }
2699 }
2700
2701 edev->int_info.used_cnt = 0;
2702}
2703
2704static int qede_req_msix_irqs(struct qede_dev *edev)
2705{
2706 int i, rc;
2707
2708 /* Sanity check: the MSI-X vectors must cover all prepared RSS queues */
2709 if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
2710 DP_ERR(edev,
2711 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
2712 QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
2713 return -EINVAL;
2714 }
2715
2716 for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
2717 rc = request_irq(edev->int_info.msix[i].vector,
2718 qede_msix_fp_int, 0, edev->fp_array[i].name,
2719 &edev->fp_array[i]);
2720 if (rc) {
2721 DP_ERR(edev, "Request fp %d irq failed\n", i);
2722 qede_sync_free_irqs(edev);
2723 return rc;
2724 }
2725 DP_VERBOSE(edev, NETIF_MSG_INTR,
2726 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
2727 edev->fp_array[i].name, i,
2728 &edev->fp_array[i]);
2729 edev->int_info.used_cnt++;
2730 }
2731
2732 return 0;
2733}
2734
2735static void qede_simd_fp_handler(void *cookie)
2736{
2737 struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
2738
2739 napi_schedule_irqoff(&fp->napi);
2740}
2741
2742static int qede_setup_irqs(struct qede_dev *edev)
2743{
2744 int i, rc = 0;
2745
2746 /* Learn Interrupt configuration */
2747 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
2748 if (rc)
2749 return rc;
2750
2751 if (edev->int_info.msix_cnt) {
2752 rc = qede_req_msix_irqs(edev);
2753 if (rc)
2754 return rc;
2755 edev->ndev->irq = edev->int_info.msix[0].vector;
2756 } else {
2757 const struct qed_common_ops *ops;
2758
2759 /* Let qed learn the RSS ids and their handler callbacks */
2760 ops = edev->ops->common;
2761 for (i = 0; i < QEDE_RSS_CNT(edev); i++)
2762 ops->simd_handler_config(edev->cdev,
2763 &edev->fp_array[i], i,
2764 qede_simd_fp_handler);
2765 edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
2766 }
2767 return 0;
2768}
2769
2770static int qede_drain_txq(struct qede_dev *edev,
2771 struct qede_tx_queue *txq,
2772 bool allow_drain)
2773{
2774 int rc, cnt = 1000;
2775
2776 while (txq->sw_tx_cons != txq->sw_tx_prod) {
2777 if (!cnt) {
2778 if (allow_drain) {
2779 DP_NOTICE(edev,
2780 "Tx queue[%d] is stuck, requesting MCP to drain\n",
2781 txq->index);
2782 rc = edev->ops->common->drain(edev->cdev);
2783 if (rc)
2784 return rc;
2785 return qede_drain_txq(edev, txq, false);
2786 }
2787 DP_NOTICE(edev,
2788 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2789 txq->index, txq->sw_tx_prod,
2790 txq->sw_tx_cons);
2791 return -ENODEV;
2792 }
2793 cnt--;
2794 usleep_range(1000, 2000);
2795 barrier();
2796 }
2797
2798 /* FW finished processing, wait for HW to transmit all tx packets */
2799 usleep_range(1000, 2000);
2800
2801 return 0;
2802}
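
/* Timing note: the loop above gives the queue up to ~1000 iterations of
 * 1-2ms each (roughly one to two seconds) to drain on its own; after that an
 * MCP drain is requested and the wait is retried once more before giving up
 * with -ENODEV.
 */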
2803
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			rc = qede_drain_txq(edev, txq, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qed_stop_txq_params tx_params;

			tx_params.rss_id = i;
			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
			rc = edev->ops->q_tx_stop(cdev, &tx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop TXQ #%d\n",
				       tx_params.tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue */
		memset(&rx_params, 0, sizeof(rx_params));
		rx_params.rss_id = i;
		rx_params.rx_queue_id = i;

		rc = edev->ops->q_rx_stop(cdev, &rx_params);
		if (rc) {
			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

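/* Bring up the datapath in the opposite order of qede_stop_queues(): start
 * the vport, start every Rx queue and its per-TC Tx queues, then activate the
 * vport (programming RSS whenever more than one Rx queue is in use).
 */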
static int qede_start_queues(struct qede_dev *edev)
{
	int rc, tc, i;
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
	struct qed_update_vport_params vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};

	if (!edev->num_rss) {
		DP_ERR(edev,
		       "Cannot update V-PORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;

		memset(&q_params, 0, sizeof(q_params));
		q_params.rss_id = i;
		q_params.queue_id = i;
		q_params.vport_id = 0;
		q_params.sb = fp->sb_info->igu_sb_id;
		q_params.sb_idx = RX_PI;

		rc = edev->ops->q_rx_start(cdev, &q_params,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   phys_table,
					   fp->rxq->rx_comp_ring.page_cnt,
					   &fp->rxq->hw_rxq_prod_addr);
		if (rc) {
			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

		qede_update_rx_prod(edev, fp->rxq);

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];
			int txq_index = tc * QEDE_RSS_CNT(edev) + i;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq_index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   txq->tx_pbl.pbl.p_phys_table,
						   txq->tx_pbl.page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq_index, rc);
				return rc;
			}

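			/* The Tx PI entry in the status block reports this
			 * queue's consumer. Doorbells for the queue target
			 * the XCM block and set the Tx BD producer value.
			 */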
			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	/* Fill struct with RSS params */
	if (QEDE_RSS_CNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;
		for (i = 0; i < 128; i++)
			rss_params->rss_ind_table[i] =
				ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
		netdev_rss_key_fill(rss_params->rss_key,
				    sizeof(rss_params->rss_key));
	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}
	memcpy(&vport_update_params.rss_params, rss_params,
	       sizeof(*rss_params));

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

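/* Build a single multicast filter command carrying num_macs addresses and
 * pass it to qed; opcode selects whether the filters are added or removed.
 */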
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
};

static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_RSS_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending successfully qede load\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info, 0, sizeof(edev->int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_rss = 0;
err0:
	return rc;
}

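/* Unload and reload the device. The optional callback runs in between so the
 * caller can change parameters that are only applied at load time; the Rx
 * mode is re-applied afterwards because the vport has been recreated.
 */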
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_NORMAL);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	return qede_load(edev, QEDE_LOAD_NORMAL);
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}

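/* Link-change notification from qed: start or stop the Tx queues and toggle
 * the carrier so the stack matches the new physical link state.
 */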
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}

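/* ndo_set_mac_address callback: validate the new address and, if the device
 * is running, replace the primary unicast MAC filter with it.
 */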
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}

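/* Rebuild the multicast filter configuration: with fewer than 64 multicast
 * addresses (and IFF_ALLMULTI off) the exact list is programmed; with
 * IFF_ALLMULTI set or more than 64 addresses, the accept flags fall back to
 * multicast-promiscuous instead.
 */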
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}

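/* ndo_set_rx_mode callback: it may run in atomic context, so the actual
 * filter reconfiguration is deferred to the sp_task workqueue.
 */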
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}

/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags =
		QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}